Dataset columns:
  code         string    lengths 141 to 79.4k characters
  apis         sequence  lengths 1 to 23 entries
  extract_api  string    lengths 126 to 73.2k characters
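Each row pairs a raw source file (code) with the list of third-party APIs it calls (apis) and a serialized record of every call site (extract_api). Below is a minimal sketch of inspecting one row, assuming the dump is available as newline-delimited JSON with these three fields; the file name and loading mechanism are illustrative, not given in this dump.

# Minimal sketch: inspect one row of the code/apis/extract_api dump.
# Assumptions: the data is stored as newline-delimited JSON and the file name
# "train.jsonl" is hypothetical; only the three column names come from the dump.
import json

with open("train.jsonl", "r", encoding="utf-8") as f:
    row = json.loads(f.readline())

print(len(row["code"]))         # raw source text, 141 to ~79.4k characters
print(row["apis"])              # list of API names used, e.g. ["langchain.analyze"]
print(row["extract_api"][:200]) # serialized call-site records with character offsets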
# Import necessary libraries
import hubspot
import langchain
import openai
import streamlit

# Define function to analyze customer data using Langchain
def analyze_customer_data(customer_data):
    langchain.analyze(customer_data)  # returns analyzed data

# Define function to send personalized appointment reminders via email and text message
def send_appointment_reminder(customer_email, customer_phone, appt_time):
    # Create message using OpenAI language model
    message = openai.generate_message(customer_name, appt_time)
    # Send email using Hubspot API
    hubspot.send_email(customer_email, message)
    # Send text message using Hubspot API
    hubspot.send_text(customer_phone, message)

# Call analyze_customer_data function on customer data
analyzed_data = analyze_customer_data(customer_data)

# Loop through customers in analyzed_data
for customer in analyzed_data:
    # Check if customer has an appointment scheduled
    if customer['appointment_time'] != None:
        # Send personalized appointment reminder to customer via email and text message
        send_appointment_reminder(customer['email'], customer['phone'], customer['appointment_time'])
[ "langchain.analyze" ]
[((206, 238), 'langchain.analyze', 'langchain.analyze', (['customer_data'], {}), '(customer_data)\n', (223, 238), False, 'import langchain\n'), ((499, 548), 'openai.generate_message', 'openai.generate_message', (['customer_name', 'appt_time'], {}), '(customer_name, appt_time)\n', (522, 548), False, 'import openai\n'), ((590, 633), 'hubspot.send_email', 'hubspot.send_email', (['customer_email', 'message'], {}), '(customer_email, message)\n', (608, 633), False, 'import hubspot\n'), ((682, 724), 'hubspot.send_text', 'hubspot.send_text', (['customer_phone', 'message'], {}), '(customer_phone, message)\n', (699, 724), False, 'import hubspot\n')]
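Each extract_api record appears to carry the character span of a call in the code string, the dotted API name, the parsed positional and keyword arguments, the argument text with its own span, a flag that seems to indicate an aliased import, and the originating import statement. A hedged sketch of walking such records back to the source follows, assuming that field layout, which is inferred from the rows in this dump rather than documented.

# Sketch: map extract_api records back to snippets of the code column.
# Assumption: each record is ((start, end), api_name, call_name, (args, kwargs),
# call_repr, (args_start, args_end), alias_flag, import_stmt); this layout is
# inferred from the sample rows above and may not match the real schema exactly.
import ast

def call_snippets(code, records):
    """Yield (api_name, source_text) for every recorded call site."""
    for (start, end), api_name, *rest in records:
        yield api_name, code[start:end]

# Hypothetical usage, assuming extract_api is stored as a Python literal string:
# for name, text in call_snippets(row["code"], ast.literal_eval(row["extract_api"])):
#     print(name, "->", text)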
import langchain
from dotenv import load_dotenv
from langchain.agents import initialize_agent, AgentType
from langchain.chat_models import ChatOpenAI
from datetime import timedelta, datetime
import chainlit as cl
from utils.custom_tools import CustomTrinoListTable, CustomTrinoTableSchema, CustomTrinoSqlQuery, CustomTrinoSqlCheck, CustomTrinoTableJoin

# Load environment variables from the .env file
load_dotenv()

langchain.debug = True

today = (datetime.now()).strftime("%Y%m%d")
yeaterday = (datetime.now() - timedelta(days=1)).strftime("%Y%m%d")
today_d = (datetime.now()).strftime("%Y-%m-%d")
yeaterday_d = (datetime.now() - timedelta(days=1)).strftime("%Y-%m-%d")

custom_prefix = f"""
You are an agent designed to interact with a Trino SQL database.
Please do not answer others questions,use Chinese to answer questions,think it step by step.
NOTE:
data rule:
    date string format YYYYMMDD. for example, today is {today}, then yeaterday is {yeaterday}, and so on.
    date string format YYYY-MM-DD. for example, today is {today_d}, then yeaterday is {yeaterday_d}, and so on.
    column dt is table data version string, not a business date.
sql rule:
    sql generation not need to end with ; only sql itself.
    get answer from single sql after plan logics step by step.
You have access to the following tools:"""


@cl.on_chat_start
async def start():
    # Initialize model
    model = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, verbose=False, streaming=True)
    custom_tool_list = [CustomTrinoListTable(), CustomTrinoTableSchema(), CustomTrinoSqlQuery(), CustomTrinoSqlCheck(), CustomTrinoTableJoin()]
    agent_executor = initialize_agent(
        custom_tool_list,
        llm=model,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
        max_iterations=6,
        agent_kwargs={"prefix": custom_prefix},
        handle_parsing_errors="Check your output and make sure it conforms"
    )
    cl.user_session.set("agent", agent_executor)

    # Send the initial message
    elements = [
        cl.Text(name="提问:", content="计算订单明细表的dt为昨天,销售日期为本月的总销售数量", display="inline"),
        cl.Text(name="我能生成SQL脚本:", content=f"SELECT SUM(num) AS total_sales_quantity FROM gjdw.dw_sale_tr_goods_dt WHERE dt = '{today}' AND dates >= '2023-10-01' AND dates <= '2023-10-31'", display="inline", language="SQL"),
        cl.Text(name="最终结果:", content="订单明细表的dt为昨天,销售日期为本月的总销售数量是0。", display="inline"),
    ]
    content = "Hi,我是 Trino SQL Agent ,我能帮助你查询trino数据库。您可以向我提问,例如:"
    await cl.Message(content=content, elements=elements).send()


@cl.on_message
async def main(message: cl.Message):
    agent = cl.user_session.get("agent")  # type: AgentExecutor
    cb = cl.LangchainCallbackHandler(stream_final_answer=True)
    print(message)
    await cl.make_async(agent.run)(message, callbacks=[cb])


# def ask(input: str) -> str:
#     print("-- Serving request for input: %s" % input)
#     try:
#         response = agent_executor.run(input)
#     except Exception as e:
#         response = str(e)
#         if response.startswith("Could not parse LLM output: `"):
#             response = response.removeprefix("Could not parse LLM output: `").removesuffix("`")
#     return response

# agent_executor.run(" table gjdw.dw_sale_tr_goods_dt has column named bill_code ?")
# ask("计算订单明细表的dt为昨天,销售日期为本月初的总销售数量")
[ "langchain.agents.initialize_agent", "langchain.chat_models.ChatOpenAI" ]
[((371, 384), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (382, 384), False, 'from dotenv import load_dotenv\n'), ((1351, 1439), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)', 'verbose': '(False)', 'streaming': '(True)'}), "(model_name='gpt-3.5-turbo', temperature=0, verbose=False,\n streaming=True)\n", (1361, 1439), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1603, 1848), 'langchain.agents.initialize_agent', 'initialize_agent', (['custom_tool_list'], {'llm': 'model', 'agent': 'AgentType.ZERO_SHOT_REACT_DESCRIPTION', 'verbose': '(True)', 'max_iterations': '(6)', 'agent_kwargs': "{'prefix': custom_prefix}", 'handle_parsing_errors': '"""Check your output and make sure it conforms"""'}), "(custom_tool_list, llm=model, agent=AgentType.\n ZERO_SHOT_REACT_DESCRIPTION, verbose=True, max_iterations=6,\n agent_kwargs={'prefix': custom_prefix}, handle_parsing_errors=\n 'Check your output and make sure it conforms')\n", (1619, 1848), False, 'from langchain.agents import initialize_agent, AgentType\n'), ((1901, 1945), 'chainlit.user_session.set', 'cl.user_session.set', (['"""agent"""', 'agent_executor'], {}), "('agent', agent_executor)\n", (1920, 1945), True, 'import chainlit as cl\n'), ((2597, 2625), 'chainlit.user_session.get', 'cl.user_session.get', (['"""agent"""'], {}), "('agent')\n", (2616, 2625), True, 'import chainlit as cl\n'), ((2659, 2712), 'chainlit.LangchainCallbackHandler', 'cl.LangchainCallbackHandler', ([], {'stream_final_answer': '(True)'}), '(stream_final_answer=True)\n', (2686, 2712), True, 'import chainlit as cl\n'), ((417, 431), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (429, 431), False, 'from datetime import timedelta, datetime\n'), ((531, 545), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (543, 545), False, 'from datetime import timedelta, datetime\n'), ((1461, 1483), 'utils.custom_tools.CustomTrinoListTable', 'CustomTrinoListTable', ([], {}), '()\n', (1481, 1483), False, 'from utils.custom_tools import CustomTrinoListTable, CustomTrinoTableSchema, CustomTrinoSqlQuery, CustomTrinoSqlCheck, CustomTrinoTableJoin\n'), ((1485, 1509), 'utils.custom_tools.CustomTrinoTableSchema', 'CustomTrinoTableSchema', ([], {}), '()\n', (1507, 1509), False, 'from utils.custom_tools import CustomTrinoListTable, CustomTrinoTableSchema, CustomTrinoSqlQuery, CustomTrinoSqlCheck, CustomTrinoTableJoin\n'), ((1511, 1532), 'utils.custom_tools.CustomTrinoSqlQuery', 'CustomTrinoSqlQuery', ([], {}), '()\n', (1530, 1532), False, 'from utils.custom_tools import CustomTrinoListTable, CustomTrinoTableSchema, CustomTrinoSqlQuery, CustomTrinoSqlCheck, CustomTrinoTableJoin\n'), ((1534, 1555), 'utils.custom_tools.CustomTrinoSqlCheck', 'CustomTrinoSqlCheck', ([], {}), '()\n', (1553, 1555), False, 'from utils.custom_tools import CustomTrinoListTable, CustomTrinoTableSchema, CustomTrinoSqlQuery, CustomTrinoSqlCheck, CustomTrinoTableJoin\n'), ((1557, 1579), 'utils.custom_tools.CustomTrinoTableJoin', 'CustomTrinoTableJoin', ([], {}), '()\n', (1577, 1579), False, 'from utils.custom_tools import CustomTrinoListTable, CustomTrinoTableSchema, CustomTrinoSqlQuery, CustomTrinoSqlCheck, CustomTrinoTableJoin\n'), ((2002, 2078), 'chainlit.Text', 'cl.Text', ([], {'name': '"""提问:"""', 'content': '"""计算订单明细表的dt为昨天,销售日期为本月的总销售数量"""', 'display': '"""inline"""'}), "(name='提问:', content='计算订单明细表的dt为昨天,销售日期为本月的总销售数量', display='inline')\n", (2009, 2078), True, 'import chainlit as cl\n'), ((2088, 2313), 
'chainlit.Text', 'cl.Text', ([], {'name': '"""我能生成SQL脚本:"""', 'content': 'f"""SELECT SUM(num) AS total_sales_quantity FROM gjdw.dw_sale_tr_goods_dt WHERE dt = \'{today}\' AND dates >= \'2023-10-01\' AND dates <= \'2023-10-31\'"""', 'display': '"""inline"""', 'language': '"""SQL"""'}), '(name=\'我能生成SQL脚本:\', content=\n f"SELECT SUM(num) AS total_sales_quantity FROM gjdw.dw_sale_tr_goods_dt WHERE dt = \'{today}\' AND dates >= \'2023-10-01\' AND dates <= \'2023-10-31\'"\n , display=\'inline\', language=\'SQL\')\n', (2095, 2313), True, 'import chainlit as cl\n'), ((2313, 2392), 'chainlit.Text', 'cl.Text', ([], {'name': '"""最终结果:"""', 'content': '"""订单明细表的dt为昨天,销售日期为本月的总销售数量是0。"""', 'display': '"""inline"""'}), "(name='最终结果:', content='订单明细表的dt为昨天,销售日期为本月的总销售数量是0。', display='inline')\n", (2320, 2392), True, 'import chainlit as cl\n'), ((465, 479), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (477, 479), False, 'from datetime import timedelta, datetime\n'), ((482, 499), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (491, 499), False, 'from datetime import timedelta, datetime\n'), ((583, 597), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (595, 597), False, 'from datetime import timedelta, datetime\n'), ((600, 617), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (609, 617), False, 'from datetime import timedelta, datetime\n'), ((2742, 2766), 'chainlit.make_async', 'cl.make_async', (['agent.run'], {}), '(agent.run)\n', (2755, 2766), True, 'import chainlit as cl\n'), ((2477, 2523), 'chainlit.Message', 'cl.Message', ([], {'content': 'content', 'elements': 'elements'}), '(content=content, elements=elements)\n', (2487, 2523), True, 'import chainlit as cl\n')]
import asyncio import inspect import warnings from abc import ABC, abstractmethod from functools import partial from typing import ( Any, AsyncIterator, Dict, Iterator, List, Optional, Sequence, cast, ) import langchain from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import ( AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks, ) from langchain.load.dump import dumpd, dumps from langchain.prompts.base import StringPromptValue from langchain.prompts.chat import ChatPromptValue from langchain.pydantic_v1 import Field, root_validator from langchain.schema import ( ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo, ) from langchain.schema.language_model import BaseLanguageModel, LanguageModelInput from langchain.schema.messages import ( AIMessage, BaseMessage, BaseMessageChunk, HumanMessage, ) from langchain.schema.output import ChatGenerationChunk from langchain.schema.runnable import RunnableConfig def _get_verbosity() -> bool: return langchain.verbose class BaseChatModel(BaseLanguageModel[BaseMessageChunk], ABC): """Base class for Chat models.""" cache: Optional[bool] = None """Whether to cache the response.""" verbose: bool = Field(default_factory=_get_verbosity) """Whether to print out response text.""" callbacks: Callbacks = Field(default=None, exclude=True) """Callbacks to add to the run trace.""" callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True) """Callback manager to add to the run trace.""" tags: Optional[List[str]] = Field(default=None, exclude=True) """Tags to add to the run trace.""" metadata: Optional[Dict[str, Any]] = Field(default=None, exclude=True) """Metadata to add to the run trace.""" @root_validator() def raise_deprecation(cls, values: Dict) -> Dict: """Raise deprecation warning if callback_manager is used.""" if values.get("callback_manager") is not None: warnings.warn( "callback_manager is deprecated. Please use callbacks instead.", DeprecationWarning, ) values["callbacks"] = values.pop("callback_manager", None) return values class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True # --- Runnable methods --- def _convert_input(self, input: LanguageModelInput) -> PromptValue: if isinstance(input, PromptValue): return input elif isinstance(input, str): return StringPromptValue(text=input) elif isinstance(input, list): return ChatPromptValue(messages=input) else: raise ValueError( f"Invalid input type {type(input)}. " "Must be a PromptValue, str, or list of BaseMessages." 
) def invoke( self, input: LanguageModelInput, config: Optional[RunnableConfig] = None, *, stop: Optional[List[str]] = None, **kwargs: Any, ) -> BaseMessageChunk: config = config or {} return cast( BaseMessageChunk, cast( ChatGeneration, self.generate_prompt( [self._convert_input(input)], stop=stop, callbacks=config.get("callbacks"), tags=config.get("tags"), metadata=config.get("metadata"), **kwargs, ).generations[0][0], ).message, ) async def ainvoke( self, input: LanguageModelInput, config: Optional[RunnableConfig] = None, *, stop: Optional[List[str]] = None, **kwargs: Any, ) -> BaseMessageChunk: if type(self)._agenerate == BaseChatModel._agenerate: # model doesn't implement async generation, so use default implementation return await asyncio.get_running_loop().run_in_executor( None, partial(self.invoke, input, config, stop=stop, **kwargs) ) config = config or {} llm_result = await self.agenerate_prompt( [self._convert_input(input)], stop=stop, callbacks=config.get("callbacks"), tags=config.get("tags"), metadata=config.get("metadata"), **kwargs, ) return cast( BaseMessageChunk, cast(ChatGeneration, llm_result.generations[0][0]).message ) def stream( self, input: LanguageModelInput, config: Optional[RunnableConfig] = None, *, stop: Optional[List[str]] = None, **kwargs: Any, ) -> Iterator[BaseMessageChunk]: if type(self)._stream == BaseChatModel._stream: # model doesn't implement streaming, so use default implementation yield self.invoke(input, config=config, stop=stop, **kwargs) else: config = config or {} messages = self._convert_input(input).to_messages() params = self._get_invocation_params(stop=stop, **kwargs) options = {"stop": stop, **kwargs} callback_manager = CallbackManager.configure( config.get("callbacks"), self.callbacks, self.verbose, config.get("tags"), self.tags, config.get("metadata"), self.metadata, ) (run_manager,) = callback_manager.on_chat_model_start( dumpd(self), [messages], invocation_params=params, options=options ) try: message: Optional[BaseMessageChunk] = None for chunk in self._stream( messages, stop=stop, run_manager=run_manager, **kwargs ): yield chunk.message if message is None: message = chunk.message else: message += chunk.message assert message is not None except (KeyboardInterrupt, Exception) as e: run_manager.on_llm_error(e) raise e else: run_manager.on_llm_end( LLMResult(generations=[[ChatGeneration(message=message)]]), ) async def astream( self, input: LanguageModelInput, config: Optional[RunnableConfig] = None, *, stop: Optional[List[str]] = None, **kwargs: Any, ) -> AsyncIterator[BaseMessageChunk]: if type(self)._astream == BaseChatModel._astream: # model doesn't implement streaming, so use default implementation yield self.invoke(input, config=config, stop=stop, **kwargs) else: config = config or {} messages = self._convert_input(input).to_messages() params = self._get_invocation_params(stop=stop, **kwargs) options = {"stop": stop, **kwargs} callback_manager = AsyncCallbackManager.configure( config.get("callbacks"), self.callbacks, self.verbose, config.get("tags"), self.tags, config.get("metadata"), self.metadata, ) (run_manager,) = await callback_manager.on_chat_model_start( dumpd(self), [messages], invocation_params=params, options=options ) try: message: Optional[BaseMessageChunk] = None async for chunk in self._astream( messages, stop=stop, run_manager=run_manager, **kwargs ): yield chunk.message if message is None: message = chunk.message else: message += chunk.message assert message is not None except (KeyboardInterrupt, Exception) as e: 
await run_manager.on_llm_error(e) raise e else: await run_manager.on_llm_end( LLMResult(generations=[[ChatGeneration(message=message)]]), ) # --- Custom methods --- def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict: return {} def _get_invocation_params( self, stop: Optional[List[str]] = None, **kwargs: Any, ) -> dict: params = self.dict() params["stop"] = stop return {**params, **kwargs} def _get_llm_string(self, stop: Optional[List[str]] = None, **kwargs: Any) -> str: if self.lc_serializable: params = {**kwargs, **{"stop": stop}} param_string = str(sorted([(k, v) for k, v in params.items()])) llm_string = dumps(self) return llm_string + "---" + param_string else: params = self._get_invocation_params(stop=stop, **kwargs) params = {**params, **kwargs} return str(sorted([(k, v) for k, v in params.items()])) def generate( self, messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Callbacks = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> LLMResult: """Top Level call""" params = self._get_invocation_params(stop=stop, **kwargs) options = {"stop": stop} callback_manager = CallbackManager.configure( callbacks, self.callbacks, self.verbose, tags, self.tags, metadata, self.metadata, ) run_managers = callback_manager.on_chat_model_start( dumpd(self), messages, invocation_params=params, options=options ) results = [] for i, m in enumerate(messages): try: results.append( self._generate_with_cache( m, stop=stop, run_manager=run_managers[i] if run_managers else None, **kwargs, ) ) except (KeyboardInterrupt, Exception) as e: if run_managers: run_managers[i].on_llm_error(e) raise e flattened_outputs = [ LLMResult(generations=[res.generations], llm_output=res.llm_output) for res in results ] llm_output = self._combine_llm_outputs([res.llm_output for res in results]) generations = [res.generations for res in results] output = LLMResult(generations=generations, llm_output=llm_output) if run_managers: run_infos = [] for manager, flattened_output in zip(run_managers, flattened_outputs): manager.on_llm_end(flattened_output) run_infos.append(RunInfo(run_id=manager.run_id)) output.run = run_infos return output async def agenerate( self, messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Callbacks = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> LLMResult: """Top Level call""" params = self._get_invocation_params(stop=stop, **kwargs) options = {"stop": stop} callback_manager = AsyncCallbackManager.configure( callbacks, self.callbacks, self.verbose, tags, self.tags, metadata, self.metadata, ) run_managers = await callback_manager.on_chat_model_start( dumpd(self), messages, invocation_params=params, options=options ) results = await asyncio.gather( *[ self._agenerate_with_cache( m, stop=stop, run_manager=run_managers[i] if run_managers else None, **kwargs, ) for i, m in enumerate(messages) ], return_exceptions=True, ) exceptions = [] for i, res in enumerate(results): if isinstance(res, Exception): if run_managers: await run_managers[i].on_llm_error(res) exceptions.append(res) if exceptions: if run_managers: await asyncio.gather( *[ run_manager.on_llm_end( LLMResult( generations=[res.generations], llm_output=res.llm_output ) ) for run_manager, res in zip(run_managers, results) if not isinstance(res, Exception) ] ) raise exceptions[0] flattened_outputs = [ LLMResult(generations=[res.generations], llm_output=res.llm_output) 
for res in results ] llm_output = self._combine_llm_outputs([res.llm_output for res in results]) generations = [res.generations for res in results] output = LLMResult(generations=generations, llm_output=llm_output) await asyncio.gather( *[ run_manager.on_llm_end(flattened_output) for run_manager, flattened_output in zip( run_managers, flattened_outputs ) ] ) if run_managers: output.run = [ RunInfo(run_id=run_manager.run_id) for run_manager in run_managers ] return output def generate_prompt( self, prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> LLMResult: prompt_messages = [p.to_messages() for p in prompts] return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs) async def agenerate_prompt( self, prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> LLMResult: prompt_messages = [p.to_messages() for p in prompts] return await self.agenerate( prompt_messages, stop=stop, callbacks=callbacks, **kwargs ) def _generate_with_cache( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: new_arg_supported = inspect.signature(self._generate).parameters.get( "run_manager" ) disregard_cache = self.cache is not None and not self.cache if langchain.llm_cache is None or disregard_cache: # This happens when langchain.cache is None, but self.cache is True if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but no cache found at `langchain.cache`." ) if new_arg_supported: return self._generate( messages, stop=stop, run_manager=run_manager, **kwargs ) else: return self._generate(messages, stop=stop, **kwargs) else: llm_string = self._get_llm_string(stop=stop, **kwargs) prompt = dumps(messages) cache_val = langchain.llm_cache.lookup(prompt, llm_string) if isinstance(cache_val, list): return ChatResult(generations=cache_val) else: if new_arg_supported: result = self._generate( messages, stop=stop, run_manager=run_manager, **kwargs ) else: result = self._generate(messages, stop=stop, **kwargs) langchain.llm_cache.update(prompt, llm_string, result.generations) return result async def _agenerate_with_cache( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: new_arg_supported = inspect.signature(self._agenerate).parameters.get( "run_manager" ) disregard_cache = self.cache is not None and not self.cache if langchain.llm_cache is None or disregard_cache: # This happens when langchain.cache is None, but self.cache is True if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but no cache found at `langchain.cache`." 
) if new_arg_supported: return await self._agenerate( messages, stop=stop, run_manager=run_manager, **kwargs ) else: return await self._agenerate(messages, stop=stop, **kwargs) else: llm_string = self._get_llm_string(stop=stop, **kwargs) prompt = dumps(messages) cache_val = langchain.llm_cache.lookup(prompt, llm_string) if isinstance(cache_val, list): return ChatResult(generations=cache_val) else: if new_arg_supported: result = await self._agenerate( messages, stop=stop, run_manager=run_manager, **kwargs ) else: result = await self._agenerate(messages, stop=stop, **kwargs) langchain.llm_cache.update(prompt, llm_string, result.generations) return result @abstractmethod def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: """Top Level call""" async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: """Top Level call""" raise NotImplementedError() def _stream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[ChatGenerationChunk]: raise NotImplementedError() def _astream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[ChatGenerationChunk]: raise NotImplementedError() def __call__( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> BaseMessage: generation = self.generate( [messages], stop=stop, callbacks=callbacks, **kwargs ).generations[0][0] if isinstance(generation, ChatGeneration): return generation.message else: raise ValueError("Unexpected generation type") async def _call_async( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> BaseMessage: result = await self.agenerate( [messages], stop=stop, callbacks=callbacks, **kwargs ) generation = result.generations[0][0] if isinstance(generation, ChatGeneration): return generation.message else: raise ValueError("Unexpected generation type") def call_as_llm( self, message: str, stop: Optional[List[str]] = None, **kwargs: Any ) -> str: return self.predict(message, stop=stop, **kwargs) def predict( self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any ) -> str: if stop is None: _stop = None else: _stop = list(stop) result = self([HumanMessage(content=text)], stop=_stop, **kwargs) return result.content def predict_messages( self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any, ) -> BaseMessage: if stop is None: _stop = None else: _stop = list(stop) return self(messages, stop=_stop, **kwargs) async def apredict( self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any ) -> str: if stop is None: _stop = None else: _stop = list(stop) result = await self._call_async( [HumanMessage(content=text)], stop=_stop, **kwargs ) return result.content async def apredict_messages( self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any, ) -> BaseMessage: if stop is None: _stop = None else: _stop = list(stop) return await self._call_async(messages, stop=_stop, **kwargs) @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return 
{} @property @abstractmethod def _llm_type(self) -> str: """Return type of chat model.""" def dict(self, **kwargs: Any) -> Dict: """Return a dictionary of the LLM.""" starter_dict = dict(self._identifying_params) starter_dict["_type"] = self._llm_type return starter_dict class SimpleChatModel(BaseChatModel): """Simple Chat Model.""" def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: output_str = self._call(messages, stop=stop, run_manager=run_manager, **kwargs) message = AIMessage(content=output_str) generation = ChatGeneration(message=message) return ChatResult(generations=[generation]) @abstractmethod def _call( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Simpler interface.""" async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: func = partial( self._generate, messages, stop=stop, run_manager=run_manager, **kwargs ) return await asyncio.get_event_loop().run_in_executor(None, func)
[ "langchain.pydantic_v1.Field", "langchain.callbacks.manager.AsyncCallbackManager.configure", "langchain.schema.messages.AIMessage", "langchain.schema.ChatResult", "langchain.load.dump.dumps", "langchain.callbacks.manager.CallbackManager.configure", "langchain.load.dump.dumpd", "langchain.schema.RunInfo", "langchain.schema.messages.HumanMessage", "langchain.prompts.chat.ChatPromptValue", "langchain.schema.ChatGeneration", "langchain.prompts.base.StringPromptValue", "langchain.llm_cache.lookup", "langchain.llm_cache.update", "langchain.schema.LLMResult", "langchain.pydantic_v1.root_validator" ]
[((1364, 1401), 'langchain.pydantic_v1.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (1369, 1401), False, 'from langchain.pydantic_v1 import Field, root_validator\n'), ((1475, 1508), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (1480, 1508), False, 'from langchain.pydantic_v1 import Field, root_validator\n'), ((1608, 1641), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (1613, 1641), False, 'from langchain.pydantic_v1 import Field, root_validator\n'), ((1726, 1759), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (1731, 1759), False, 'from langchain.pydantic_v1 import Field, root_validator\n'), ((1841, 1874), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (1846, 1874), False, 'from langchain.pydantic_v1 import Field, root_validator\n'), ((1925, 1941), 'langchain.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (1939, 1941), False, 'from langchain.pydantic_v1 import Field, root_validator\n'), ((9835, 9947), 'langchain.callbacks.manager.CallbackManager.configure', 'CallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose', 'tags', 'self.tags', 'metadata', 'self.metadata'], {}), '(callbacks, self.callbacks, self.verbose, tags,\n self.tags, metadata, self.metadata)\n', (9860, 9947), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((11036, 11093), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output'}), '(generations=generations, llm_output=llm_output)\n', (11045, 11093), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((11869, 11986), 'langchain.callbacks.manager.AsyncCallbackManager.configure', 'AsyncCallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose', 'tags', 'self.tags', 'metadata', 'self.metadata'], {}), '(callbacks, self.callbacks, self.verbose,\n tags, self.tags, metadata, self.metadata)\n', (11899, 11986), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((13721, 13778), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output'}), '(generations=generations, llm_output=llm_output)\n', (13730, 13778), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((22871, 22900), 'langchain.schema.messages.AIMessage', 'AIMessage', ([], {'content': 'output_str'}), '(content=output_str)\n', (22880, 22900), False, 'from langchain.schema.messages import AIMessage, BaseMessage, BaseMessageChunk, HumanMessage\n'), ((22922, 22953), 'langchain.schema.ChatGeneration', 'ChatGeneration', ([], {'message': 'message'}), '(message=message)\n', (22936, 22953), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((22969, 23005), 'langchain.schema.ChatResult', 'ChatResult', ([], {'generations': '[generation]'}), '(generations=[generation])\n', (22979, 23005), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, 
RunInfo\n'), ((23517, 23596), 'functools.partial', 'partial', (['self._generate', 'messages'], {'stop': 'stop', 'run_manager': 'run_manager'}), '(self._generate, messages, stop=stop, run_manager=run_manager, **kwargs)\n', (23524, 23596), False, 'from functools import partial\n'), ((2132, 2234), 'warnings.warn', 'warnings.warn', (['"""callback_manager is deprecated. Please use callbacks instead."""', 'DeprecationWarning'], {}), "('callback_manager is deprecated. Please use callbacks instead.',\n DeprecationWarning)\n", (2145, 2234), False, 'import warnings\n'), ((9118, 9129), 'langchain.load.dump.dumps', 'dumps', (['self'], {}), '(self)\n', (9123, 9129), False, 'from langchain.load.dump import dumpd, dumps\n'), ((10112, 10123), 'langchain.load.dump.dumpd', 'dumpd', (['self'], {}), '(self)\n', (10117, 10123), False, 'from langchain.load.dump import dumpd, dumps\n'), ((10767, 10834), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': '[res.generations]', 'llm_output': 'res.llm_output'}), '(generations=[res.generations], llm_output=res.llm_output)\n', (10776, 10834), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((13452, 13519), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': '[res.generations]', 'llm_output': 'res.llm_output'}), '(generations=[res.generations], llm_output=res.llm_output)\n', (13461, 13519), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((16027, 16042), 'langchain.load.dump.dumps', 'dumps', (['messages'], {}), '(messages)\n', (16032, 16042), False, 'from langchain.load.dump import dumpd, dumps\n'), ((16067, 16113), 'langchain.llm_cache.lookup', 'langchain.llm_cache.lookup', (['prompt', 'llm_string'], {}), '(prompt, llm_string)\n', (16093, 16113), False, 'import langchain\n'), ((17747, 17762), 'langchain.load.dump.dumps', 'dumps', (['messages'], {}), '(messages)\n', (17752, 17762), False, 'from langchain.load.dump import dumpd, dumps\n'), ((17787, 17833), 'langchain.llm_cache.lookup', 'langchain.llm_cache.lookup', (['prompt', 'llm_string'], {}), '(prompt, llm_string)\n', (17813, 17833), False, 'import langchain\n'), ((2713, 2742), 'langchain.prompts.base.StringPromptValue', 'StringPromptValue', ([], {'text': 'input'}), '(text=input)\n', (2730, 2742), False, 'from langchain.prompts.base import StringPromptValue\n'), ((4629, 4679), 'typing.cast', 'cast', (['ChatGeneration', 'llm_result.generations[0][0]'], {}), '(ChatGeneration, llm_result.generations[0][0])\n', (4633, 4679), False, 'from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Sequence, cast\n'), ((5755, 5766), 'langchain.load.dump.dumpd', 'dumpd', (['self'], {}), '(self)\n', (5760, 5766), False, 'from langchain.load.dump import dumpd, dumps\n'), ((12158, 12169), 'langchain.load.dump.dumpd', 'dumpd', (['self'], {}), '(self)\n', (12163, 12169), False, 'from langchain.load.dump import dumpd, dumps\n'), ((14101, 14135), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (14108, 14135), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((16181, 16214), 'langchain.schema.ChatResult', 'ChatResult', ([], {'generations': 'cache_val'}), '(generations=cache_val)\n', (16191, 16214), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((16530, 16596), 'langchain.llm_cache.update', 'langchain.llm_cache.update', 
(['prompt', 'llm_string', 'result.generations'], {}), '(prompt, llm_string, result.generations)\n', (16556, 16596), False, 'import langchain\n'), ((17901, 17934), 'langchain.schema.ChatResult', 'ChatResult', ([], {'generations': 'cache_val'}), '(generations=cache_val)\n', (17911, 17934), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((18264, 18330), 'langchain.llm_cache.update', 'langchain.llm_cache.update', (['prompt', 'llm_string', 'result.generations'], {}), '(prompt, llm_string, result.generations)\n', (18290, 18330), False, 'import langchain\n'), ((20898, 20924), 'langchain.schema.messages.HumanMessage', 'HumanMessage', ([], {'content': 'text'}), '(content=text)\n', (20910, 20924), False, 'from langchain.schema.messages import AIMessage, BaseMessage, BaseMessageChunk, HumanMessage\n'), ((2800, 2831), 'langchain.prompts.chat.ChatPromptValue', 'ChatPromptValue', ([], {'messages': 'input'}), '(messages=input)\n', (2815, 2831), False, 'from langchain.prompts.chat import ChatPromptValue\n'), ((4200, 4256), 'functools.partial', 'partial', (['self.invoke', 'input', 'config'], {'stop': 'stop'}), '(self.invoke, input, config, stop=stop, **kwargs)\n', (4207, 4256), False, 'from functools import partial\n'), ((7657, 7668), 'langchain.load.dump.dumpd', 'dumpd', (['self'], {}), '(self)\n', (7662, 7668), False, 'from langchain.load.dump import dumpd, dumps\n'), ((11315, 11345), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'manager.run_id'}), '(run_id=manager.run_id)\n', (11322, 11345), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((15194, 15227), 'inspect.signature', 'inspect.signature', (['self._generate'], {}), '(self._generate)\n', (15211, 15227), False, 'import inspect\n'), ((16899, 16933), 'inspect.signature', 'inspect.signature', (['self._agenerate'], {}), '(self._agenerate)\n', (16916, 16933), False, 'import inspect\n'), ((21574, 21600), 'langchain.schema.messages.HumanMessage', 'HumanMessage', ([], {'content': 'text'}), '(content=text)\n', (21586, 21600), False, 'from langchain.schema.messages import AIMessage, BaseMessage, BaseMessageChunk, HumanMessage\n'), ((23640, 23664), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (23662, 23664), False, 'import asyncio\n'), ((4134, 4160), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (4158, 4160), False, 'import asyncio\n'), ((6521, 6552), 'langchain.schema.ChatGeneration', 'ChatGeneration', ([], {'message': 'message'}), '(message=message)\n', (6535, 6552), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((13049, 13116), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': '[res.generations]', 'llm_output': 'res.llm_output'}), '(generations=[res.generations], llm_output=res.llm_output)\n', (13058, 13116), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((8442, 8473), 'langchain.schema.ChatGeneration', 'ChatGeneration', ([], {'message': 'message'}), '(message=message)\n', (8456, 8473), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n')]
import sys
import chromadb
import pandas
import sqlite3
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.document_loaders import TextLoader
from langchain.document_loaders import WikipediaLoader
from langchain.retrievers.multi_query import MultiQueryRetriever
import langchain
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain.cache import InMemoryCache
from langchain import PromptTemplate
import os
import openai
from langchain.prompts import (
    ChatPromptTemplate,
    PromptTemplate,
    SystemMessagePromptTemplate,
    AIMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain.schema import AIMessage, HumanMessage, SystemMessage

os.environ["OPENAI_API_KEY"] = "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"
openai.api_key = "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"
api_key = "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"

llm = OpenAI()
# chat = ChatOpenAI(openai_api_key=api_key, temperature=0)
embedding_function = OpenAIEmbeddings()


def us_constitution_helper(question):
    loader = TextLoader("some_data/US_Constitution.txt")
    documents = loader.load()
    text_splitter = CharacterTextSplitter.from_tiktoken_encoder(chunk_size=500)
    docs = text_splitter.split_documents(documents)
    _embedding_function = OpenAIEmbeddings()
    db = Chroma.from_documents(
        docs, _embedding_function, persist_directory="./US_Constitution"
    )
    db.persist()
    chat = ChatOpenAI(openai_api_key=api_key, temperature=0)
    compressor = LLMChainExtractor.from_llm(chat)
    compression_retriever = ContextualCompressionRetriever(
        base_compressor=compressor, base_retriever=db.as_retriever()
    )
    compressed_docs = compression_retriever.get_relevant_documents(question)
    return compressed_docs[0].page_content


print(us_constitution_helper("What is the 13th Amendment?"))
[ "langchain.document_loaders.TextLoader", "langchain.text_splitter.CharacterTextSplitter.from_tiktoken_encoder", "langchain.llms.OpenAI", "langchain.chat_models.ChatOpenAI", "langchain.vectorstores.Chroma.from_documents", "langchain.embeddings.OpenAIEmbeddings", "langchain.retrievers.document_compressors.LLMChainExtractor.from_llm" ]
[((1247, 1255), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (1253, 1255), False, 'from langchain.llms import OpenAI\n'), ((1336, 1354), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1352, 1354), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1408, 1451), 'langchain.document_loaders.TextLoader', 'TextLoader', (['"""some_data/US_Constitution.txt"""'], {}), "('some_data/US_Constitution.txt')\n", (1418, 1451), False, 'from langchain.document_loaders import TextLoader\n'), ((1503, 1562), 'langchain.text_splitter.CharacterTextSplitter.from_tiktoken_encoder', 'CharacterTextSplitter.from_tiktoken_encoder', ([], {'chunk_size': '(500)'}), '(chunk_size=500)\n', (1546, 1562), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1642, 1660), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1658, 1660), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1670, 1762), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', (['docs', '_embedding_function'], {'persist_directory': '"""./US_Constitution"""'}), "(docs, _embedding_function, persist_directory=\n './US_Constitution')\n", (1691, 1762), False, 'from langchain.vectorstores import Chroma\n'), ((1801, 1850), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'api_key', 'temperature': '(0)'}), '(openai_api_key=api_key, temperature=0)\n', (1811, 1850), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1868, 1900), 'langchain.retrievers.document_compressors.LLMChainExtractor.from_llm', 'LLMChainExtractor.from_llm', (['chat'], {}), '(chat)\n', (1894, 1900), False, 'from langchain.retrievers.document_compressors import LLMChainExtractor\n')]
from langchain import OpenAI, SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
from gptcache.adapter.langchain_models import LangChainLLMs
from gptcache.session import Session
from gptcache import cache
from gptcache.embedding import Onnx
from gptcache.manager import CacheBase, VectorBase, get_data_manager
from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation
from dotenv import load_dotenv

load_dotenv()

# Define your content function here if it's not already defined
def get_content_func(data, **_):
    return data.get("prompt").split("Question")[-1]

def setup_session():
    session = Session(name="sqlite-example")
    return session

def setup_database(session):
    db = SQLDatabase.from_uri("sqlite:///./Chinook.db")
    return db

def setup_lang_model(session):
    llm = LangChainLLMs(llm=OpenAI(temperature=0), session=session)
    return llm

def setup_database_chain(llm, database):
    db_chain = SQLDatabaseChain(llm=llm, database=database, verbose=True)
    return db_chain

def setup_cache():
    onnx = Onnx()
    cache_base = CacheBase('sqlite')
    vector_base = VectorBase('milvus', host='127.0.0.1', port='19530', dimension=onnx.dimension)
    data_manager = get_data_manager(cache_base, vector_base)
    cache.init(
        pre_embedding_func=get_content_func,
        embedding_func=onnx.to_embeddings,
        data_manager=data_manager,
        similarity_evaluation=SearchDistanceEvaluation(),
    )
    cache.set_openai_key()

def run_query(db_chain, query):
    result = db_chain.run(query)
    return result

def main():
    session = setup_session()
    database = setup_database(session)
    lang_model = setup_lang_model(session)
    database_chain = setup_database_chain(lang_model, database)

    # Set up the cache
    setup_cache()

    query = "How many employees are there?"
    result = run_query(database_chain, query)
    # Use the result as needed

if __name__ == "__main__":
    main()
[ "langchain.SQLDatabase.from_uri", "langchain_experimental.sql.SQLDatabaseChain", "langchain.OpenAI" ]
[((437, 450), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (448, 450), False, 'from dotenv import load_dotenv\n'), ((637, 667), 'gptcache.session.Session', 'Session', ([], {'name': '"""sqlite-example"""'}), "(name='sqlite-example')\n", (644, 667), False, 'from gptcache.session import Session\n'), ((726, 772), 'langchain.SQLDatabase.from_uri', 'SQLDatabase.from_uri', (['"""sqlite:///./Chinook.db"""'], {}), "('sqlite:///./Chinook.db')\n", (746, 772), False, 'from langchain import OpenAI, SQLDatabase\n'), ((959, 1017), 'langchain_experimental.sql.SQLDatabaseChain', 'SQLDatabaseChain', ([], {'llm': 'llm', 'database': 'database', 'verbose': '(True)'}), '(llm=llm, database=database, verbose=True)\n', (975, 1017), False, 'from langchain_experimental.sql import SQLDatabaseChain\n'), ((1069, 1075), 'gptcache.embedding.Onnx', 'Onnx', ([], {}), '()\n', (1073, 1075), False, 'from gptcache.embedding import Onnx\n'), ((1093, 1112), 'gptcache.manager.CacheBase', 'CacheBase', (['"""sqlite"""'], {}), "('sqlite')\n", (1102, 1112), False, 'from gptcache.manager import CacheBase, VectorBase, get_data_manager\n'), ((1131, 1209), 'gptcache.manager.VectorBase', 'VectorBase', (['"""milvus"""'], {'host': '"""127.0.0.1"""', 'port': '"""19530"""', 'dimension': 'onnx.dimension'}), "('milvus', host='127.0.0.1', port='19530', dimension=onnx.dimension)\n", (1141, 1209), False, 'from gptcache.manager import CacheBase, VectorBase, get_data_manager\n'), ((1229, 1270), 'gptcache.manager.get_data_manager', 'get_data_manager', (['cache_base', 'vector_base'], {}), '(cache_base, vector_base)\n', (1245, 1270), False, 'from gptcache.manager import CacheBase, VectorBase, get_data_manager\n'), ((1478, 1500), 'gptcache.cache.set_openai_key', 'cache.set_openai_key', ([], {}), '()\n', (1498, 1500), False, 'from gptcache import cache\n'), ((847, 868), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (853, 868), False, 'from langchain import OpenAI, SQLDatabase\n'), ((1440, 1466), 'gptcache.similarity_evaluation.distance.SearchDistanceEvaluation', 'SearchDistanceEvaluation', ([], {}), '()\n', (1464, 1466), False, 'from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation\n')]
from langchain_openai import ChatOpenAI from langchain.chains import LLMChain from langchain.memory import ConversationBufferWindowMemory, FileChatMessageHistory from langchain.prompts import ( MessagesPlaceholder, HumanMessagePromptTemplate, ChatPromptTemplate, ) import sqlite3, re, openai from dotenv import load_dotenv # Ignore deprecation warning from langchain import warnings from langchain_core._api.deprecation import LangChainDeprecationWarning warnings.filterwarnings("ignore", category=LangChainDeprecationWarning) load_dotenv() # Helper function to roughly estimate token length for a given text def estimate_token_length(text): return len(text.split()) # Different models tested: # gpt-3.5-turbo-0125 # gpt-4-1106-preview class ChatbotProcessor: def __init__(self, db_path, model_name="gpt-4-1106-preview", max_token_limit=16385, allow_db_edit=False): self.db_path = db_path self.allow_db_edit = allow_db_edit print("Chatbot Processor - Allow DB Edit:", self.allow_db_edit) self.SQLclient = ChatOpenAI(model_name=model_name, temperature=0.2) self.client = ChatOpenAI(model_name=model_name) self.memory = ConversationBufferWindowMemory( chat_memory=FileChatMessageHistory("chat_history.json"), memory_key="history", k=3, return_messages=True, max_token_limit=max_token_limit, ) self.schema_sent = False self.max_token_limit=max_token_limit def generate_sample_content(self): schema_info = self.get_database_schema() description_prompt = ChatPromptTemplate( input_variables=["schema_info"], messages=[ HumanMessagePromptTemplate.from_template("Generate the following for a database with the following schema: {schema_info}:\n a title enclosed in a <h1 class='text-gray-700'> tag,\n a detailed 2-3 sentence layman's description of the schema enclosed in a <p class='text-gray-700'> tag,\n and a <h2 class='text-gray-700'> 'Sample Questions:' heading with 3-4 sample questions enclosed in separate newline <p class='text-gray-700'> tags. Do not provide additional text or explanations."), ], ) description_chain = LLMChain( llm=self.client, prompt=description_prompt, output_key="schema_description", ) try: result = description_chain({"schema_info": schema_info}) return result["schema_description"] except Exception as e: print(f'Response: {result["schema_description"]}') return f"Error in generating content: {e}" def get_database_schema(self): if not self.db_path: raise ValueError("Empty database path provided") try: conn = sqlite3.connect(self.db_path) cursor = conn.cursor() cursor.execute("SELECT name FROM sqlite_master WHERE type='table';") tables = cursor.fetchall() full_schema_info = [] for table in tables: table_name = table[0] # Fetch schema details cursor.execute(f"PRAGMA table_info('{table_name}');") columns = cursor.fetchall() formatted_columns = [f'"{col[1]}" {col[2]}' for col in columns] schema_info = f"Table '{table_name}' with columns: {', '.join(formatted_columns)}." # Fetch the first row of data as a sample cursor.execute(f"SELECT * FROM '{table_name}' LIMIT 1;") sample_data = cursor.fetchone() if sample_data: sample_data_info = ', '.join([f'"{col[1]}": {val}' for col, val in zip(columns, sample_data)]) sample_data_info = f"Sample data: {{ {sample_data_info} }}" else: sample_data_info = "Sample data: None." 
# Combine schema and sample data full_table_info = f"{schema_info} {sample_data_info}" full_schema_info.append(full_table_info) conn.close() return ' '.join(full_schema_info) except Exception as e: return f"Error reading database schema: {e}" def execute_sql_query(self, sql_query): try: conn = sqlite3.connect(self.db_path) cursor = conn.cursor() cursor.execute(sql_query) if sql_query.strip().lower().startswith("select"): # Fetch results for SELECT queries results = cursor.fetchall() results_str = ", ".join(map(str, results)) else: # For modification queries like INSERT, UPDATE, DELETE conn.commit() results_str = "Database update succeeded. Rows affected: " + str(cursor.rowcount) conn.close() return results_str except sqlite3.Error as e: return f"SQL Error: {e}" def reset_memory(self): print("Chat history cleared.") self.memory.clear() # Clear the chat history with open("chat_history.json", "w") as file: file.write("[]") def _is_within_token_limit(self, history_messages, question): current_tokens = estimate_token_length(" ".join([msg.content for msg in history_messages]) + question) return current_tokens < self.max_token_limit def generate_sql_query(self, question, history_messages, schema_already_sent, database_schema): question_prompt = ChatPromptTemplate( input_variables=["history", "question"], messages=[ MessagesPlaceholder(variable_name="history"), HumanMessagePromptTemplate.from_template( "\nBased on below question and database schema, generate an SQL query for the following question:\n" "Question: {question}\n " "Do not respond with any additional explanations or text." ), ], ) question_chain = LLMChain( llm=self.SQLclient, prompt=question_prompt, output_key="sql_query", memory=self.memory, ) if not schema_already_sent and self._is_within_token_limit(history_messages, question): return question_chain( {"question": f"Question: {question}.\n Database Schema: {database_schema}.\n SQLQuery:", "history": history_messages} ) else: return question_chain( {"question": f"Question: {question}", "history": history_messages} ) def generate_nlp_response(self, question, query_results, history_messages): nlp_prompt = ChatPromptTemplate( input_variables=["history", "question", "query_results"], messages=[ MessagesPlaceholder(variable_name="history"), HumanMessagePromptTemplate.from_template( "You are a data analyst. Generate a natural language response from the given Question: {question}\nand subsequent SQL result: {query_results}. If there is a result, always provide an answer with reference to the original question." 
), ], ) nlp_chain = LLMChain( llm=self.client, prompt=nlp_prompt, output_key="nlp_response", ) return nlp_chain({"question": question, "query_results": query_results, "history": history_messages}) def process_message(self, question): try: current_memory = self.memory.load_memory_variables({}) max_attempts = 6 sql_error = None for attempt in range(max_attempts): history_messages = current_memory.get('history', []) schema_already_sent = any("Database Schema:" in message.content for message in history_messages) database_schema = self.get_database_schema() print(f'Database Schema: {database_schema}') question_result = self.generate_sql_query(question, history_messages, schema_already_sent, database_schema) sql_query = question_result["sql_query"] print(f"Attempt {attempt + 1}: SQL Query: {sql_query}") pattern = r"```sql(.*?)```" match = re.search(pattern, sql_query, re.DOTALL | re.IGNORECASE) if match: sql_query = match.group(1).strip() if not sql_query.strip().lower().startswith("select") and not self.allow_db_edit: sql_error = "Database modification not allowed." return sql_error, {"sql_query": sql_query, "sql_output": sql_error} query_results = self.execute_sql_query(sql_query) print(f"Attempt {attempt + 1}: Query Results: {query_results}") if not (isinstance(query_results, str) and query_results.startswith("SQL Error")) and query_results: nlp_result = self.generate_nlp_response(question, query_results, history_messages) bot_response = nlp_result["nlp_response"] self.memory.save_context({"input": question}, {"output": bot_response}) debug_info = {"sql_query": sql_query, "sql_output": query_results} return bot_response, debug_info sql_error = query_results bot_response = "Unable to process your request, this information may not be available." debug_info = {"sql_query": sql_query, "sql_output": sql_error} return bot_response, debug_info except Exception as e: print(f"Exception in process_message: {e}") return "An unexpected error occurred.", {"sql_query": "N/A", "sql_output": "Exception: " + str(e)} # Flask route integration def process_chat_message(question, db_path): processor = ChatbotProcessor(db_path) response, debug_info = processor.process_message(question) return response, debug_info
[ "langchain_openai.ChatOpenAI", "langchain.prompts.HumanMessagePromptTemplate.from_template", "langchain.prompts.MessagesPlaceholder", "langchain.memory.FileChatMessageHistory", "langchain.chains.LLMChain" ]
[((469, 540), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'LangChainDeprecationWarning'}), "('ignore', category=LangChainDeprecationWarning)\n", (492, 540), False, 'import warnings\n'), ((542, 555), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (553, 555), False, 'from dotenv import load_dotenv\n'), ((1063, 1113), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'model_name', 'temperature': '(0.2)'}), '(model_name=model_name, temperature=0.2)\n', (1073, 1113), False, 'from langchain_openai import ChatOpenAI\n'), ((1136, 1169), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (1146, 1169), False, 'from langchain_openai import ChatOpenAI\n'), ((2274, 2364), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'self.client', 'prompt': 'description_prompt', 'output_key': '"""schema_description"""'}), "(llm=self.client, prompt=description_prompt, output_key=\n 'schema_description')\n", (2282, 2364), False, 'from langchain.chains import LLMChain\n'), ((6170, 6270), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'self.SQLclient', 'prompt': 'question_prompt', 'output_key': '"""sql_query"""', 'memory': 'self.memory'}), "(llm=self.SQLclient, prompt=question_prompt, output_key='sql_query',\n memory=self.memory)\n", (6178, 6270), False, 'from langchain.chains import LLMChain\n'), ((7404, 7475), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'self.client', 'prompt': 'nlp_prompt', 'output_key': '"""nlp_response"""'}), "(llm=self.client, prompt=nlp_prompt, output_key='nlp_response')\n", (7412, 7475), False, 'from langchain.chains import LLMChain\n'), ((2871, 2900), 'sqlite3.connect', 'sqlite3.connect', (['self.db_path'], {}), '(self.db_path)\n', (2886, 2900), False, 'import sqlite3, re, openai\n'), ((4400, 4429), 'sqlite3.connect', 'sqlite3.connect', (['self.db_path'], {}), '(self.db_path)\n', (4415, 4429), False, 'import sqlite3, re, openai\n'), ((1248, 1291), 'langchain.memory.FileChatMessageHistory', 'FileChatMessageHistory', (['"""chat_history.json"""'], {}), "('chat_history.json')\n", (1270, 1291), False, 'from langchain.memory import ConversationBufferWindowMemory, FileChatMessageHistory\n'), ((8508, 8564), 're.search', 're.search', (['pattern', 'sql_query', '(re.DOTALL | re.IGNORECASE)'], {}), '(pattern, sql_query, re.DOTALL | re.IGNORECASE)\n', (8517, 8564), False, 'import sqlite3, re, openai\n'), ((1735, 2229), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""Generate the following for a database with the following schema: {schema_info}:\n a title enclosed in a <h1 class=\'text-gray-700\'> tag,\n a detailed 2-3 sentence layman\'s description of the schema enclosed in a <p class=\'text-gray-700\'> tag,\n and a <h2 class=\'text-gray-700\'> \'Sample Questions:\' heading with 3-4 sample questions enclosed in separate newline <p class=\'text-gray-700\'> tags. Do not provide additional text or explanations."""'], {}), '(\n """Generate the following for a database with the following schema: {schema_info}:\n a title enclosed in a <h1 class=\'text-gray-700\'> tag,\n a detailed 2-3 sentence layman\'s description of the schema enclosed in a <p class=\'text-gray-700\'> tag,\n and a <h2 class=\'text-gray-700\'> \'Sample Questions:\' heading with 3-4 sample questions enclosed in separate newline <p class=\'text-gray-700\'> tags. 
Do not provide additional text or explanations."""\n )\n', (1775, 2229), False, 'from langchain.prompts import MessagesPlaceholder, HumanMessagePromptTemplate, ChatPromptTemplate\n'), ((5750, 5794), 'langchain.prompts.MessagesPlaceholder', 'MessagesPlaceholder', ([], {'variable_name': '"""history"""'}), "(variable_name='history')\n", (5769, 5794), False, 'from langchain.prompts import MessagesPlaceholder, HumanMessagePromptTemplate, ChatPromptTemplate\n'), ((5812, 6044), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""\nBased on below question and database schema, generate an SQL query for the following question:\nQuestion: {question}\n Do not respond with any additional explanations or text."""'], {}), '(\n """\nBased on below question and database schema, generate an SQL query for the following question:\nQuestion: {question}\n Do not respond with any additional explanations or text."""\n )\n', (5852, 6044), False, 'from langchain.prompts import MessagesPlaceholder, HumanMessagePromptTemplate, ChatPromptTemplate\n'), ((6983, 7027), 'langchain.prompts.MessagesPlaceholder', 'MessagesPlaceholder', ([], {'variable_name': '"""history"""'}), "(variable_name='history')\n", (7002, 7027), False, 'from langchain.prompts import MessagesPlaceholder, HumanMessagePromptTemplate, ChatPromptTemplate\n'), ((7045, 7331), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""You are a data analyst. Generate a natural language response from the given Question: {question}\nand subsequent SQL result: {query_results}. If there is a result, always provide an answer with reference to the original question."""'], {}), '(\n """You are a data analyst. Generate a natural language response from the given Question: {question}\nand subsequent SQL result: {query_results}. If there is a result, always provide an answer with reference to the original question."""\n )\n', (7085, 7331), False, 'from langchain.prompts import MessagesPlaceholder, HumanMessagePromptTemplate, ChatPromptTemplate\n')]
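# A minimal, self-contained sketch of the memory-backed text-to-SQL chain that the
# extract above records. It only re-wires calls already listed there (ChatOpenAI,
# MessagesPlaceholder, HumanMessagePromptTemplate, FileChatMessageHistory,
# ConversationBufferWindowMemory, LLMChain); the model name, window size and the
# example question are illustrative assumptions, not values confirmed by the sample.
from langchain_openai import ChatOpenAI
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferWindowMemory, FileChatMessageHistory
from langchain.prompts import (ChatPromptTemplate, HumanMessagePromptTemplate,
                               MessagesPlaceholder)

llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.2)  # assumed model name

# Persist the conversation to disk so follow-up questions keep their context.
memory = ConversationBufferWindowMemory(
    k=5,                                                  # assumed window size
    memory_key="history",
    return_messages=True,
    chat_memory=FileChatMessageHistory("chat_history.json"),
)

prompt = ChatPromptTemplate.from_messages([
    MessagesPlaceholder(variable_name="history"),
    HumanMessagePromptTemplate.from_template(
        "Based on below question and database schema, generate an SQL query for the "
        "following question:\nQuestion: {question}\n"
        "Do not respond with any additional explanations or text."
    ),
])

sql_chain = LLMChain(llm=llm, prompt=prompt, output_key="sql_query", memory=memory)
print(sql_chain.predict(question="How many orders were placed last month?"))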
import os import sys module_path = ".." sys.path.append(os.path.abspath(module_path)) import langchain from langchain.document_loaders import ConfluenceLoader from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.prompts import PromptTemplate from langchain.chains import RetrievalQA from langchain.embeddings import BedrockEmbeddings from langchain.llms.bedrock import Bedrock from langchain.vectorstores import FAISS from langchain.indexes.vectorstore import VectorStoreIndexWrapper from utils import bedrock boto3_bedrock = bedrock.get_bedrock_client( assumed_role=os.environ.get("BEDROCK_ASSUME_ROLE", None), endpoint_url=os.environ.get("BEDROCK_ENDPOINT_URL", None), region=os.environ.get("AWS_DEFAULT_REGION", None), ) class BedrockConfluenceQA: def __init__(self, config: dict = {}): self.config = config self.embedding = None self.vectordb = None self.llm = None self.qa = None self.retriever = None self.model_id = None def init_embeddings(self) -> None: # AWS Bedrock Embeddings self.embedding = BedrockEmbeddings(client=boto3_bedrock) def init_models(self, parameters: dict = {}) -> None: self.parameters = parameters max_token_count = self.parameters.get("max_token_count", 512) temprature = self.parameters.get("temprature", 1) top_p = self.parameters.get("top_p", 1) top_k = self.parameters.get("top_k", 1) model_id = self.parameters.get("model_id", "amazon.titan-tg1-large") self.model_id = model_id # AWS Bedrock titan if "claude" in model_id: self.llm = Bedrock( model_id=model_id, client=boto3_bedrock, model_kwargs={ "max_tokens_to_sample":max_token_count, "temperature": temprature, "top_k": top_k, "top_p": top_p, } ) if "titan" in model_id: self.llm = Bedrock(model_id=model_id, client=boto3_bedrock, model_kwargs= { "maxTokenCount": max_token_count, "temperature": temprature, "topP": top_p, }) if "ai21" in model_id: self.llm = Bedrock(model_id=model_id, client=boto3_bedrock, model_kwargs= { "maxTokens": max_token_count, "temperature": temprature, "topP": top_p, }) def vector_db_confluence_docs(self, force_reload: bool = False) -> None: """ creates vector db for the embeddings and persists them or loads a vector db from the persist directory """ persist_directory = self.config.get("persist_directory", None) confluence_url = self.config.get("confluence_url", None) username = self.config.get("username", None) api_key = self.config.get("api_key", None) space_key = self.config.get("space_key", None) if persist_directory and os.path.exists(persist_directory) and not force_reload: ## Load from the persist db self.vectordb = FAISS.load_local("faiss_index", embeddings=self.embedding) else: loader = ConfluenceLoader( url=confluence_url, username=username, api_key=api_key ) documents = loader.load(space_key=space_key, limit=50) ## 2. Split the texts text_splitter = RecursiveCharacterTextSplitter( # Set a really small chunk size, just to show. # Make sure the chunk size does not exceed titan text embeddings max tokens (512) chunk_size=1000, chunk_overlap=100, # separators=["\n", "\n\n"] ) docs = text_splitter.split_documents(documents) print(len(docs)) ## 3. 
Create Embeddings and add to chroma store ##TODO: Validate if self.embedding is not None vectorstore_faiss = FAISS.from_documents( docs, self.embedding, ) VectorStoreIndexWrapper(vectorstore=vectorstore_faiss) self.vectordb = vectorstore_faiss # vectorstore_faiss_aws.save_local("faiss_index") def retreival_qa_chain(self): """ Creates retrieval qa chain using vectordb as retrivar and LLM to complete the prompt """ ##TODO: Use custom prompt self.retriever = self.vectordb.as_retriever(search_kwargs={"k": 10}) # self.qa = RetrievalQA.from_chain_type(llm=self.llm, chain_type="stuff",retriever=self.retriever) # prompt_template = """Human: Use the following pieces of context to provide a concise answer to the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. # {context} # Question: {question} # Assistant:""" # prompt_template = """Human: Please use the context below to craft a succinct response to the following question. If you don't have the information, it's okay to state that you're unaware instead of inventing an answer. # {context} # Question: {question} # Assistant:""" prompt_template = """Human: Utilize the context provided to formulate a comprehensive response to the following question. If you're uncertain about the answer, it's perfectly fine to acknowledge that you're unsure rather than providing speculative information. {context} Question: {question} Assistant:""" ## used for the bulk answers generation prompt_template = """# INSTRUCTION Answer any question about onboarding or company-related topics at LogicWorks acting as a onboarding manager. If you don't have the information, it's okay to state that you're unaware instead of inventing an answer. Utilize the context provided to formulate a comprehensive response to the following question. If you don't have the information, it's okay to state that you're unaware instead of inventing an answer. # CONTEXT {context} # QUESTION {question} Assistant: """ prompt_template = """User: Answer the question based only on the information provided between ##. If you don't know the answer, just say that you don't know, don't try to make up an answer. # {context} # Question: {question} Assistant:""" PROMPT = PromptTemplate( template=prompt_template, input_variables=["context", "question"] ) self.qa = RetrievalQA.from_chain_type( llm=self.llm, chain_type="stuff", retriever=self.retriever, return_source_documents=True, chain_type_kwargs={"prompt": PROMPT}, ) def answer_confluence(self, question: str) -> str: """ Answer the question """ answer = self.qa({"query": question}) return answer
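# A minimal usage sketch for the BedrockConfluenceQA class defined above. The
# Confluence URL, credentials, space key and question are placeholders (assumptions);
# the parameter keys deliberately follow the class's own spelling ("temprature",
# "max_token_count"), and the model id is the class's documented default.
if __name__ == "__main__":
    qa = BedrockConfluenceQA(config={
        "persist_directory": None,
        "confluence_url": "https://example.atlassian.net/wiki",  # placeholder
        "username": "user@example.com",                          # placeholder
        "api_key": "YOUR_CONFLUENCE_API_TOKEN",                  # placeholder
        "space_key": "ENG",                                      # placeholder
    })
    qa.init_embeddings()
    qa.init_models(parameters={
        "model_id": "amazon.titan-tg1-large",  # default model id used by the class
        "temprature": 0.3,
        "max_token_count": 512,
    })
    qa.vector_db_confluence_docs(force_reload=True)
    qa.retreival_qa_chain()
    answer = qa.answer_confluence("How do I request access to the VPN?")
    print(answer["result"])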
[ "langchain.embeddings.BedrockEmbeddings", "langchain.vectorstores.FAISS.load_local", "langchain.chains.RetrievalQA.from_chain_type", "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.indexes.vectorstore.VectorStoreIndexWrapper", "langchain.document_loaders.ConfluenceLoader", "langchain.vectorstores.FAISS.from_documents", "langchain.llms.bedrock.Bedrock", "langchain.prompts.PromptTemplate" ]
[((58, 86), 'os.path.abspath', 'os.path.abspath', (['module_path'], {}), '(module_path)\n', (73, 86), False, 'import os\n'), ((606, 649), 'os.environ.get', 'os.environ.get', (['"""BEDROCK_ASSUME_ROLE"""', 'None'], {}), "('BEDROCK_ASSUME_ROLE', None)\n", (620, 649), False, 'import os\n'), ((668, 712), 'os.environ.get', 'os.environ.get', (['"""BEDROCK_ENDPOINT_URL"""', 'None'], {}), "('BEDROCK_ENDPOINT_URL', None)\n", (682, 712), False, 'import os\n'), ((725, 767), 'os.environ.get', 'os.environ.get', (['"""AWS_DEFAULT_REGION"""', 'None'], {}), "('AWS_DEFAULT_REGION', None)\n", (739, 767), False, 'import os\n'), ((1135, 1174), 'langchain.embeddings.BedrockEmbeddings', 'BedrockEmbeddings', ([], {'client': 'boto3_bedrock'}), '(client=boto3_bedrock)\n', (1152, 1174), False, 'from langchain.embeddings import BedrockEmbeddings\n'), ((6643, 6728), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'prompt_template', 'input_variables': "['context', 'question']"}), "(template=prompt_template, input_variables=['context',\n 'question'])\n", (6657, 6728), False, 'from langchain.prompts import PromptTemplate\n'), ((6770, 6935), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'self.llm', 'chain_type': '"""stuff"""', 'retriever': 'self.retriever', 'return_source_documents': '(True)', 'chain_type_kwargs': "{'prompt': PROMPT}"}), "(llm=self.llm, chain_type='stuff', retriever=\n self.retriever, return_source_documents=True, chain_type_kwargs={\n 'prompt': PROMPT})\n", (6797, 6935), False, 'from langchain.chains import RetrievalQA\n'), ((1689, 1861), 'langchain.llms.bedrock.Bedrock', 'Bedrock', ([], {'model_id': 'model_id', 'client': 'boto3_bedrock', 'model_kwargs': "{'max_tokens_to_sample': max_token_count, 'temperature': temprature,\n 'top_k': top_k, 'top_p': top_p}"}), "(model_id=model_id, client=boto3_bedrock, model_kwargs={\n 'max_tokens_to_sample': max_token_count, 'temperature': temprature,\n 'top_k': top_k, 'top_p': top_p})\n", (1696, 1861), False, 'from langchain.llms.bedrock import Bedrock\n'), ((2068, 2217), 'langchain.llms.bedrock.Bedrock', 'Bedrock', ([], {'model_id': 'model_id', 'client': 'boto3_bedrock', 'model_kwargs': "{'maxTokenCount': max_token_count, 'temperature': temprature, 'topP': top_p}"}), "(model_id=model_id, client=boto3_bedrock, model_kwargs={\n 'maxTokenCount': max_token_count, 'temperature': temprature, 'topP': top_p}\n )\n", (2075, 2217), False, 'from langchain.llms.bedrock import Bedrock\n'), ((2326, 2465), 'langchain.llms.bedrock.Bedrock', 'Bedrock', ([], {'model_id': 'model_id', 'client': 'boto3_bedrock', 'model_kwargs': "{'maxTokens': max_token_count, 'temperature': temprature, 'topP': top_p}"}), "(model_id=model_id, client=boto3_bedrock, model_kwargs={'maxTokens':\n max_token_count, 'temperature': temprature, 'topP': top_p})\n", (2333, 2465), False, 'from langchain.llms.bedrock import Bedrock\n'), ((3068, 3101), 'os.path.exists', 'os.path.exists', (['persist_directory'], {}), '(persist_directory)\n', (3082, 3101), False, 'import os\n'), ((3192, 3250), 'langchain.vectorstores.FAISS.load_local', 'FAISS.load_local', (['"""faiss_index"""'], {'embeddings': 'self.embedding'}), "('faiss_index', embeddings=self.embedding)\n", (3208, 3250), False, 'from langchain.vectorstores import FAISS\n'), ((3286, 3358), 'langchain.document_loaders.ConfluenceLoader', 'ConfluenceLoader', ([], {'url': 'confluence_url', 'username': 'username', 'api_key': 'api_key'}), '(url=confluence_url, username=username, api_key=api_key)\n', (3302, 
3358), False, 'from langchain.document_loaders import ConfluenceLoader\n'), ((3518, 3584), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(100)'}), '(chunk_size=1000, chunk_overlap=100)\n', (3548, 3584), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((4079, 4121), 'langchain.vectorstores.FAISS.from_documents', 'FAISS.from_documents', (['docs', 'self.embedding'], {}), '(docs, self.embedding)\n', (4099, 4121), False, 'from langchain.vectorstores import FAISS\n'), ((4181, 4235), 'langchain.indexes.vectorstore.VectorStoreIndexWrapper', 'VectorStoreIndexWrapper', ([], {'vectorstore': 'vectorstore_faiss'}), '(vectorstore=vectorstore_faiss)\n', (4204, 4235), False, 'from langchain.indexes.vectorstore import VectorStoreIndexWrapper\n')]
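# A standalone sketch of the indexing pipeline that vector_db_confluence_docs (above)
# wraps: crawl a Confluence space, split it, embed with Bedrock and persist a FAISS
# index under the "faiss_index" name the class later reloads. The URL, credentials and
# space key are placeholders; `boto3_bedrock` refers to the client created earlier in
# that sample via utils.bedrock.get_bedrock_client.
from langchain.document_loaders import ConfluenceLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import BedrockEmbeddings
from langchain.vectorstores import FAISS

loader = ConfluenceLoader(
    url="https://example.atlassian.net/wiki",  # placeholder
    username="user@example.com",               # placeholder
    api_key="YOUR_CONFLUENCE_API_TOKEN",       # placeholder
)
documents = loader.load(space_key="ENG", limit=50)  # placeholder space key

splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
docs = splitter.split_documents(documents)

embedding = BedrockEmbeddings(client=boto3_bedrock)
index = FAISS.from_documents(docs, embedding)
index.save_local("faiss_index")

# Later runs can skip re-crawling Confluence and simply reload the persisted index:
reloaded = FAISS.load_local("faiss_index", embeddings=embedding)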
import pickle import torch from langchain.chat_models import ChatOpenAI from langchain.prompts.chat import (ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate,) import numpy as np import random np.int = int #fixing shap/numpy compatibility issue from sklearn.metrics import classification_report import shap from matplotlib import pyplot as plt from langchain.chains import LLMChain from lime_stability.stability import LimeTabularExplainerOvr import argparse import pandas as pd from sklearn.model_selection import train_test_split import pathlib import langchain from langchain.output_parsers.enum import EnumOutputParser from enum import Enum from progressbar import ProgressBar, Percentage, Bar, Timer, ETA, Counter #cf. https://stackoverflow.com/a/53304527/5899161 from sklearn.preprocessing import LabelEncoder from collections import defaultdict import fastshap from torch import nn import dice_ml from anchor import anchor_tabular from langchain.llms import VLLM from SALib.sample import morris as morris_sample from SALib.test_functions import Ishigami from SALib.analyze import morris as morris_analyze from langchain.chat_models import AzureChatOpenAI from langchain.schema import HumanMessage import tqdm #langchain.verbose=True def vicuna15(temperature=.7): model = "vicuna" llm = ChatOpenAI(model_name=model, openai_api_key="EMPTY", openai_api_base="http://localhost:8000/v1", max_tokens=150, verbose=True, temperature=temperature) return llm def llama2(temperature=.4): model = "llama2" llm = ChatOpenAI(model_name=model, openai_api_key="EMPTY", openai_api_base="http://localhost:8000/v1", max_tokens=150, temperature=temperature) return llm def llama2_hf_70b(temperature = .4): #cf. https://www.pinecone.io/learn/llama-2/ import torch import transformers from langchain.llms import HuggingFacePipeline bnb_config = transformers.BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type='nf4', bnb_4bit_use_double_quant=True, bnb_4bit_compute_dtype=torch.bfloat16 ) model_id = "/lu/tetyda/home/lgorski/llama/llama-2-70b-chat-hf/models--meta-llama--Llama-2-70b-chat-hf/snapshots/36d9a7388cc80e5f4b3e9701ca2f250d21a96c30/" model_config = transformers.AutoConfig.from_pretrained(model_id) model = transformers.AutoModelForCausalLM.from_pretrained( model_id, config=model_config, quantization_config=bnb_config, device_map = "auto") tokenizer = transformers.AutoTokenizer.from_pretrained(model_id) generate_text = transformers.pipeline( model=model, tokenizer=tokenizer, task="text-generation", temperature=temperature, max_new_tokens=150, repetition_penalty=1.1 ) llm = HuggingFacePipeline(pipeline=generate_text) return llm def gpt4_azure(temperature=.3): import json import os with open("gpt4.json", encoding="utf-8") as credentials_file: credentials = json.load(credentials_file) llm = AzureChatOpenAI( openai_api_base=credentials["OPENAI_API_BASE"], openai_api_version=credentials["OPENAI_API_VERSION"], deployment_name="test-gpt4-32k", openai_api_key=credentials["OPENAI_API_KEY"], openai_api_type=credentials["OPENAI_API_TYPE"], max_tokens=150, temperature=temperature, ) return llm def grouper(iterable, n): for i in range(0, len(iterable), n): yield iterable[i:i+n] #returns a list of violated rules def predict_rules_only(X, features_closure, encoder : defaultdict): def analyze_rules(X): features = features_closure.tolist() violated = [] #r1 (gender = f and age >= 60) or (gender = male and age >= 65) gender_idx = features.index("gender") age_idx = features.index("age") gender = 
encoder[gender_idx].inverse_transform([X[gender_idx]])[0] age = X[age_idx] if not((gender == "f" and X[age_idx] >= 60) or (gender == "m" and age >= 65)): violated.append(1) #r2 r2: at least four of the following features are "yes": paid_contribution_1, paid_contribution_2, paid_contribution_3, paid_contribution_4, paid_contribution_5 paid_contribution_1_idx = features.index("paid_contribution_1") paid_contribution_2_idx = features.index("paid_contribution_2") paid_contribution_3_idx = features.index("paid_contribution_3") paid_contribution_4_idx = features.index("paid_contribution_4") paid_contribution_5_idx = features.index("paid_contribution_5") paid_contribution_1 = encoder[paid_contribution_1_idx].inverse_transform([X[paid_contribution_1_idx]])[0] paid_contribution_2 = encoder[paid_contribution_2_idx].inverse_transform([X[paid_contribution_2_idx]])[0] paid_contribution_3 = encoder[paid_contribution_3_idx].inverse_transform([X[paid_contribution_3_idx]])[0] paid_contribution_4 = encoder[paid_contribution_4_idx].inverse_transform([X[paid_contribution_4_idx]])[0] paid_contribution_5 = encoder[paid_contribution_5_idx].inverse_transform([X[paid_contribution_5_idx]])[0] paid_contributions = sum([1 if elem == "yes" else 0 for elem in [paid_contribution_1, paid_contribution_2, paid_contribution_3, paid_contribution_4, paid_contribution_5]]) if not (paid_contributions >= 4): violated.append(2) #r3 r3: is_spouse=yes is_spouse_idx = features.index("is_spouse") is_spouse = encoder[is_spouse_idx].inverse_transform([X[is_spouse_idx]])[0] == "True" if not (is_spouse == True): violated.append(3) #r4 is_absent=no is_absent_idx = features.index("is_absent") is_absent = encoder[is_absent_idx].inverse_transform([X[is_absent_idx]])[0] == "True" if not (is_absent == False): violated.append(4) #r5 it is not true that capital_resources >= 3000 capital_resources_idx = features.index("capital_resources") capital_resources = X[capital_resources_idx] if capital_resources >= 3000: violated.append(5) # r6: (patient_type= in and distance_to_hospital < 50) or (patient_type=out and distance_to_hospital >= 50) patient_type_idx = features.index("patient_type") distance_to_hospital_idx = features.index("distance_to_hospital") patient_type = encoder[patient_type_idx].inverse_transform([X[patient_type_idx]])[0] distance_to_hospital = X[distance_to_hospital_idx] if not ((patient_type == "in" and distance_to_hospital < 50) or (patient_type == "out" and distance_to_hospital >= 50)): violated.append(6) return violated def inner(X, violated_rules=None): if violated_rules==None: violated_rules=[] result = [] for row in X: violated = analyze_rules(row) violated_rules.append(violated) if len(violated) == 0: result.append(1) else: result.append(0) return np.array(result) return inner def predict_rules_simplified_only(X, features_closure, encoder : defaultdict): def analyze_rules(X): features = features_closure.tolist() violated = [] #r1 (gender = f and age >= 60) or (gender = male and age >= 65) gender_idx = features.index("gender") age_idx = features.index("age") gender = encoder[gender_idx].inverse_transform([X[gender_idx]])[0] age = X[age_idx] if not((gender == "f" and X[age_idx] >= 60) or (gender == "m" and age >= 65)): violated.append(1) # r2: (patient_type= in and distance_to_hospital < 50) or (patient_type=out and distance_to_hospital >= 50) patient_type_idx = features.index("patient_type") distance_to_hospital_idx = features.index("distance_to_hospital") patient_type = 
encoder[patient_type_idx].inverse_transform([X[patient_type_idx]])[0] distance_to_hospital = X[distance_to_hospital_idx] if not ((patient_type == "in" and distance_to_hospital < 50) or (patient_type == "out" and distance_to_hospital >= 50)): violated.append(2) return violated def inner(X, violated_rules=None): if violated_rules==None: violated_rules=[] result = [] for row in X: violated = analyze_rules(row) violated_rules.append(violated) if len(violated) == 0: result.append(1) else: result.append(0) return np.array(result) return inner import time def predict_rules(chain, features, encoder : defaultdict, configuration=None, save_reply=False, memory={}): def predict_rules_inner(X : np.ndarray, output = None): results = [] widgets = [' [', Percentage(), '] ', Bar(), ' (', Timer(), ') ', ETA(), ' ', Counter(), ' of ', str(len(X))] pbar = ProgressBar(widgets=widgets, maxval=len(X)).start() counter = 0 X=np.array(X, dtype=object) for index, encoding in encoder.items(): inversed = encoding.inverse_transform(X[:, index].astype(int)) X[:, index] = inversed for row in X: if row.tobytes() in memory: classification = memory[row.tobytes()] else: text=",".join([str(elem) for elem in row]) classification = chain(text) if output is not None: output += [classification] if save_reply: with open(f"log_{configuration.model_factory}.txt", "a") as log: log.write(classification + "\n") classification=classification["text"] cleaned = classification.strip().replace(".", "").lower() if "granted" in cleaned: results.append(1) elif "denied" in cleaned: results.append(0) else: #answer not fitting the template results.append(2) counter += 1 pbar.update(counter) if configuration.throttle: time.sleep(configuration.throttle) pbar.finish() return np.array(results) return predict_rules_inner # def lime_explainer(train, test, predict, feature_names, encoder): # categorical_features = list(sorted(encoder.keys())) # categorical_names = { key : list(encoder[key].classes_) for key in categorical_features} # explainer = LimeTabularExplainerOvr(train, feature_names=feature_names, categorical_features=categorical_features, # categorical_names=categorical_names, # class_names=["not granted", "granted", "unknown"]) # print(explainer.explain_instance(np.array(test[0]), predict).as_list()) import random def shap_explainer(train, test, y_train, y_test, predict, features, encoder, configuration): explainer = shap.KernelExplainer(model=predict, data=shap.sample(train, 100)) #test=pd.read_csv(r"data/welfare_dataset/DiscoveringTheRationaleOfDecisions/datasets/confused_gpt4.csv").drop(columns=["eligible"]) shap_values = explainer.shap_values(test) if configuration.saveout: np.save(f'shap_values_{configuration.model_factory}.npy', shap_values) print(shap_values) #shap.summary_plot(shap_values, show=False, feature_names=features, class_names=["not granted", "granted", "unknown"]) #plt.savefig('vis.png') def morris_explainer(train, test, y_train, y_test, predict, features, encoder, configuration): from interpret.blackbox import MorrisSensitivity msa = MorrisSensitivity(predict, test, feature_names=features, num_resamples=10, num_levels=2) print(msa) def anchor_explainer(train, test, y_train, y_test, predict, features, encoder, configuration): explainer = anchor_tabular.AnchorTabularExplainer( ["not granted", "granted", "unknown"], features, train, { key: value.classes_ for key, value in encoder.items() }) explanations = [] for test_instance_idx in range(test.shape[0]): print (f"calculating {test_instance_idx} of {len(test)}") explanation = 
explainer.explain_instance(test[test_instance_idx], predict, threshold=0.95) print("Anchor: %s" % (' AND '.join(explanation.names()))) print('Precision: %.2f' % explanation.precision()) print('Coverage: %.2f' % explanation.coverage()) explanations.append(explanation) if configuration.saveout: with open("anchor_explanations.pkl", "wb") as output: pickle.dump(explanations, output) def counterfactual_explainer(train, test, y_train, y_test, predict, features, encoder, configuration): pd_train_x = pd.DataFrame(train, columns=features) pd_train_y = pd.DataFrame(y_train, columns=["eligible"]) pd_test_x = pd.DataFrame(test, columns=features) pd_test_y = pd.DataFrame(y_test, columns=["eligible"]) dice_data = pd.concat([pd_train_x, pd_train_y], axis=1, join='inner') dice_data = dice_data.infer_objects() continuous_features = [ features[idx] for idx in encoder.keys()] dice_dataset = dice_ml.Data(dataframe=dice_data, outcome_name='eligible', continuous_features=continuous_features) dice_model = dice_ml.Model(model=predict, backend="sklearn") exp = dice_ml.Dice(dice_dataset, dice_model, method="random") explanation = exp.generate_counterfactuals(pd.DataFrame(pd_test_x, columns=features), total_CFs=3, desired_class="opposite") print(explanation) #print(exp.visualize_as_dataframe(show_only_changes=True, display_sparse_df=False)) def define_command_line_options(): parser = argparse.ArgumentParser() parser.add_argument('--model-factory', choices=["llama2_hf_70b", "llama2_transformers", "vicuna", "vicuna15", "vicuna_vllm", "llama2_vllm", "gpt4_azure", "llama2" ], default="vicuna") parser.add_argument('--dataset', type=str, default="A_2400.csv") parser.add_argument('--predict-function', choices = ["predict_rules", "predict_rules_only"], default="predict_rules") parser.add_argument('--system-template', type=str, default="system_template_6_conditions.txt") parser.add_argument('--xai', default=[], choices=["shap_explainer", "fastshap_explainer", "fastshap2_explainer", "morris_explainer", "counterfactual_explainer"], action='append') parser.add_argument('--saveout', default=False, action='store_true') parser.add_argument('--classification-report', default=False, action='store_true') parser.add_argument('--fastshap-model-load', default=None, type=str) parser.add_argument('--fastshap-model-save', default=None, type=str) parser.add_argument('--drop-noise', default=False, action='store_true') parser.add_argument('--check-rules', default=False, action='store_true') parser.add_argument('--test-size', default=.2, type=float) parser.add_argument('--optimize-temperature', default=False, action='store_true') parser.add_argument('--throttle', default=0, type=int) parser.add_argument('--stability', default=False, action='store_true') parser.add_argument('--surrogate-model', default=None, type=str) parser.add_argument('--ablation-study', default=False, action='store_true') parser.add_argument('--confusion-study', default=False, action='store_true') return parser def read_command_line_options(parser : argparse.ArgumentParser) -> argparse.Namespace: return parser.parse_args() #this function does too much, split it later def prepare_train_test_split(dataset, test_size=.2, drop_noise=False, random_state=42): df = pd.read_csv(dataset) encoding = defaultdict(LabelEncoder) mask = (df.dtypes == object) | (df.dtypes == bool) X = df.drop(columns=["eligible"], axis=1) if drop_noise: to_drop = [ col for col in X.columns if "noise" in col] X = X.drop(columns=to_drop, axis=1) X.loc[:, mask] = X.loc[:, mask].astype(str).apply(lambda s: 
encoding[X.columns.get_loc(s.name)].fit_transform(s)) X_columns = X.columns #conversion to numpy array, because shap works with numpy arrays X = X.to_numpy() y = df["eligible"].to_numpy() X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state, stratify=y) return X, y, X_train, y_train, X_test, y_test, X_columns, encoding parser = define_command_line_options() configuration = read_command_line_options(parser) system_template = pathlib.Path(configuration.system_template).read_text() system_prompt = SystemMessagePromptTemplate.from_template(system_template) X, y, X_train, y_train, X_test, y_test, columns, encoding = prepare_train_test_split(configuration.dataset, test_size=configuration.test_size, drop_noise=configuration.drop_noise) X_columns = ",".join([name for name in columns]) system_prompt = system_prompt.format(names=X_columns) human_template = """{features}""" human_prompt = HumanMessagePromptTemplate.from_template(human_template) chat_prompt = ChatPromptTemplate.from_messages([system_prompt, human_prompt]) llm = globals()[configuration.model_factory]() chain = LLMChain(llm=llm, prompt=chat_prompt) predict = globals()[configuration.predict_function](chain, columns, encoding, configuration=configuration) def create_llm_classifier(predict): def _create_llm_classifier(cls): from sklearn.base import BaseEstimator, ClassifierMixin class LLMEstimatorSK(BaseEstimator, ClassifierMixin, cls): def __init__(self, temperature=.7): self.temperature = temperature def fit(self, X, y): return self def predict(self, X): return predict(X) return LLMEstimatorSK return _create_llm_classifier @create_llm_classifier(predict=predict) class LLMEstimatorSK: def __init__(self, temperature=.7): self.temperature = temperature def fit(self, X, y): return self def predict(self, X): return predict(X) def indices_tmp(): indices = range(len(X_train)) _, _, _, _, indices_train, indices_test = train_test_split(X_train, y_train, indices, test_size=configuration.test_size, random_state=42, stratify=y_train) yield indices_train, indices_test if configuration.optimize_temperature: from sklearn.model_selection import GridSearchCV grid = GridSearchCV(LLMEstimatorSK(), cv=indices_tmp(), param_grid={"temperature": [.1, .2, .3, .4, .5, .6, .7, .8, .9, 1]}) grid.fit(X_train, y_train) print(grid.best_params_) scores = pd.DataFrame(grid.cv_results_) scores.to_excel(f"grid_search_{configuration.model_factory}_04_06.xlsx") #rules consistency check if configuration.check_rules: predict_rules=predict_rules_only(X_train, columns, encoding) for idx in range(len(X_train)): violated = analyze_rules(X_train[idx], columns, encoding) if (len(violated) == 0 and y_train[idx] == False) or (len(violated) > 0 and y_train[idx] == True): print(f"rule violation for: {X_train[idx]}") if configuration.classification_report: y_pred = predict(X_test) print(classification_report(y_test, y_pred)) if configuration.stability: output = [] for _ in range(5): predict(X_test, output=output) import json with open(f"stability_{configuration.model_factory}.json", "w") as output_file: json.dump(output, output_file) if configuration.surrogate_model: from sklearn.tree import DecisionTreeClassifier surrogate_model = DecisionTreeClassifier(random_state=42) #get model answers answers = pd.read_json(configuration.surrogate_model) target = pd.DataFrame([1 if "granted" in text.lower() else 0 for text in answers["text"].tolist()]) #get indices of the test set indices = range(len(X)) _, _, _, _, indices_train, indices_test = 
train_test_split(X, y, indices, test_size=configuration.test_size, random_state=42, stratify=y) print(len(target)) surrogate_model.fit(X_train, target.iloc[indices_train]) print(surrogate_model.feature_importances_) for xai in configuration.xai: globals()[xai](X_train, X_test, y_train, y_test, surrogate_model.predict, columns, encoding, configuration) for xai in configuration.xai: globals()[xai](X_train, X_test, y_train, y_test, predict, columns, encoding, configuration) if configuration.ablation_study: import json output=[] y_pred=predict(X_test, output=output) with open(f"ablation_{configuration.system_template}", "w") as classification_file, \ open(f"ablation_{configuration.system_template}.json", "w") as json_file: classification_file.write(classification_report(y_test, y_pred)) json.dump(output, json_file) if configuration.confusion_study: # ---tmp---- class Status: def __init__(self): self.first_rule_violated = False, self.second_rule_violated = False, self.y_true = None, self.y_pred = None self.features = None self.answer = None def __str__(self): return f"first_rule_violated: {self.first_rule_violated}, second_rule_violated: {self.second_rule_violated}, y_true: {self.y_true}, y_pred: {self.y_pred}, features: {self.features}, gpt4: {self.answer}" def __repr__(self): return self.__str__() def __hash__(self): return hash((self.first_rule_violated, self.second_rule_violated, self.y_true, self.y_pred)) def __eq__(self, other): return (self.first_rule_violated, self.second_rule_violated, self.y_true, self.y_pred) == (other.first_rule_violated, other.second_rule_violated, other.y_true, other.y_pred) # #additional modules statuses = set() y_=[1 if yy else 0 for yy in y_test] analyze_rules=predict_rules_simplified_only(X_test, columns, encoding) violations=[] analyze_rules(X_test, violations) x_test_set = [] y_test_set = [] for rule, violation, y_true in zip(X_test, violations, y_): if len(statuses) == 8: break status = Status() status.y_true = y_true output=[] status.y_pred = predict([rule], output=output)[0] status.features = rule status.first_rule_violated = 1 in violation status.second_rule_violated = 2 in violation status.answer = output[0] if status not in statuses: statuses.add(status) x_test_set += [rule] y_test_set += [y_] print(status) print("----- Found rule ----") with open("confusion_study.txt", "a") as file: file.write(str(status)) file.write("\n") print("-----------------") print(statuses) print("---performing xai----") shap_explainer(X_train, np.array(x_test_set), y_train, y_test_set, predict, columns, encoding, configuration)
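# Illustrative invocations of the experiment script above. The file name
# "explain_with_llm.py" is a placeholder (the sample does not give one); the dataset
# and system-template values are the defaults defined in define_command_line_options().
#
#   python explain_with_llm.py --model-factory vicuna15 \
#       --dataset A_2400.csv --system-template system_template_6_conditions.txt \
#       --classification-report
#
#   python explain_with_llm.py --model-factory gpt4_azure \
#       --xai shap_explainer --saveout --test-size 0.2
#
#   python explain_with_llm.py --model-factory llama2 \
#       --xai counterfactual_explainer --drop-noise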
[ "langchain.prompts.chat.SystemMessagePromptTemplate.from_template", "langchain.llms.HuggingFacePipeline", "langchain.chat_models.ChatOpenAI", "langchain.chat_models.AzureChatOpenAI", "langchain.prompts.chat.HumanMessagePromptTemplate.from_template", "langchain.chains.LLMChain", "langchain.prompts.chat.ChatPromptTemplate.from_messages" ]
[((17189, 17247), 'langchain.prompts.chat.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['system_template'], {}), '(system_template)\n', (17230, 17247), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((17759, 17815), 'langchain.prompts.chat.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['human_template'], {}), '(human_template)\n', (17799, 17815), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((17833, 17896), 'langchain.prompts.chat.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['[system_prompt, human_prompt]'], {}), '([system_prompt, human_prompt])\n', (17865, 17896), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((17960, 17997), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'chat_prompt'}), '(llm=llm, prompt=chat_prompt)\n', (17968, 17997), False, 'from langchain.chains import LLMChain\n'), ((1366, 1527), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'model', 'openai_api_key': '"""EMPTY"""', 'openai_api_base': '"""http://localhost:8000/v1"""', 'max_tokens': '(150)', 'verbose': '(True)', 'temperature': 'temperature'}), "(model_name=model, openai_api_key='EMPTY', openai_api_base=\n 'http://localhost:8000/v1', max_tokens=150, verbose=True, temperature=\n temperature)\n", (1376, 1527), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1598, 1740), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'model', 'openai_api_key': '"""EMPTY"""', 'openai_api_base': '"""http://localhost:8000/v1"""', 'max_tokens': '(150)', 'temperature': 'temperature'}), "(model_name=model, openai_api_key='EMPTY', openai_api_base=\n 'http://localhost:8000/v1', max_tokens=150, temperature=temperature)\n", (1608, 1740), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1978, 2136), 'transformers.BitsAndBytesConfig', 'transformers.BitsAndBytesConfig', ([], {'load_in_4bit': '(True)', 'bnb_4bit_quant_type': '"""nf4"""', 'bnb_4bit_use_double_quant': '(True)', 'bnb_4bit_compute_dtype': 'torch.bfloat16'}), "(load_in_4bit=True, bnb_4bit_quant_type=\n 'nf4', bnb_4bit_use_double_quant=True, bnb_4bit_compute_dtype=torch.\n bfloat16)\n", (2009, 2136), False, 'import transformers\n'), ((2352, 2401), 'transformers.AutoConfig.from_pretrained', 'transformers.AutoConfig.from_pretrained', (['model_id'], {}), '(model_id)\n', (2391, 2401), False, 'import transformers\n'), ((2417, 2553), 'transformers.AutoModelForCausalLM.from_pretrained', 'transformers.AutoModelForCausalLM.from_pretrained', (['model_id'], {'config': 'model_config', 'quantization_config': 'bnb_config', 'device_map': '"""auto"""'}), "(model_id, config=\n model_config, quantization_config=bnb_config, device_map='auto')\n", (2466, 2553), False, 'import transformers\n'), ((2605, 2657), 'transformers.AutoTokenizer.from_pretrained', 'transformers.AutoTokenizer.from_pretrained', (['model_id'], {}), '(model_id)\n', (2647, 2657), False, 'import transformers\n'), ((2681, 2838), 'transformers.pipeline', 'transformers.pipeline', ([], {'model': 'model', 'tokenizer': 'tokenizer', 'task': '"""text-generation"""', 'temperature': 'temperature', 'max_new_tokens': '(150)', 'repetition_penalty': '(1.1)'}), "(model=model, tokenizer=tokenizer, task=\n 'text-generation', 
temperature=temperature, max_new_tokens=150,\n repetition_penalty=1.1)\n", (2702, 2838), False, 'import transformers\n'), ((2886, 2929), 'langchain.llms.HuggingFacePipeline', 'HuggingFacePipeline', ([], {'pipeline': 'generate_text'}), '(pipeline=generate_text)\n', (2905, 2929), False, 'from langchain.llms import HuggingFacePipeline\n'), ((3144, 3446), 'langchain.chat_models.AzureChatOpenAI', 'AzureChatOpenAI', ([], {'openai_api_base': "credentials['OPENAI_API_BASE']", 'openai_api_version': "credentials['OPENAI_API_VERSION']", 'deployment_name': '"""test-gpt4-32k"""', 'openai_api_key': "credentials['OPENAI_API_KEY']", 'openai_api_type': "credentials['OPENAI_API_TYPE']", 'max_tokens': '(150)', 'temperature': 'temperature'}), "(openai_api_base=credentials['OPENAI_API_BASE'],\n openai_api_version=credentials['OPENAI_API_VERSION'], deployment_name=\n 'test-gpt4-32k', openai_api_key=credentials['OPENAI_API_KEY'],\n openai_api_type=credentials['OPENAI_API_TYPE'], max_tokens=150,\n temperature=temperature)\n", (3159, 3446), False, 'from langchain.chat_models import AzureChatOpenAI\n'), ((12092, 12184), 'interpret.blackbox.MorrisSensitivity', 'MorrisSensitivity', (['predict', 'test'], {'feature_names': 'features', 'num_resamples': '(10)', 'num_levels': '(2)'}), '(predict, test, feature_names=features, num_resamples=10,\n num_levels=2)\n', (12109, 12184), False, 'from interpret.blackbox import MorrisSensitivity\n'), ((13240, 13277), 'pandas.DataFrame', 'pd.DataFrame', (['train'], {'columns': 'features'}), '(train, columns=features)\n', (13252, 13277), True, 'import pandas as pd\n'), ((13296, 13339), 'pandas.DataFrame', 'pd.DataFrame', (['y_train'], {'columns': "['eligible']"}), "(y_train, columns=['eligible'])\n", (13308, 13339), True, 'import pandas as pd\n'), ((13357, 13393), 'pandas.DataFrame', 'pd.DataFrame', (['test'], {'columns': 'features'}), '(test, columns=features)\n', (13369, 13393), True, 'import pandas as pd\n'), ((13411, 13453), 'pandas.DataFrame', 'pd.DataFrame', (['y_test'], {'columns': "['eligible']"}), "(y_test, columns=['eligible'])\n", (13423, 13453), True, 'import pandas as pd\n'), ((13471, 13528), 'pandas.concat', 'pd.concat', (['[pd_train_x, pd_train_y]'], {'axis': '(1)', 'join': '"""inner"""'}), "([pd_train_x, pd_train_y], axis=1, join='inner')\n", (13480, 13528), True, 'import pandas as pd\n'), ((13664, 13767), 'dice_ml.Data', 'dice_ml.Data', ([], {'dataframe': 'dice_data', 'outcome_name': '"""eligible"""', 'continuous_features': 'continuous_features'}), "(dataframe=dice_data, outcome_name='eligible',\n continuous_features=continuous_features)\n", (13676, 13767), False, 'import dice_ml\n'), ((13782, 13829), 'dice_ml.Model', 'dice_ml.Model', ([], {'model': 'predict', 'backend': '"""sklearn"""'}), "(model=predict, backend='sklearn')\n", (13795, 13829), False, 'import dice_ml\n'), ((13841, 13896), 'dice_ml.Dice', 'dice_ml.Dice', (['dice_dataset', 'dice_model'], {'method': '"""random"""'}), "(dice_dataset, dice_model, method='random')\n", (13853, 13896), False, 'import dice_ml\n'), ((14192, 14217), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (14215, 14217), False, 'import argparse\n'), ((16230, 16250), 'pandas.read_csv', 'pd.read_csv', (['dataset'], {}), '(dataset)\n', (16241, 16250), True, 'import pandas as pd\n'), ((16267, 16292), 'collections.defaultdict', 'defaultdict', (['LabelEncoder'], {}), '(LabelEncoder)\n', (16278, 16292), False, 'from collections import defaultdict\n'), ((16839, 16925), 'sklearn.model_selection.train_test_split', 
'train_test_split', (['X', 'y'], {'test_size': 'test_size', 'random_state': 'random_state', 'stratify': 'y'}), '(X, y, test_size=test_size, random_state=random_state,\n stratify=y)\n', (16855, 16925), False, 'from sklearn.model_selection import train_test_split\n'), ((18980, 19098), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'y_train', 'indices'], {'test_size': 'configuration.test_size', 'random_state': '(42)', 'stratify': 'y_train'}), '(X_train, y_train, indices, test_size=configuration.\n test_size, random_state=42, stratify=y_train)\n', (18996, 19098), False, 'from sklearn.model_selection import train_test_split\n'), ((19523, 19553), 'pandas.DataFrame', 'pd.DataFrame', (['grid.cv_results_'], {}), '(grid.cv_results_)\n', (19535, 19553), True, 'import pandas as pd\n'), ((20523, 20562), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(42)'}), '(random_state=42)\n', (20545, 20562), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((20602, 20645), 'pandas.read_json', 'pd.read_json', (['configuration.surrogate_model'], {}), '(configuration.surrogate_model)\n', (20614, 20645), True, 'import pandas as pd\n'), ((20861, 20960), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y', 'indices'], {'test_size': 'configuration.test_size', 'random_state': '(42)', 'stratify': 'y'}), '(X, y, indices, test_size=configuration.test_size,\n random_state=42, stratify=y)\n', (20877, 20960), False, 'from sklearn.model_selection import train_test_split\n'), ((3103, 3130), 'json.load', 'json.load', (['credentials_file'], {}), '(credentials_file)\n', (3112, 3130), False, 'import json\n'), ((7289, 7305), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (7297, 7305), True, 'import numpy as np\n'), ((8861, 8877), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (8869, 8877), True, 'import numpy as np\n'), ((9327, 9352), 'numpy.array', 'np.array', (['X'], {'dtype': 'object'}), '(X, dtype=object)\n', (9335, 9352), True, 'import numpy as np\n'), ((10606, 10623), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (10614, 10623), True, 'import numpy as np\n'), ((11681, 11751), 'numpy.save', 'np.save', (['f"""shap_values_{configuration.model_factory}.npy"""', 'shap_values'], {}), "(f'shap_values_{configuration.model_factory}.npy', shap_values)\n", (11688, 11751), True, 'import numpy as np\n'), ((13945, 13986), 'pandas.DataFrame', 'pd.DataFrame', (['pd_test_x'], {'columns': 'features'}), '(pd_test_x, columns=features)\n', (13957, 13986), True, 'import pandas as pd\n'), ((17116, 17159), 'pathlib.Path', 'pathlib.Path', (['configuration.system_template'], {}), '(configuration.system_template)\n', (17128, 17159), False, 'import pathlib\n'), ((20115, 20152), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (20136, 20152), False, 'from sklearn.metrics import classification_report\n'), ((20377, 20407), 'json.dump', 'json.dump', (['output', 'output_file'], {}), '(output, output_file)\n', (20386, 20407), False, 'import json\n'), ((21830, 21858), 'json.dump', 'json.dump', (['output', 'json_file'], {}), '(output, json_file)\n', (21839, 21858), False, 'import json\n'), ((24095, 24115), 'numpy.array', 'np.array', (['x_test_set'], {}), '(x_test_set)\n', (24103, 24115), True, 'import numpy as np\n'), ((9135, 9147), 'progressbar.Percentage', 'Percentage', ([], {}), '()\n', (9145, 9147), False, 'from progressbar import ProgressBar, 
Percentage, Bar, Timer, ETA, Counter\n'), ((9155, 9160), 'progressbar.Bar', 'Bar', ([], {}), '()\n', (9158, 9160), False, 'from progressbar import ProgressBar, Percentage, Bar, Timer, ETA, Counter\n'), ((9168, 9175), 'progressbar.Timer', 'Timer', ([], {}), '()\n', (9173, 9175), False, 'from progressbar import ProgressBar, Percentage, Bar, Timer, ETA, Counter\n'), ((9183, 9188), 'progressbar.ETA', 'ETA', ([], {}), '()\n', (9186, 9188), False, 'from progressbar import ProgressBar, Percentage, Bar, Timer, ETA, Counter\n'), ((9195, 9204), 'progressbar.Counter', 'Counter', ([], {}), '()\n', (9202, 9204), False, 'from progressbar import ProgressBar, Percentage, Bar, Timer, ETA, Counter\n'), ((11430, 11453), 'shap.sample', 'shap.sample', (['train', '(100)'], {}), '(train, 100)\n', (11441, 11453), False, 'import shap\n'), ((13082, 13115), 'pickle.dump', 'pickle.dump', (['explanations', 'output'], {}), '(explanations, output)\n', (13093, 13115), False, 'import pickle\n'), ((21782, 21819), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (21803, 21819), False, 'from sklearn.metrics import classification_report\n'), ((10532, 10566), 'time.sleep', 'time.sleep', (['configuration.throttle'], {}), '(configuration.throttle)\n', (10542, 10566), False, 'import time\n')]
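# The next sample is LangChain's callbacks manager module. As a quick orientation, a
# minimal sketch of the token-accounting context manager it defines
# (get_openai_callback); the model name and prompt are placeholders, not part of that
# module.
from langchain.callbacks import get_openai_callback
from langchain.chat_models import ChatOpenAI

llm = ChatOpenAI(model_name="gpt-3.5-turbo")  # placeholder model
with get_openai_callback() as cb:
    llm.predict("Say hello")               # any OpenAI call inside the block is counted
    print(cb.total_tokens, cb.total_cost)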
from __future__ import annotations import asyncio import functools import logging import os import warnings from contextlib import asynccontextmanager, contextmanager from contextvars import ContextVar from typing import ( Any, AsyncGenerator, Dict, Generator, List, Optional, Sequence, Type, TypeVar, Union, cast, ) from uuid import UUID, uuid4 import langchain from langchain.callbacks.base import ( BaseCallbackHandler, BaseCallbackManager, ChainManagerMixin, LLMManagerMixin, RetrieverManagerMixin, RunManagerMixin, ToolManagerMixin, ) from langchain.callbacks.openai_info import OpenAICallbackHandler from langchain.callbacks.stdout import StdOutCallbackHandler from langchain.callbacks.tracers.langchain import LangChainTracer from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1 from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler from langchain.callbacks.tracers.wandb import WandbTracer from langchain.schema import ( AgentAction, AgentFinish, Document, LLMResult, ) from langchain.schema.messages import BaseMessage, get_buffer_string logger = logging.getLogger(__name__) Callbacks = Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar( "openai_callback", default=None ) tracing_callback_var: ContextVar[ Optional[LangChainTracerV1] ] = ContextVar( # noqa: E501 "tracing_callback", default=None ) wandb_tracing_callback_var: ContextVar[ Optional[WandbTracer] ] = ContextVar( # noqa: E501 "tracing_wandb_callback", default=None ) tracing_v2_callback_var: ContextVar[ Optional[LangChainTracer] ] = ContextVar( # noqa: E501 "tracing_callback_v2", default=None ) def _get_debug() -> bool: return langchain.debug @contextmanager def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]: """Get the OpenAI callback handler in a context manager. which conveniently exposes token and cost information. Returns: OpenAICallbackHandler: The OpenAI callback handler. Example: >>> with get_openai_callback() as cb: ... # Use the OpenAI callback handler """ cb = OpenAICallbackHandler() openai_callback_var.set(cb) yield cb openai_callback_var.set(None) @contextmanager def tracing_enabled( session_name: str = "default", ) -> Generator[TracerSessionV1, None, None]: """Get the Deprecated LangChainTracer in a context manager. Args: session_name (str, optional): The name of the session. Defaults to "default". Returns: TracerSessionV1: The LangChainTracer session. Example: >>> with tracing_enabled() as session: ... # Use the LangChainTracer session """ cb = LangChainTracerV1() session = cast(TracerSessionV1, cb.load_session(session_name)) tracing_callback_var.set(cb) yield session tracing_callback_var.set(None) @contextmanager def wandb_tracing_enabled( session_name: str = "default", ) -> Generator[None, None, None]: """Get the WandbTracer in a context manager. Args: session_name (str, optional): The name of the session. Defaults to "default". Returns: None Example: >>> with wandb_tracing_enabled() as session: ... # Use the WandbTracer session """ cb = WandbTracer() wandb_tracing_callback_var.set(cb) yield None wandb_tracing_callback_var.set(None) @contextmanager def tracing_v2_enabled( project_name: Optional[str] = None, *, example_id: Optional[Union[str, UUID]] = None, ) -> Generator[None, None, None]: """Instruct LangChain to log all runs in context to LangSmith. Args: project_name (str, optional): The name of the project. Defaults to "default". 
example_id (str or UUID, optional): The ID of the example. Defaults to None. Returns: None Example: >>> with tracing_v2_enabled(): ... # LangChain code will automatically be traced """ # Issue a warning that this is experimental warnings.warn( "The tracing v2 API is in development. " "This is not yet stable and may change in the future." ) if isinstance(example_id, str): example_id = UUID(example_id) cb = LangChainTracer( example_id=example_id, project_name=project_name, ) tracing_v2_callback_var.set(cb) yield tracing_v2_callback_var.set(None) @contextmanager def trace_as_chain_group( group_name: str, *, project_name: Optional[str] = None, example_id: Optional[Union[str, UUID]] = None, tags: Optional[List[str]] = None, ) -> Generator[CallbackManager, None, None]: """Get a callback manager for a chain group in a context manager. Useful for grouping different calls together as a single run even if they aren't composed in a single chain. Args: group_name (str): The name of the chain group. project_name (str, optional): The name of the project. Defaults to None. example_id (str or UUID, optional): The ID of the example. Defaults to None. tags (List[str], optional): The inheritable tags to apply to all runs. Defaults to None. Returns: CallbackManager: The callback manager for the chain group. Example: >>> with trace_as_chain_group("group_name") as manager: ... # Use the callback manager for the chain group ... llm.predict("Foo", callbacks=manager) """ cb = LangChainTracer( project_name=project_name, example_id=example_id, ) cm = CallbackManager.configure( inheritable_callbacks=[cb], inheritable_tags=tags, ) run_manager = cm.on_chain_start({"name": group_name}, {}) yield run_manager.get_child() run_manager.on_chain_end({}) @asynccontextmanager async def atrace_as_chain_group( group_name: str, *, project_name: Optional[str] = None, example_id: Optional[Union[str, UUID]] = None, tags: Optional[List[str]] = None, ) -> AsyncGenerator[AsyncCallbackManager, None]: """Get an async callback manager for a chain group in a context manager. Useful for grouping different async calls together as a single run even if they aren't composed in a single chain. Args: group_name (str): The name of the chain group. project_name (str, optional): The name of the project. Defaults to None. example_id (str or UUID, optional): The ID of the example. Defaults to None. tags (List[str], optional): The inheritable tags to apply to all runs. Defaults to None. Returns: AsyncCallbackManager: The async callback manager for the chain group. Example: >>> async with atrace_as_chain_group("group_name") as manager: ... # Use the async callback manager for the chain group ... 
await llm.apredict("Foo", callbacks=manager) """ cb = LangChainTracer( project_name=project_name, example_id=example_id, ) cm = AsyncCallbackManager.configure( inheritable_callbacks=[cb], inheritable_tags=tags ) run_manager = await cm.on_chain_start({"name": group_name}, {}) try: yield run_manager.get_child() finally: await run_manager.on_chain_end({}) def _handle_event( handlers: List[BaseCallbackHandler], event_name: str, ignore_condition_name: Optional[str], *args: Any, **kwargs: Any, ) -> None: """Generic event handler for CallbackManager.""" message_strings: Optional[List[str]] = None for handler in handlers: try: if ignore_condition_name is None or not getattr( handler, ignore_condition_name ): getattr(handler, event_name)(*args, **kwargs) except NotImplementedError as e: if event_name == "on_chat_model_start": if message_strings is None: message_strings = [get_buffer_string(m) for m in args[1]] _handle_event( [handler], "on_llm_start", "ignore_llm", args[0], message_strings, *args[2:], **kwargs, ) else: logger.warning( f"Error in {handler.__class__.__name__}.{event_name} callback: {e}" ) except Exception as e: logger.warning( f"Error in {handler.__class__.__name__}.{event_name} callback: {e}" ) if handler.raise_error: raise e async def _ahandle_event_for_handler( handler: BaseCallbackHandler, event_name: str, ignore_condition_name: Optional[str], *args: Any, **kwargs: Any, ) -> None: try: if ignore_condition_name is None or not getattr(handler, ignore_condition_name): event = getattr(handler, event_name) if asyncio.iscoroutinefunction(event): await event(*args, **kwargs) else: if handler.run_inline: event(*args, **kwargs) else: await asyncio.get_event_loop().run_in_executor( None, functools.partial(event, *args, **kwargs) ) except NotImplementedError as e: if event_name == "on_chat_model_start": message_strings = [get_buffer_string(m) for m in args[1]] await _ahandle_event_for_handler( handler, "on_llm_start", "ignore_llm", args[0], message_strings, *args[2:], **kwargs, ) else: logger.warning( f"Error in {handler.__class__.__name__}.{event_name} callback: {e}" ) except Exception as e: logger.warning( f"Error in {handler.__class__.__name__}.{event_name} callback: {e}" ) if handler.raise_error: raise e async def _ahandle_event( handlers: List[BaseCallbackHandler], event_name: str, ignore_condition_name: Optional[str], *args: Any, **kwargs: Any, ) -> None: """Generic event handler for AsyncCallbackManager.""" for handler in [h for h in handlers if h.run_inline]: await _ahandle_event_for_handler( handler, event_name, ignore_condition_name, *args, **kwargs ) await asyncio.gather( *( _ahandle_event_for_handler( handler, event_name, ignore_condition_name, *args, **kwargs ) for handler in handlers if not handler.run_inline ) ) BRM = TypeVar("BRM", bound="BaseRunManager") class BaseRunManager(RunManagerMixin): """Base class for run manager (a bound callback manager).""" def __init__( self, *, run_id: UUID, handlers: List[BaseCallbackHandler], inheritable_handlers: List[BaseCallbackHandler], parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, inheritable_tags: Optional[List[str]] = None, ) -> None: """Initialize the run manager. Args: run_id (UUID): The ID of the run. handlers (List[BaseCallbackHandler]): The list of handlers. inheritable_handlers (List[BaseCallbackHandler]): The list of inheritable handlers. parent_run_id (UUID, optional): The ID of the parent run. Defaults to None. tags (Optional[List[str]]): The list of tags. 
inheritable_tags (Optional[List[str]]): The list of inheritable tags. """ self.run_id = run_id self.handlers = handlers self.inheritable_handlers = inheritable_handlers self.parent_run_id = parent_run_id self.tags = tags or [] self.inheritable_tags = inheritable_tags or [] @classmethod def get_noop_manager(cls: Type[BRM]) -> BRM: """Return a manager that doesn't perform any operations. Returns: BaseRunManager: The noop manager. """ return cls( run_id=uuid4(), handlers=[], inheritable_handlers=[], tags=[], inheritable_tags=[], ) class RunManager(BaseRunManager): """Sync Run Manager.""" def on_text( self, text: str, **kwargs: Any, ) -> Any: """Run when text is received. Args: text (str): The received text. Returns: Any: The result of the callback. """ _handle_event( self.handlers, "on_text", None, text, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class AsyncRunManager(BaseRunManager): """Async Run Manager.""" async def on_text( self, text: str, **kwargs: Any, ) -> Any: """Run when text is received. Args: text (str): The received text. Returns: Any: The result of the callback. """ await _ahandle_event( self.handlers, "on_text", None, text, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class CallbackManagerForLLMRun(RunManager, LLMManagerMixin): """Callback manager for LLM run.""" def on_llm_new_token( self, token: str, **kwargs: Any, ) -> None: """Run when LLM generates a new token. Args: token (str): The new token. """ _handle_event( self.handlers, "on_llm_new_token", "ignore_llm", token=token, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when LLM ends running. Args: response (LLMResult): The LLM result. """ _handle_event( self.handlers, "on_llm_end", "ignore_llm", response, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when LLM errors. Args: error (Exception or KeyboardInterrupt): The error. """ _handle_event( self.handlers, "on_llm_error", "ignore_llm", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin): """Async callback manager for LLM run.""" async def on_llm_new_token( self, token: str, **kwargs: Any, ) -> None: """Run when LLM generates a new token. Args: token (str): The new token. """ await _ahandle_event( self.handlers, "on_llm_new_token", "ignore_llm", token, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when LLM ends running. Args: response (LLMResult): The LLM result. """ await _ahandle_event( self.handlers, "on_llm_end", "ignore_llm", response, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when LLM errors. Args: error (Exception or KeyboardInterrupt): The error. """ await _ahandle_event( self.handlers, "on_llm_error", "ignore_llm", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class CallbackManagerForChainRun(RunManager, ChainManagerMixin): """Callback manager for chain run.""" def get_child(self, tag: Optional[str] = None) -> CallbackManager: """Get a child callback manager. Args: tag (str, optional): The tag for the child callback manager. Defaults to None. 
Returns: CallbackManager: The child callback manager. """ manager = CallbackManager(handlers=[], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) manager.add_tags(self.inheritable_tags) if tag is not None: manager.add_tags([tag], False) return manager def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Run when chain ends running. Args: outputs (Dict[str, Any]): The outputs of the chain. """ _handle_event( self.handlers, "on_chain_end", "ignore_chain", outputs, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when chain errors. Args: error (Exception or KeyboardInterrupt): The error. """ _handle_event( self.handlers, "on_chain_error", "ignore_chain", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Run when agent action is received. Args: action (AgentAction): The agent action. Returns: Any: The result of the callback. """ _handle_event( self.handlers, "on_agent_action", "ignore_agent", action, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any: """Run when agent finish is received. Args: finish (AgentFinish): The agent finish. Returns: Any: The result of the callback. """ _handle_event( self.handlers, "on_agent_finish", "ignore_agent", finish, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class AsyncCallbackManagerForChainRun(AsyncRunManager, ChainManagerMixin): """Async callback manager for chain run.""" def get_child(self, tag: Optional[str] = None) -> AsyncCallbackManager: """Get a child callback manager. Args: tag (str, optional): The tag for the child callback manager. Defaults to None. Returns: AsyncCallbackManager: The child callback manager. """ manager = AsyncCallbackManager(handlers=[], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) manager.add_tags(self.inheritable_tags) if tag is not None: manager.add_tags([tag], False) return manager async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Run when chain ends running. Args: outputs (Dict[str, Any]): The outputs of the chain. """ await _ahandle_event( self.handlers, "on_chain_end", "ignore_chain", outputs, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when chain errors. Args: error (Exception or KeyboardInterrupt): The error. """ await _ahandle_event( self.handlers, "on_chain_error", "ignore_chain", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Run when agent action is received. Args: action (AgentAction): The agent action. Returns: Any: The result of the callback. """ await _ahandle_event( self.handlers, "on_agent_action", "ignore_agent", action, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any: """Run when agent finish is received. Args: finish (AgentFinish): The agent finish. Returns: Any: The result of the callback. 
""" await _ahandle_event( self.handlers, "on_agent_finish", "ignore_agent", finish, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class CallbackManagerForToolRun(RunManager, ToolManagerMixin): """Callback manager for tool run.""" def get_child(self, tag: Optional[str] = None) -> CallbackManager: """Get a child callback manager. Args: tag (str, optional): The tag for the child callback manager. Defaults to None. Returns: CallbackManager: The child callback manager. """ manager = CallbackManager(handlers=[], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) manager.add_tags(self.inheritable_tags) if tag is not None: manager.add_tags([tag], False) return manager def on_tool_end( self, output: str, **kwargs: Any, ) -> None: """Run when tool ends running. Args: output (str): The output of the tool. """ _handle_event( self.handlers, "on_tool_end", "ignore_agent", output, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when tool errors. Args: error (Exception or KeyboardInterrupt): The error. """ _handle_event( self.handlers, "on_tool_error", "ignore_agent", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class AsyncCallbackManagerForToolRun(AsyncRunManager, ToolManagerMixin): """Async callback manager for tool run.""" def get_child(self, tag: Optional[str] = None) -> AsyncCallbackManager: """Get a child callback manager. Args: tag (str, optional): The tag to add to the child callback manager. Defaults to None. Returns: AsyncCallbackManager: The child callback manager. """ manager = AsyncCallbackManager(handlers=[], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) manager.add_tags(self.inheritable_tags) if tag is not None: manager.add_tags([tag], False) return manager async def on_tool_end(self, output: str, **kwargs: Any) -> None: """Run when tool ends running. Args: output (str): The output of the tool. """ await _ahandle_event( self.handlers, "on_tool_end", "ignore_agent", output, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when tool errors. Args: error (Exception or KeyboardInterrupt): The error. 
""" await _ahandle_event( self.handlers, "on_tool_error", "ignore_agent", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class CallbackManagerForRetrieverRun(RunManager, RetrieverManagerMixin): """Callback manager for retriever run.""" def get_child(self, tag: Optional[str] = None) -> CallbackManager: """Get a child callback manager.""" manager = CallbackManager([], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) manager.add_tags(self.inheritable_tags) if tag is not None: manager.add_tags([tag], False) return manager def on_retriever_end( self, documents: Sequence[Document], **kwargs: Any, ) -> None: """Run when retriever ends running.""" _handle_event( self.handlers, "on_retriever_end", "ignore_retriever", documents, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_retriever_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when retriever errors.""" _handle_event( self.handlers, "on_retriever_error", "ignore_retriever", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class AsyncCallbackManagerForRetrieverRun( AsyncRunManager, RetrieverManagerMixin, ): """Async callback manager for retriever run.""" def get_child(self, tag: Optional[str] = None) -> AsyncCallbackManager: """Get a child callback manager.""" manager = AsyncCallbackManager([], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) manager.add_tags(self.inheritable_tags) if tag is not None: manager.add_tags([tag], False) return manager async def on_retriever_end( self, documents: Sequence[Document], **kwargs: Any ) -> None: """Run when retriever ends running.""" await _ahandle_event( self.handlers, "on_retriever_end", "ignore_retriever", documents, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_retriever_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when retriever errors.""" await _ahandle_event( self.handlers, "on_retriever_error", "ignore_retriever", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class CallbackManager(BaseCallbackManager): """Callback manager that can be used to handle callbacks from langchain.""" def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any, ) -> List[CallbackManagerForLLMRun]: """Run when LLM starts running. Args: serialized (Dict[str, Any]): The serialized LLM. prompts (List[str]): The list of prompts. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: List[CallbackManagerForLLMRun]: A callback manager for each prompt as an LLM run. """ managers = [] for prompt in prompts: run_id_ = uuid4() _handle_event( self.handlers, "on_llm_start", "ignore_llm", serialized, [prompt], run_id=run_id_, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) managers.append( CallbackManagerForLLMRun( run_id=run_id_, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) ) return managers def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs: Any, ) -> List[CallbackManagerForLLMRun]: """Run when LLM starts running. Args: serialized (Dict[str, Any]): The serialized LLM. messages (List[List[BaseMessage]]): The list of messages. run_id (UUID, optional): The ID of the run. Defaults to None. 
Returns: List[CallbackManagerForLLMRun]: A callback manager for each list of messages as an LLM run. """ managers = [] for message_list in messages: run_id_ = uuid4() _handle_event( self.handlers, "on_chat_model_start", "ignore_chat_model", serialized, [message_list], run_id=run_id_, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) managers.append( CallbackManagerForLLMRun( run_id=run_id_, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) ) return managers def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForChainRun: """Run when chain starts running. Args: serialized (Dict[str, Any]): The serialized chain. inputs (Dict[str, Any]): The inputs to the chain. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: CallbackManagerForChainRun: The callback manager for the chain run. """ if run_id is None: run_id = uuid4() _handle_event( self.handlers, "on_chain_start", "ignore_chain", serialized, inputs, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) return CallbackManagerForChainRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) def on_tool_start( self, serialized: Dict[str, Any], input_str: str, run_id: Optional[UUID] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForToolRun: """Run when tool starts running. Args: serialized (Dict[str, Any]): The serialized tool. input_str (str): The input to the tool. run_id (UUID, optional): The ID of the run. Defaults to None. parent_run_id (UUID, optional): The ID of the parent run. Defaults to None. Returns: CallbackManagerForToolRun: The callback manager for the tool run. """ if run_id is None: run_id = uuid4() _handle_event( self.handlers, "on_tool_start", "ignore_agent", serialized, input_str, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) return CallbackManagerForToolRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) def on_retriever_start( self, query: str, run_id: Optional[UUID] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForRetrieverRun: """Run when retriever starts running.""" if run_id is None: run_id = uuid4() _handle_event( self.handlers, "on_retriever_start", "ignore_retriever", query, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) return CallbackManagerForRetrieverRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) @classmethod def configure( cls, inheritable_callbacks: Callbacks = None, local_callbacks: Callbacks = None, verbose: bool = False, inheritable_tags: Optional[List[str]] = None, local_tags: Optional[List[str]] = None, ) -> CallbackManager: """Configure the callback manager. Args: inheritable_callbacks (Optional[Callbacks], optional): The inheritable callbacks. Defaults to None. local_callbacks (Optional[Callbacks], optional): The local callbacks. Defaults to None. verbose (bool, optional): Whether to enable verbose mode. Defaults to False. 
inheritable_tags (Optional[List[str]], optional): The inheritable tags. Defaults to None. local_tags (Optional[List[str]], optional): The local tags. Defaults to None. Returns: CallbackManager: The configured callback manager. """ return _configure( cls, inheritable_callbacks, local_callbacks, verbose, inheritable_tags, local_tags, ) class AsyncCallbackManager(BaseCallbackManager): """Async callback manager that can be used to handle callbacks from LangChain.""" @property def is_async(self) -> bool: """Return whether the handler is async.""" return True async def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any, ) -> List[AsyncCallbackManagerForLLMRun]: """Run when LLM starts running. Args: serialized (Dict[str, Any]): The serialized LLM. prompts (List[str]): The list of prompts. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: List[AsyncCallbackManagerForLLMRun]: The list of async callback managers, one for each LLM Run corresponding to each prompt. """ tasks = [] managers = [] for prompt in prompts: run_id_ = uuid4() tasks.append( _ahandle_event( self.handlers, "on_llm_start", "ignore_llm", serialized, [prompt], run_id=run_id_, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) ) managers.append( AsyncCallbackManagerForLLMRun( run_id=run_id_, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) ) await asyncio.gather(*tasks) return managers async def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs: Any, ) -> Any: """Run when LLM starts running. Args: serialized (Dict[str, Any]): The serialized LLM. messages (List[List[BaseMessage]]): The list of messages. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: List[AsyncCallbackManagerForLLMRun]: The list of async callback managers, one for each LLM Run corresponding to each inner message list. """ tasks = [] managers = [] for message_list in messages: run_id_ = uuid4() tasks.append( _ahandle_event( self.handlers, "on_chat_model_start", "ignore_chat_model", serialized, [message_list], run_id=run_id_, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) ) managers.append( AsyncCallbackManagerForLLMRun( run_id=run_id_, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) ) await asyncio.gather(*tasks) return managers async def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], run_id: Optional[UUID] = None, **kwargs: Any, ) -> AsyncCallbackManagerForChainRun: """Run when chain starts running. Args: serialized (Dict[str, Any]): The serialized chain. inputs (Dict[str, Any]): The inputs to the chain. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: AsyncCallbackManagerForChainRun: The async callback manager for the chain run. 
""" if run_id is None: run_id = uuid4() await _ahandle_event( self.handlers, "on_chain_start", "ignore_chain", serialized, inputs, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) return AsyncCallbackManagerForChainRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) async def on_tool_start( self, serialized: Dict[str, Any], input_str: str, run_id: Optional[UUID] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> AsyncCallbackManagerForToolRun: """Run when tool starts running. Args: serialized (Dict[str, Any]): The serialized tool. input_str (str): The input to the tool. run_id (UUID, optional): The ID of the run. Defaults to None. parent_run_id (UUID, optional): The ID of the parent run. Defaults to None. Returns: AsyncCallbackManagerForToolRun: The async callback manager for the tool run. """ if run_id is None: run_id = uuid4() await _ahandle_event( self.handlers, "on_tool_start", "ignore_agent", serialized, input_str, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) return AsyncCallbackManagerForToolRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) async def on_retriever_start( self, query: str, run_id: Optional[UUID] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> AsyncCallbackManagerForRetrieverRun: """Run when retriever starts running.""" if run_id is None: run_id = uuid4() await _ahandle_event( self.handlers, "on_retriever_start", "ignore_retriever", query, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) return AsyncCallbackManagerForRetrieverRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, ) @classmethod def configure( cls, inheritable_callbacks: Callbacks = None, local_callbacks: Callbacks = None, verbose: bool = False, inheritable_tags: Optional[List[str]] = None, local_tags: Optional[List[str]] = None, ) -> AsyncCallbackManager: """Configure the async callback manager. Args: inheritable_callbacks (Optional[Callbacks], optional): The inheritable callbacks. Defaults to None. local_callbacks (Optional[Callbacks], optional): The local callbacks. Defaults to None. verbose (bool, optional): Whether to enable verbose mode. Defaults to False. inheritable_tags (Optional[List[str]], optional): The inheritable tags. Defaults to None. local_tags (Optional[List[str]], optional): The local tags. Defaults to None. Returns: AsyncCallbackManager: The configured async callback manager. """ return _configure( cls, inheritable_callbacks, local_callbacks, verbose, inheritable_tags, local_tags, ) T = TypeVar("T", CallbackManager, AsyncCallbackManager) def env_var_is_set(env_var: str) -> bool: """Check if an environment variable is set. Args: env_var (str): The name of the environment variable. Returns: bool: True if the environment variable is set, False otherwise. 
""" return env_var in os.environ and os.environ[env_var] not in ( "", "0", "false", "False", ) def _configure( callback_manager_cls: Type[T], inheritable_callbacks: Callbacks = None, local_callbacks: Callbacks = None, verbose: bool = False, inheritable_tags: Optional[List[str]] = None, local_tags: Optional[List[str]] = None, ) -> T: """Configure the callback manager. Args: callback_manager_cls (Type[T]): The callback manager class. inheritable_callbacks (Optional[Callbacks], optional): The inheritable callbacks. Defaults to None. local_callbacks (Optional[Callbacks], optional): The local callbacks. Defaults to None. verbose (bool, optional): Whether to enable verbose mode. Defaults to False. inheritable_tags (Optional[List[str]], optional): The inheritable tags. Defaults to None. local_tags (Optional[List[str]], optional): The local tags. Defaults to None. Returns: T: The configured callback manager. """ callback_manager = callback_manager_cls(handlers=[]) if inheritable_callbacks or local_callbacks: if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None: inheritable_callbacks_ = inheritable_callbacks or [] callback_manager = callback_manager_cls( handlers=inheritable_callbacks_.copy(), inheritable_handlers=inheritable_callbacks_.copy(), ) else: callback_manager = callback_manager_cls( handlers=inheritable_callbacks.handlers, inheritable_handlers=inheritable_callbacks.inheritable_handlers, parent_run_id=inheritable_callbacks.parent_run_id, tags=inheritable_callbacks.tags, inheritable_tags=inheritable_callbacks.inheritable_tags, ) local_handlers_ = ( local_callbacks if isinstance(local_callbacks, list) else (local_callbacks.handlers if local_callbacks else []) ) for handler in local_handlers_: callback_manager.add_handler(handler, False) if inheritable_tags or local_tags: callback_manager.add_tags(inheritable_tags or []) callback_manager.add_tags(local_tags or [], False) tracer = tracing_callback_var.get() wandb_tracer = wandb_tracing_callback_var.get() open_ai = openai_callback_var.get() tracing_enabled_ = ( env_var_is_set("LANGCHAIN_TRACING") or tracer is not None or env_var_is_set("LANGCHAIN_HANDLER") ) wandb_tracing_enabled_ = ( env_var_is_set("LANGCHAIN_WANDB_TRACING") or wandb_tracer is not None ) tracer_v2 = tracing_v2_callback_var.get() tracing_v2_enabled_ = ( env_var_is_set("LANGCHAIN_TRACING_V2") or tracer_v2 is not None ) tracer_project = os.environ.get( "LANGCHAIN_PROJECT", os.environ.get("LANGCHAIN_SESSION", "default") ) debug = _get_debug() if ( verbose or debug or tracing_enabled_ or tracing_v2_enabled_ or wandb_tracing_enabled_ or open_ai is not None ): if verbose and not any( isinstance(handler, StdOutCallbackHandler) for handler in callback_manager.handlers ): if debug: pass else: callback_manager.add_handler(StdOutCallbackHandler(), False) if debug and not any( isinstance(handler, ConsoleCallbackHandler) for handler in callback_manager.handlers ): callback_manager.add_handler(ConsoleCallbackHandler(), True) if tracing_enabled_ and not any( isinstance(handler, LangChainTracerV1) for handler in callback_manager.handlers ): if tracer: callback_manager.add_handler(tracer, True) else: handler = LangChainTracerV1() handler.load_session(tracer_project) callback_manager.add_handler(handler, True) if wandb_tracing_enabled_ and not any( isinstance(handler, WandbTracer) for handler in callback_manager.handlers ): if wandb_tracer: callback_manager.add_handler(wandb_tracer, True) else: handler = WandbTracer() callback_manager.add_handler(handler, True) if 
tracing_v2_enabled_ and not any( isinstance(handler, LangChainTracer) for handler in callback_manager.handlers ): if tracer_v2: callback_manager.add_handler(tracer_v2, True) else: try: handler = LangChainTracer(project_name=tracer_project) callback_manager.add_handler(handler, True) except Exception as e: logger.warning( "Unable to load requested LangChainTracer." " To disable this warning," " unset the LANGCHAIN_TRACING_V2 environment variables.", e, ) if open_ai is not None and not any( isinstance(handler, OpenAICallbackHandler) for handler in callback_manager.handlers ): callback_manager.add_handler(open_ai, True) return callback_manager
[ "langchain.callbacks.stdout.StdOutCallbackHandler", "langchain.callbacks.tracers.wandb.WandbTracer", "langchain.callbacks.openai_info.OpenAICallbackHandler", "langchain.callbacks.tracers.stdout.ConsoleCallbackHandler", "langchain.callbacks.tracers.langchain.LangChainTracer", "langchain.callbacks.tracers.langchain_v1.LangChainTracerV1", "langchain.schema.messages.get_buffer_string" ]
[((1198, 1225), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1215, 1225), False, 'import logging\n'), ((1370, 1413), 'contextvars.ContextVar', 'ContextVar', (['"""openai_callback"""'], {'default': 'None'}), "('openai_callback', default=None)\n", (1380, 1413), False, 'from contextvars import ContextVar\n'), ((1490, 1534), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_callback"""'], {'default': 'None'}), "('tracing_callback', default=None)\n", (1500, 1534), False, 'from contextvars import ContextVar\n'), ((1625, 1675), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_wandb_callback"""'], {'default': 'None'}), "('tracing_wandb_callback', default=None)\n", (1635, 1675), False, 'from contextvars import ContextVar\n'), ((1768, 1815), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_callback_v2"""'], {'default': 'None'}), "('tracing_callback_v2', default=None)\n", (1778, 1815), False, 'from contextvars import ContextVar\n'), ((11081, 11119), 'typing.TypeVar', 'TypeVar', (['"""BRM"""'], {'bound': '"""BaseRunManager"""'}), "('BRM', bound='BaseRunManager')\n", (11088, 11119), False, 'from typing import Any, AsyncGenerator, Dict, Generator, List, Optional, Sequence, Type, TypeVar, Union, cast\n'), ((44047, 44098), 'typing.TypeVar', 'TypeVar', (['"""T"""', 'CallbackManager', 'AsyncCallbackManager'], {}), "('T', CallbackManager, AsyncCallbackManager)\n", (44054, 44098), False, 'from typing import Any, AsyncGenerator, Dict, Generator, List, Optional, Sequence, Type, TypeVar, Union, cast\n'), ((2305, 2328), 'langchain.callbacks.openai_info.OpenAICallbackHandler', 'OpenAICallbackHandler', ([], {}), '()\n', (2326, 2328), False, 'from langchain.callbacks.openai_info import OpenAICallbackHandler\n'), ((2894, 2913), 'langchain.callbacks.tracers.langchain_v1.LangChainTracerV1', 'LangChainTracerV1', ([], {}), '()\n', (2911, 2913), False, 'from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1\n'), ((3496, 3509), 'langchain.callbacks.tracers.wandb.WandbTracer', 'WandbTracer', ([], {}), '()\n', (3507, 3509), False, 'from langchain.callbacks.tracers.wandb import WandbTracer\n'), ((4254, 4371), 'warnings.warn', 'warnings.warn', (['"""The tracing v2 API is in development. This is not yet stable and may change in the future."""'], {}), "(\n 'The tracing v2 API is in development. 
This is not yet stable and may change in the future.'\n )\n", (4267, 4371), False, 'import warnings\n'), ((4470, 4535), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'example_id': 'example_id', 'project_name': 'project_name'}), '(example_id=example_id, project_name=project_name)\n', (4485, 4535), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((5734, 5799), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'project_name': 'project_name', 'example_id': 'example_id'}), '(project_name=project_name, example_id=example_id)\n', (5749, 5799), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((7212, 7277), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'project_name': 'project_name', 'example_id': 'example_id'}), '(project_name=project_name, example_id=example_id)\n', (7227, 7277), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((4444, 4460), 'uuid.UUID', 'UUID', (['example_id'], {}), '(example_id)\n', (4448, 4460), False, 'from uuid import UUID, uuid4\n'), ((47360, 47406), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_SESSION"""', '"""default"""'], {}), "('LANGCHAIN_SESSION', 'default')\n", (47374, 47406), False, 'import os\n'), ((9287, 9321), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['event'], {}), '(event)\n', (9314, 9321), False, 'import asyncio\n'), ((28919, 28926), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (28924, 28926), False, 'from uuid import UUID, uuid4\n'), ((30362, 30369), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (30367, 30369), False, 'from uuid import UUID, uuid4\n'), ((31765, 31772), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (31770, 31772), False, 'from uuid import UUID, uuid4\n'), ((33092, 33099), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (33097, 33099), False, 'from uuid import UUID, uuid4\n'), ((33999, 34006), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (34004, 34006), False, 'from uuid import UUID, uuid4\n'), ((36726, 36733), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (36731, 36733), False, 'from uuid import UUID, uuid4\n'), ((37570, 37592), 'asyncio.gather', 'asyncio.gather', (['*tasks'], {}), '(*tasks)\n', (37584, 37592), False, 'import asyncio\n'), ((38358, 38365), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (38363, 38365), False, 'from uuid import UUID, uuid4\n'), ((39222, 39244), 'asyncio.gather', 'asyncio.gather', (['*tasks'], {}), '(*tasks)\n', (39236, 39244), False, 'import asyncio\n'), ((39927, 39934), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (39932, 39934), False, 'from uuid import UUID, uuid4\n'), ((41320, 41327), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (41325, 41327), False, 'from uuid import UUID, uuid4\n'), ((42250, 42257), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (42255, 42257), False, 'from uuid import UUID, uuid4\n'), ((12586, 12593), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (12591, 12593), False, 'from uuid import UUID, uuid4\n'), ((48091, 48115), 'langchain.callbacks.tracers.stdout.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (48113, 48115), False, 'from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler\n'), ((48405, 48424), 'langchain.callbacks.tracers.langchain_v1.LangChainTracerV1', 'LangChainTracerV1', ([], {}), '()\n', (48422, 48424), False, 'from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1\n'), ((48820, 48833), 'langchain.callbacks.tracers.wandb.WandbTracer', 
'WandbTracer', ([], {}), '()\n', (48831, 48833), False, 'from langchain.callbacks.tracers.wandb import WandbTracer\n'), ((9768, 9788), 'langchain.schema.messages.get_buffer_string', 'get_buffer_string', (['m'], {}), '(m)\n', (9785, 9788), False, 'from langchain.schema.messages import BaseMessage, get_buffer_string\n'), ((47868, 47891), 'langchain.callbacks.stdout.StdOutCallbackHandler', 'StdOutCallbackHandler', ([], {}), '()\n', (47889, 47891), False, 'from langchain.callbacks.stdout import StdOutCallbackHandler\n'), ((49208, 49252), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'project_name': 'tracer_project'}), '(project_name=tracer_project)\n', (49223, 49252), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((8253, 8273), 'langchain.schema.messages.get_buffer_string', 'get_buffer_string', (['m'], {}), '(m)\n', (8270, 8273), False, 'from langchain.schema.messages import BaseMessage, get_buffer_string\n'), ((9588, 9629), 'functools.partial', 'functools.partial', (['event', '*args'], {}), '(event, *args, **kwargs)\n', (9605, 9629), False, 'import functools\n'), ((9516, 9540), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (9538, 9540), False, 'import asyncio\n')]
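A minimal usage sketch for the callback-manager module above, assuming only classes that module already imports (CallbackManager, StdOutCallbackHandler) and the configure()/on_llm_start() signatures shown there; the serialized payload and prompt strings are hypothetical placeholders.

from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.stdout import StdOutCallbackHandler

# Build a manager with one local (non-inheritable) stdout handler.
manager = CallbackManager.configure(
    inheritable_callbacks=None,
    local_callbacks=[StdOutCallbackHandler()],
    verbose=True,
)

# on_llm_start returns one CallbackManagerForLLMRun per prompt.
run_managers = manager.on_llm_start({"name": "placeholder_llm"}, ["Hello", "Bonjour"])
for run_manager in run_managers:
    # Each event is fanned out to every handler that does not ignore LLM events.
    run_manager.on_llm_new_token("token")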
import os from transformers import AutoTokenizer from configs import ( EMBEDDING_MODEL, KB_ROOT_PATH, CHUNK_SIZE, OVERLAP_SIZE, ZH_TITLE_ENHANCE, logger, log_verbose, text_splitter_dict, LLM_MODEL, TEXT_SPLITTER_NAME, ) import importlib from text_splitter import zh_title_enhance as func_zh_title_enhance import langchain.document_loaders from langchain.docstore.document import Document from langchain.text_splitter import TextSplitter from pathlib import Path import json from concurrent.futures import ThreadPoolExecutor from server.utils import run_in_thread_pool, embedding_device, get_model_worker_config import io from typing import List, Union, Callable, Dict, Optional, Tuple, Generator import chardet def validate_kb_name(knowledge_base_id: str) -> bool: # 检查是否包含预期外的字符或路径攻击关键字 if "../" in knowledge_base_id: return False return True def get_kb_path(knowledge_base_name: str): return os.path.join(KB_ROOT_PATH, knowledge_base_name) def get_doc_path(knowledge_base_name: str): return os.path.join(get_kb_path(knowledge_base_name), "content") def get_vs_path(knowledge_base_name: str, vector_name: str): return os.path.join(get_kb_path(knowledge_base_name), vector_name) def get_file_path(knowledge_base_name: str, doc_name: str): return os.path.join(get_doc_path(knowledge_base_name), doc_name) def list_kbs_from_folder(): return [f for f in os.listdir(KB_ROOT_PATH) if os.path.isdir(os.path.join(KB_ROOT_PATH, f))] def list_files_from_folder(kb_name: str): doc_path = get_doc_path(kb_name) return [file for file in os.listdir(doc_path) if os.path.isfile(os.path.join(doc_path, file))] def load_embeddings(model: str = EMBEDDING_MODEL, device: str = embedding_device()): ''' 从缓存中加载embeddings,可以避免多线程时竞争加载。 ''' from server.knowledge_base.kb_cache.base import embeddings_pool return embeddings_pool.load_embeddings(model=model, device=device) LOADER_DICT = {"UnstructuredHTMLLoader": ['.html'], "UnstructuredMarkdownLoader": ['.md'], "CustomJSONLoader": [".json"], "CSVLoader": [".csv"], "RapidOCRPDFLoader": [".pdf"], "RapidOCRLoader": ['.png', '.jpg', '.jpeg', '.bmp'], "UnstructuredFileLoader": ['.eml', '.msg', '.rst', '.rtf', '.txt', '.xml', '.docx', '.epub', '.odt', '.ppt', '.pptx', '.tsv'], } SUPPORTED_EXTS = [ext for sublist in LOADER_DICT.values() for ext in sublist] class CustomJSONLoader(langchain.document_loaders.JSONLoader): ''' langchain的JSONLoader需要jq,在win上使用不便,进行替代。针对langchain==0.0.286 ''' def __init__( self, file_path: Union[str, Path], content_key: Optional[str] = None, metadata_func: Optional[Callable[[Dict, Dict], Dict]] = None, text_content: bool = True, json_lines: bool = False, ): """Initialize the JSONLoader. Args: file_path (Union[str, Path]): The path to the JSON or JSON Lines file. content_key (str): The key to use to extract the content from the JSON if results to a list of objects (dict). metadata_func (Callable[Dict, Dict]): A function that takes in the JSON object extracted by the jq_schema and the default metadata and returns a dict of the updated metadata. text_content (bool): Boolean flag to indicate whether the content is in string format, default to True. json_lines (bool): Boolean flag to indicate whether the input is in JSON Lines format. 
""" self.file_path = Path(file_path).resolve() self._content_key = content_key self._metadata_func = metadata_func self._text_content = text_content self._json_lines = json_lines def _parse(self, content: str, docs: List[Document]) -> None: """Convert given content to documents.""" data = json.loads(content) # Perform some validation # This is not a perfect validation, but it should catch most cases # and prevent the user from getting a cryptic error later on. if self._content_key is not None: self._validate_content_key(data) if self._metadata_func is not None: self._validate_metadata_func(data) for i, sample in enumerate(data, len(docs) + 1): text = self._get_text(sample=sample) metadata = self._get_metadata( sample=sample, source=str(self.file_path), seq_num=i ) docs.append(Document(page_content=text, metadata=metadata)) langchain.document_loaders.CustomJSONLoader = CustomJSONLoader def get_LoaderClass(file_extension): for LoaderClass, extensions in LOADER_DICT.items(): if file_extension in extensions: return LoaderClass # 把一些向量化共用逻辑从KnowledgeFile抽取出来,等langchain支持内存文件的时候,可以将非磁盘文件向量化 def get_loader(loader_name: str, file_path_or_content: Union[str, bytes, io.StringIO, io.BytesIO]): ''' 根据loader_name和文件路径或内容返回文档加载器。 ''' try: if loader_name in ["RapidOCRPDFLoader", "RapidOCRLoader"]: document_loaders_module = importlib.import_module('document_loaders') else: document_loaders_module = importlib.import_module('langchain.document_loaders') DocumentLoader = getattr(document_loaders_module, loader_name) except Exception as e: msg = f"为文件{file_path_or_content}查找加载器{loader_name}时出错:{e}" logger.error(f'{e.__class__.__name__}: {msg}', exc_info=e if log_verbose else None) document_loaders_module = importlib.import_module('langchain.document_loaders') DocumentLoader = getattr(document_loaders_module, "UnstructuredFileLoader") if loader_name == "UnstructuredFileLoader": loader = DocumentLoader(file_path_or_content, autodetect_encoding=True) elif loader_name == "CSVLoader": # 自动识别文件编码类型,避免langchain loader 加载文件报编码错误 with open(file_path_or_content, 'rb') as struct_file: encode_detect = chardet.detect(struct_file.read()) if encode_detect: loader = DocumentLoader(file_path_or_content, encoding=encode_detect["encoding"]) else: loader = DocumentLoader(file_path_or_content, encoding="utf-8") elif loader_name == "JSONLoader": loader = DocumentLoader(file_path_or_content, jq_schema=".", text_content=False) elif loader_name == "CustomJSONLoader": loader = DocumentLoader(file_path_or_content, text_content=False) elif loader_name == "UnstructuredMarkdownLoader": loader = DocumentLoader(file_path_or_content, mode="elements") elif loader_name == "UnstructuredHTMLLoader": loader = DocumentLoader(file_path_or_content, mode="elements") else: loader = DocumentLoader(file_path_or_content) return loader def make_text_splitter( splitter_name: str = TEXT_SPLITTER_NAME, chunk_size: int = CHUNK_SIZE, chunk_overlap: int = OVERLAP_SIZE, llm_model: str = LLM_MODEL, ): """ 根据参数获取特定的分词器 """ splitter_name = splitter_name or "SpacyTextSplitter" try: if splitter_name == "MarkdownHeaderTextSplitter": # MarkdownHeaderTextSplitter特殊判定 headers_to_split_on = text_splitter_dict[splitter_name]['headers_to_split_on'] text_splitter = langchain.text_splitter.MarkdownHeaderTextSplitter( headers_to_split_on=headers_to_split_on) else: try: ## 优先使用用户自定义的text_splitter text_splitter_module = importlib.import_module('text_splitter') TextSplitter = getattr(text_splitter_module, splitter_name) except: ## 否则使用langchain的text_splitter text_splitter_module 
= importlib.import_module('langchain.text_splitter') TextSplitter = getattr(text_splitter_module, splitter_name) if text_splitter_dict[splitter_name]["source"] == "tiktoken": ## 从tiktoken加载 try: text_splitter = TextSplitter.from_tiktoken_encoder( encoding_name=text_splitter_dict[splitter_name]["tokenizer_name_or_path"], pipeline="zh_core_web_sm", chunk_size=chunk_size, chunk_overlap=chunk_overlap ) except: text_splitter = TextSplitter.from_tiktoken_encoder( encoding_name=text_splitter_dict[splitter_name]["tokenizer_name_or_path"], chunk_size=chunk_size, chunk_overlap=chunk_overlap ) elif text_splitter_dict[splitter_name]["source"] == "huggingface": ## 从huggingface加载 if text_splitter_dict[splitter_name]["tokenizer_name_or_path"] == "": config = get_model_worker_config(llm_model) text_splitter_dict[splitter_name]["tokenizer_name_or_path"] = \ config.get("model_path") if text_splitter_dict[splitter_name]["tokenizer_name_or_path"] == "gpt2": from transformers import GPT2TokenizerFast from langchain.text_splitter import CharacterTextSplitter tokenizer = GPT2TokenizerFast.from_pretrained("gpt2") else: ## 字符长度加载 tokenizer = AutoTokenizer.from_pretrained( text_splitter_dict[splitter_name]["tokenizer_name_or_path"], trust_remote_code=True) text_splitter = TextSplitter.from_huggingface_tokenizer( tokenizer=tokenizer, chunk_size=chunk_size, chunk_overlap=chunk_overlap ) else: try: text_splitter = TextSplitter( pipeline="zh_core_web_sm", chunk_size=chunk_size, chunk_overlap=chunk_overlap ) except: text_splitter = TextSplitter( chunk_size=chunk_size, chunk_overlap=chunk_overlap ) except Exception as e: print(e) text_splitter_module = importlib.import_module('langchain.text_splitter') TextSplitter = getattr(text_splitter_module, "RecursiveCharacterTextSplitter") text_splitter = TextSplitter(chunk_size=250, chunk_overlap=50) return text_splitter class KnowledgeFile: def __init__( self, filename: str, knowledge_base_name: str ): ''' 对应知识库目录中的文件,必须是磁盘上存在的才能进行向量化等操作。 ''' self.kb_name = knowledge_base_name self.filename = filename self.ext = os.path.splitext(filename)[-1].lower() if self.ext not in SUPPORTED_EXTS: raise ValueError(f"暂未支持的文件格式 {self.ext}") self.filepath = get_file_path(knowledge_base_name, filename) self.docs = None self.splited_docs = None self.document_loader_name = get_LoaderClass(self.ext) self.text_splitter_name = TEXT_SPLITTER_NAME def file2docs(self, refresh: bool=False): if self.docs is None or refresh: logger.info(f"{self.document_loader_name} used for {self.filepath}") loader = get_loader(self.document_loader_name, self.filepath) self.docs = loader.load() return self.docs def docs2texts( self, docs: List[Document] = None, zh_title_enhance: bool = ZH_TITLE_ENHANCE, refresh: bool = False, chunk_size: int = CHUNK_SIZE, chunk_overlap: int = OVERLAP_SIZE, text_splitter: TextSplitter = None, ): docs = docs or self.file2docs(refresh=refresh) if not docs: return [] if self.ext not in [".csv"]: if text_splitter is None: text_splitter = make_text_splitter(splitter_name=self.text_splitter_name, chunk_size=chunk_size, chunk_overlap=chunk_overlap) if self.text_splitter_name == "MarkdownHeaderTextSplitter": docs = text_splitter.split_text(docs[0].page_content) for doc in docs: # 如果文档有元数据 if doc.metadata: doc.metadata["source"] = os.path.basename(self.filepath) else: docs = text_splitter.split_documents(docs) print(f"文档切分示例:{docs[0]}") if zh_title_enhance: docs = func_zh_title_enhance(docs) self.splited_docs = docs return self.splited_docs def file2text( self, zh_title_enhance: bool = 
ZH_TITLE_ENHANCE, refresh: bool = False, chunk_size: int = CHUNK_SIZE, chunk_overlap: int = OVERLAP_SIZE, text_splitter: TextSplitter = None, ): if self.splited_docs is None or refresh: docs = self.file2docs() self.splited_docs = self.docs2texts(docs=docs, zh_title_enhance=zh_title_enhance, refresh=refresh, chunk_size=chunk_size, chunk_overlap=chunk_overlap, text_splitter=text_splitter) return self.splited_docs def file_exist(self): return os.path.isfile(self.filepath) def get_mtime(self): return os.path.getmtime(self.filepath) def get_size(self): return os.path.getsize(self.filepath) def files2docs_in_thread( files: List[Union[KnowledgeFile, Tuple[str, str], Dict]], chunk_size: int = CHUNK_SIZE, chunk_overlap: int = OVERLAP_SIZE, zh_title_enhance: bool = ZH_TITLE_ENHANCE, pool: ThreadPoolExecutor = None, ) -> Generator: ''' 利用多线程批量将磁盘文件转化成langchain Document. 如果传入参数是Tuple,形式为(filename, kb_name) 生成器返回值为 status, (kb_name, file_name, docs | error) ''' def file2docs(*, file: KnowledgeFile, **kwargs) -> Tuple[bool, Tuple[str, str, List[Document]]]: try: return True, (file.kb_name, file.filename, file.file2text(**kwargs)) except Exception as e: msg = f"从文件 {file.kb_name}/{file.filename} 加载文档时出错:{e}" logger.error(f'{e.__class__.__name__}: {msg}', exc_info=e if log_verbose else None) return False, (file.kb_name, file.filename, msg) kwargs_list = [] for i, file in enumerate(files): kwargs = {} try: if isinstance(file, tuple) and len(file) >= 2: filename=file[0] kb_name=file[1] file = KnowledgeFile(filename=filename, knowledge_base_name=kb_name) elif isinstance(file, dict): filename = file.pop("filename") kb_name = file.pop("kb_name") kwargs.update(file) file = KnowledgeFile(filename=filename, knowledge_base_name=kb_name) kwargs["file"] = file kwargs["chunk_size"] = chunk_size kwargs["chunk_overlap"] = chunk_overlap kwargs["zh_title_enhance"] = zh_title_enhance kwargs_list.append(kwargs) except Exception as e: yield False, (kb_name, filename, str(e)) for result in run_in_thread_pool(func=file2docs, params=kwargs_list, pool=pool): yield result if __name__ == "__main__": from pprint import pprint kb_file = KnowledgeFile(filename="test.txt", knowledge_base_name="samples") # kb_file.text_splitter_name = "RecursiveCharacterTextSplitter" docs = kb_file.file2docs() pprint(docs[-1]) docs = kb_file.file2text() pprint(docs[-1])
[ "langchain.text_splitter.TextSplitter.from_huggingface_tokenizer", "langchain.text_splitter.TextSplitter.from_tiktoken_encoder", "langchain.docstore.document.Document", "langchain.text_splitter.TextSplitter" ]
[((964, 1011), 'os.path.join', 'os.path.join', (['KB_ROOT_PATH', 'knowledge_base_name'], {}), '(KB_ROOT_PATH, knowledge_base_name)\n', (976, 1011), False, 'import os\n'), ((1789, 1807), 'server.utils.embedding_device', 'embedding_device', ([], {}), '()\n', (1805, 1807), False, 'from server.utils import run_in_thread_pool, embedding_device, get_model_worker_config\n'), ((1940, 1999), 'server.knowledge_base.kb_cache.base.embeddings_pool.load_embeddings', 'embeddings_pool.load_embeddings', ([], {'model': 'model', 'device': 'device'}), '(model=model, device=device)\n', (1971, 1999), False, 'from server.knowledge_base.kb_cache.base import embeddings_pool\n'), ((15816, 15881), 'server.utils.run_in_thread_pool', 'run_in_thread_pool', ([], {'func': 'file2docs', 'params': 'kwargs_list', 'pool': 'pool'}), '(func=file2docs, params=kwargs_list, pool=pool)\n', (15834, 15881), False, 'from server.utils import run_in_thread_pool, embedding_device, get_model_worker_config\n'), ((16147, 16163), 'pprint.pprint', 'pprint', (['docs[-1]'], {}), '(docs[-1])\n', (16153, 16163), False, 'from pprint import pprint\n'), ((16200, 16216), 'pprint.pprint', 'pprint', (['docs[-1]'], {}), '(docs[-1])\n', (16206, 16216), False, 'from pprint import pprint\n'), ((4174, 4193), 'json.loads', 'json.loads', (['content'], {}), '(content)\n', (4184, 4193), False, 'import json\n'), ((13844, 13873), 'os.path.isfile', 'os.path.isfile', (['self.filepath'], {}), '(self.filepath)\n', (13858, 13873), False, 'import os\n'), ((13915, 13946), 'os.path.getmtime', 'os.path.getmtime', (['self.filepath'], {}), '(self.filepath)\n', (13931, 13946), False, 'import os\n'), ((13987, 14017), 'os.path.getsize', 'os.path.getsize', (['self.filepath'], {}), '(self.filepath)\n', (14002, 14017), False, 'import os\n'), ((1445, 1469), 'os.listdir', 'os.listdir', (['KB_ROOT_PATH'], {}), '(KB_ROOT_PATH)\n', (1455, 1469), False, 'import os\n'), ((1641, 1661), 'os.listdir', 'os.listdir', (['doc_path'], {}), '(doc_path)\n', (1651, 1661), False, 'import os\n'), ((5418, 5461), 'importlib.import_module', 'importlib.import_module', (['"""document_loaders"""'], {}), "('document_loaders')\n", (5441, 5461), False, 'import importlib\n'), ((5514, 5567), 'importlib.import_module', 'importlib.import_module', (['"""langchain.document_loaders"""'], {}), "('langchain.document_loaders')\n", (5537, 5567), False, 'import importlib\n'), ((5742, 5829), 'configs.logger.error', 'logger.error', (['f"""{e.__class__.__name__}: {msg}"""'], {'exc_info': '(e if log_verbose else None)'}), "(f'{e.__class__.__name__}: {msg}', exc_info=e if log_verbose else\n None)\n", (5754, 5829), False, 'from configs import EMBEDDING_MODEL, KB_ROOT_PATH, CHUNK_SIZE, OVERLAP_SIZE, ZH_TITLE_ENHANCE, logger, log_verbose, text_splitter_dict, LLM_MODEL, TEXT_SPLITTER_NAME\n'), ((5881, 5934), 'importlib.import_module', 'importlib.import_module', (['"""langchain.document_loaders"""'], {}), "('langchain.document_loaders')\n", (5904, 5934), False, 'import importlib\n'), ((10617, 10667), 'importlib.import_module', 'importlib.import_module', (['"""langchain.text_splitter"""'], {}), "('langchain.text_splitter')\n", (10640, 10667), False, 'import importlib\n'), ((10779, 10825), 'langchain.text_splitter.TextSplitter', 'TextSplitter', ([], {'chunk_size': '(250)', 'chunk_overlap': '(50)'}), '(chunk_size=250, chunk_overlap=50)\n', (10791, 10825), False, 'from langchain.text_splitter import TextSplitter\n'), ((11618, 11686), 'configs.logger.info', 'logger.info', (['f"""{self.document_loader_name} used for 
{self.filepath}"""'], {}), "(f'{self.document_loader_name} used for {self.filepath}')\n", (11629, 11686), False, 'from configs import EMBEDDING_MODEL, KB_ROOT_PATH, CHUNK_SIZE, OVERLAP_SIZE, ZH_TITLE_ENHANCE, logger, log_verbose, text_splitter_dict, LLM_MODEL, TEXT_SPLITTER_NAME\n'), ((12910, 12937), 'text_splitter.zh_title_enhance', 'func_zh_title_enhance', (['docs'], {}), '(docs)\n', (12931, 12937), True, 'from text_splitter import zh_title_enhance as func_zh_title_enhance\n'), ((1499, 1528), 'os.path.join', 'os.path.join', (['KB_ROOT_PATH', 'f'], {}), '(KB_ROOT_PATH, f)\n', (1511, 1528), False, 'import os\n'), ((1692, 1720), 'os.path.join', 'os.path.join', (['doc_path', 'file'], {}), '(doc_path, file)\n', (1704, 1720), False, 'import os\n'), ((3852, 3867), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (3856, 3867), False, 'from pathlib import Path\n'), ((4809, 4855), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text', 'metadata': 'metadata'}), '(page_content=text, metadata=metadata)\n', (4817, 4855), False, 'from langchain.docstore.document import Document\n'), ((7841, 7881), 'importlib.import_module', 'importlib.import_module', (['"""text_splitter"""'], {}), "('text_splitter')\n", (7864, 7881), False, 'import importlib\n'), ((14757, 14844), 'configs.logger.error', 'logger.error', (['f"""{e.__class__.__name__}: {msg}"""'], {'exc_info': '(e if log_verbose else None)'}), "(f'{e.__class__.__name__}: {msg}', exc_info=e if log_verbose else\n None)\n", (14769, 14844), False, 'from configs import EMBEDDING_MODEL, KB_ROOT_PATH, CHUNK_SIZE, OVERLAP_SIZE, ZH_TITLE_ENHANCE, logger, log_verbose, text_splitter_dict, LLM_MODEL, TEXT_SPLITTER_NAME\n'), ((8049, 8099), 'importlib.import_module', 'importlib.import_module', (['"""langchain.text_splitter"""'], {}), "('langchain.text_splitter')\n", (8072, 8099), False, 'import importlib\n'), ((8324, 8521), 'langchain.text_splitter.TextSplitter.from_tiktoken_encoder', 'TextSplitter.from_tiktoken_encoder', ([], {'encoding_name': "text_splitter_dict[splitter_name]['tokenizer_name_or_path']", 'pipeline': '"""zh_core_web_sm"""', 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), "(encoding_name=text_splitter_dict[\n splitter_name]['tokenizer_name_or_path'], pipeline='zh_core_web_sm',\n chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n", (8358, 8521), False, 'from langchain.text_splitter import TextSplitter\n'), ((9895, 10012), 'langchain.text_splitter.TextSplitter.from_huggingface_tokenizer', 'TextSplitter.from_huggingface_tokenizer', ([], {'tokenizer': 'tokenizer', 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(tokenizer=tokenizer, chunk_size=\n chunk_size, chunk_overlap=chunk_overlap)\n', (9934, 10012), False, 'from langchain.text_splitter import TextSplitter\n'), ((11140, 11166), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (11156, 11166), False, 'import os\n'), ((8691, 8861), 'langchain.text_splitter.TextSplitter.from_tiktoken_encoder', 'TextSplitter.from_tiktoken_encoder', ([], {'encoding_name': "text_splitter_dict[splitter_name]['tokenizer_name_or_path']", 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), "(encoding_name=text_splitter_dict[\n splitter_name]['tokenizer_name_or_path'], chunk_size=chunk_size,\n chunk_overlap=chunk_overlap)\n", (8725, 8861), False, 'from langchain.text_splitter import TextSplitter\n'), ((9160, 9194), 'server.utils.get_model_worker_config', 'get_model_worker_config', (['llm_model'], {}), 
'(llm_model)\n', (9183, 9194), False, 'from server.utils import run_in_thread_pool, embedding_device, get_model_worker_config\n'), ((9592, 9633), 'transformers.GPT2TokenizerFast.from_pretrained', 'GPT2TokenizerFast.from_pretrained', (['"""gpt2"""'], {}), "('gpt2')\n", (9625, 9633), False, 'from transformers import GPT2TokenizerFast\n'), ((9699, 9818), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (["text_splitter_dict[splitter_name]['tokenizer_name_or_path']"], {'trust_remote_code': '(True)'}), "(text_splitter_dict[splitter_name][\n 'tokenizer_name_or_path'], trust_remote_code=True)\n", (9728, 9818), False, 'from transformers import AutoTokenizer\n'), ((10161, 10256), 'langchain.text_splitter.TextSplitter', 'TextSplitter', ([], {'pipeline': '"""zh_core_web_sm"""', 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), "(pipeline='zh_core_web_sm', chunk_size=chunk_size,\n chunk_overlap=chunk_overlap)\n", (10173, 10256), False, 'from langchain.text_splitter import TextSplitter\n'), ((12717, 12748), 'os.path.basename', 'os.path.basename', (['self.filepath'], {}), '(self.filepath)\n', (12733, 12748), False, 'import os\n'), ((10407, 10471), 'langchain.text_splitter.TextSplitter', 'TextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (10419, 10471), False, 'from langchain.text_splitter import TextSplitter\n')]
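A minimal usage sketch for the knowledge-base helpers above, assuming the module is importable as server.knowledge_base.utils (a hypothetical path mirroring its own imports) and that a knowledge base named "samples" containing test.txt exists under KB_ROOT_PATH, as in the module's __main__ block.

from server.knowledge_base.utils import KnowledgeFile, files2docs_in_thread

# Load a single file and split it into langchain Documents.
kb_file = KnowledgeFile(filename="test.txt", knowledge_base_name="samples")
chunks = kb_file.file2text(chunk_size=250, chunk_overlap=50)
print(len(chunks), chunks[0].page_content[:80])

# Convert several files on a thread pool; each yielded item is
# (ok, (kb_name, filename, docs_or_error_message)).
for ok, (kb_name, filename, payload) in files2docs_in_thread(
        [("test.txt", "samples")], chunk_size=250, chunk_overlap=50):
    print(ok, kb_name, filename)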
"""Create a LangChain chain for question/answering.""" from langchain.callbacks.manager import AsyncCallbackManager from langchain.callbacks.tracers import LangChainTracer from langchain.chains import ConversationalRetrievalChain, RetrievalQAWithSourcesChain from langchain.chains.chat_vector_db.prompts import CONDENSE_QUESTION_PROMPT, QA_PROMPT from langchain.chains.llm import LLMChain from langchain.chains.question_answering import load_qa_chain from langchain.llms.huggingface_endpoint import HuggingFaceEndpoint from langchain.vectorstores.base import VectorStore from langchain.vectorstores import Vectara from langchain.memory import ConversationBufferMemory from dotenv import load_dotenv from langchain.chat_models import ChatOpenAI from langchain.embeddings import HuggingFaceInferenceAPIEmbeddings from langchain.prompts import ChatPromptTemplate from langchain_core.runnables import RunnablePassthrough, RunnableParallel from langchain.schema import StrOutputParser from operator import itemgetter import os # def format_docs(docs) -> str: return "\n\n".join(doc.page_content for doc in docs) def get_chain( vectorstore: VectorStore, question_handler, stream_handler ) -> RunnableParallel: """Create a chain for question/answering.""" load_dotenv() manager = AsyncCallbackManager([]) question_manager = AsyncCallbackManager([question_handler]) stream_manager = AsyncCallbackManager([stream_handler]) hf_llm = HuggingFaceEndpoint( endpoint_url="https://euo6lqs9bqkddhci.us-east-1.aws.endpoints.huggingface.cloud", huggingfacehub_api_token=os.environ["HF_TOKEN"], task="text-generation", model_kwargs={ "temperature": 0.1, "max_new_tokens": 488, }, ) retriever = vectorstore.as_retriever(search_kwargs={"k": 2}) prompt_template = """\ Use the provided context to answer the user's question. If you don't know the answer, say you don't know. Context: {context} Question: {question} Answer in french and do not start with 'Réponse:' """ rag_prompt = ChatPromptTemplate.from_template(prompt_template) entry_point_chain = { "context": lambda input: format_docs(input["documents"]), "question": itemgetter("question"), } rag_chain = entry_point_chain | rag_prompt | hf_llm | StrOutputParser() rag_chain_with_sources = RunnableParallel( {"documents": retriever, "question": RunnablePassthrough()} ) | { "documents": lambda input: [doc.metadata for doc in input["documents"]], "answer": rag_chain, } return rag_chain_with_sources
[ "langchain.llms.huggingface_endpoint.HuggingFaceEndpoint", "langchain.schema.StrOutputParser", "langchain.callbacks.manager.AsyncCallbackManager", "langchain_core.runnables.RunnablePassthrough", "langchain.prompts.ChatPromptTemplate.from_template" ]
[((1270, 1283), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1281, 1283), False, 'from dotenv import load_dotenv\n'), ((1298, 1322), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[]'], {}), '([])\n', (1318, 1322), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((1346, 1386), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[question_handler]'], {}), '([question_handler])\n', (1366, 1386), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((1408, 1446), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[stream_handler]'], {}), '([stream_handler])\n', (1428, 1446), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((1461, 1707), 'langchain.llms.huggingface_endpoint.HuggingFaceEndpoint', 'HuggingFaceEndpoint', ([], {'endpoint_url': '"""https://euo6lqs9bqkddhci.us-east-1.aws.endpoints.huggingface.cloud"""', 'huggingfacehub_api_token': "os.environ['HF_TOKEN']", 'task': '"""text-generation"""', 'model_kwargs': "{'temperature': 0.1, 'max_new_tokens': 488}"}), "(endpoint_url=\n 'https://euo6lqs9bqkddhci.us-east-1.aws.endpoints.huggingface.cloud',\n huggingfacehub_api_token=os.environ['HF_TOKEN'], task='text-generation',\n model_kwargs={'temperature': 0.1, 'max_new_tokens': 488})\n", (1480, 1707), False, 'from langchain.llms.huggingface_endpoint import HuggingFaceEndpoint\n'), ((2116, 2165), 'langchain.prompts.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', (['prompt_template'], {}), '(prompt_template)\n', (2148, 2165), False, 'from langchain.prompts import ChatPromptTemplate\n'), ((2279, 2301), 'operator.itemgetter', 'itemgetter', (['"""question"""'], {}), "('question')\n", (2289, 2301), False, 'from operator import itemgetter\n'), ((2367, 2384), 'langchain.schema.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (2382, 2384), False, 'from langchain.schema import StrOutputParser\n'), ((2478, 2499), 'langchain_core.runnables.RunnablePassthrough', 'RunnablePassthrough', ([], {}), '()\n', (2497, 2499), False, 'from langchain_core.runnables import RunnablePassthrough, RunnableParallel\n')]
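A minimal usage sketch for get_chain above, assuming a populated Vectara index whose credentials are read from the usual VECTARA_* environment variables (any VectorStore would do) and a valid HF_TOKEN; the chain is invoked with the raw question string, which the RunnableParallel routes both to the retriever and to the "question" key.

from langchain.vectorstores import Vectara

vectorstore = Vectara()  # reads VECTARA_CUSTOMER_ID / VECTARA_CORPUS_ID / VECTARA_API_KEY
chain = get_chain(vectorstore, question_handler=None, stream_handler=None)

result = chain.invoke("Qu'est-ce que LangChain ?")
print(result["answer"])     # French answer generated by the HuggingFace endpoint
print(result["documents"])  # metadata of the retrieved source documents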
"""Push and pull to the LangChain Hub.""" from __future__ import annotations from typing import TYPE_CHECKING, Any, Optional from langchain.load.dump import dumps from langchain.load.load import loads if TYPE_CHECKING: from langchainhub import Client def _get_client(api_url: Optional[str] = None, api_key: Optional[str] = None) -> Client: try: from langchainhub import Client except ImportError as e: raise ImportError( "Could not import langchainhub, please install with `pip install " "langchainhub`." ) from e # Client logic will also attempt to load URL/key from environment variables return Client(api_url, api_key=api_key) def push( repo_full_name: str, object: Any, *, api_url: Optional[str] = None, api_key: Optional[str] = None, parent_commit_hash: Optional[str] = "latest", new_repo_is_public: bool = False, new_repo_description: str = "", ) -> str: """ Pushes an object to the hub and returns the URL it can be viewed at in a browser. :param repo_full_name: The full name of the repo to push to in the format of `owner/repo`. :param object: The LangChain to serialize and push to the hub. :param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service if you have an api key set, or a localhost instance if not. :param api_key: The API key to use to authenticate with the LangChain Hub API. :param parent_commit_hash: The commit hash of the parent commit to push to. Defaults to the latest commit automatically. :param new_repo_is_public: Whether the repo should be public. Defaults to False (Private by default). :param new_repo_description: The description of the repo. Defaults to an empty string. """ client = _get_client(api_url=api_url, api_key=api_key) manifest_json = dumps(object) message = client.push( repo_full_name, manifest_json, parent_commit_hash=parent_commit_hash, new_repo_is_public=new_repo_is_public, new_repo_description=new_repo_description, ) return message def pull( owner_repo_commit: str, *, api_url: Optional[str] = None, api_key: Optional[str] = None, ) -> Any: """ Pulls an object from the hub and returns it as a LangChain object. :param owner_repo_commit: The full name of the repo to pull from in the format of `owner/repo:commit_hash`. :param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service if you have an api key set, or a localhost instance if not. :param api_key: The API key to use to authenticate with the LangChain Hub API. """ client = _get_client(api_url=api_url, api_key=api_key) resp: str = client.pull(owner_repo_commit) return loads(resp)
[
 "langchain.load.load.loads",
 "langchainhub.Client",
 "langchain.load.dump.dumps"
]
[((671, 703), 'langchainhub.Client', 'Client', (['api_url'], {'api_key': 'api_key'}), '(api_url, api_key=api_key)\n', (677, 703), False, 'from langchainhub import Client\n'), ((1907, 1920), 'langchain.load.dump.dumps', 'dumps', (['object'], {}), '(object)\n', (1912, 1920), False, 'from langchain.load.dump import dumps\n'), ((2857, 2868), 'langchain.load.load.loads', 'loads', (['resp'], {}), '(resp)\n', (2862, 2868), False, 'from langchain.load.load import loads\n')]
from Google import Create_Service
import gspread
import langchain
from langchain.chat_models import ChatOpenAI
import pymysql
from langchain.document_loaders.csv_loader import UnstructuredCSVLoader
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain import PromptTemplate, LLMChain
import os
import csv
from twilio.rest import Client
from dotenv import load_dotenv

pymysql.install_as_MySQLdb()
load_dotenv()

OPENAI_API_TOKEN = os.getenv("OPENAI_API_TOKEN")
os.environ["OPENAI_API_TOKEN"] = OPENAI_API_TOKEN
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "./credentials.json"

# Your Account SID from twilio.com/console
account_sid = os.environ["TWILIO_ACCOUNT_SID"]
# Your Auth Token from twilio.com/console
auth_token = os.environ["TWILIO_AUTH_TOKEN"]
client = Client(account_sid, auth_token)

chat_llm = ChatOpenAI(model="gpt-4", openai_api_key=OPENAI_API_TOKEN)

# connect to the database
connection = pymysql.connect(
    host=os.environ["DB_HOST"],
    user=os.environ["DB_USERNAME"],
    password=os.environ["DB_PASSWORD"],
    database=os.environ["DATABASE"],
    ssl_ca='./cert.pem'  # From AWS's cert.pem
)

# using service account credentials json file to instantiate the gspread client
Service_Account = gspread.service_account("credentials.json")

cursor = connection.cursor()

# get all clients
query = """
SELECT pers.phone, pers.fname, pr.programName
FROM Client c
JOIN Profile pr ON c.profileId = pr.id
JOIN Personal pers ON c.personalId = pers.id
WHERE NOT EXISTS (
    SELECT *
    FROM Note n
    WHERE c.id = n.clientId AND n.note = 'Cancelled'
);
"""
cursor.execute(query)

# list of tuples where index 0 is number, index 1 is name, index 2 is program Name
client_programs = cursor.fetchall()


def fetch_user(pr_name, first_name):
    spread = Service_Account.open(pr_name)
    # retrieve client diary using their program name
    user_info = spread.worksheet("Goals/Info")

    # create a file to store the contents of their goals
    with open("/tmp/context.csv", "w", newline='') as f:
        writer = csv.writer(f)
        writer.writerows(user_info.get_all_values())

    # use unstructured loader to initialize the data source for RAG
    docs = UnstructuredCSVLoader('/tmp/context.csv').load()

    # voila, we have our loader; now what is the best way to summarize the user content as context
    # for another model to use, given the very specific nature of the response?

    # Define summarization prompt, using Directional Stimulus Prompting,
    # which utilizes stimulus/hints (keywords, in this case, relating to the client's ideal workout/lifestyle mantra)
    # to provide fine, question-specific guidance to GPT-4 for generating a summary
    prompt_template = """Write a concise summary of the following client's workout goals based on the hint:
    "{text}"
    Hint: Goals; Change; Optimization; Improvement; Consistency; Frequency; Healthy; Unhealthy; Weight Loss; Fat Loss; Weight Gain; Muscle Gain; Diet; Eating; Sleep.
    CONCISE SUMMARY:"""
    prompt = PromptTemplate.from_template(prompt_template)
    llm_chain = LLMChain(llm=chat_llm, prompt=prompt)

    # Define StuffDocumentsChain
    summary_chain = StuffDocumentsChain(
        llm_chain=llm_chain, document_variable_name="text"
    )
    user_workout_context = summary_chain.run(docs)

    # This is the final LLMChain prompt template, which will serve as a system prompt
    # and drive the purpose of our daily motivational messages
    temp = """
    You are BolicBot, a super passionate and intelligent fitness coach and motivator, who can capture the hearts and minds of your clients by helping them push out of their comfort zone. Your sole purpose for existence is to drive people to be better versions of themselves.

    Using your client's information here: {context} , and their name:{name}, send them a very short and concise text, motivating them to push through their very specific problems.
    """
    prompt = PromptTemplate(input_variables=["context", "name"], template=temp)
    prompt.format(context=user_workout_context, name=first_name)
    chain = LLMChain(llm=chat_llm, prompt=prompt)
    return chain.run({"context": user_workout_context, "name": first_name})


for each_client in client_programs:
    answer = fetch_user(each_client[2], each_client[1])
    client.messages.create(to=each_client[0], from_=os.getenv("BOLIC_NUMBER"), body=answer)

cursor.close()
connection.close()
[
 "langchain.LLMChain",
 "langchain.document_loaders.csv_loader.UnstructuredCSVLoader",
 "langchain.chat_models.ChatOpenAI",
 "langchain.PromptTemplate.from_template",
 "langchain.chains.combine_documents.stuff.StuffDocumentsChain",
 "langchain.PromptTemplate"
]
[((402, 430), 'pymysql.install_as_MySQLdb', 'pymysql.install_as_MySQLdb', ([], {}), '()\n', (428, 430), False, 'import pymysql\n'), ((433, 446), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (444, 446), False, 'from dotenv import load_dotenv\n'), ((465, 494), 'os.getenv', 'os.getenv', (['"""OPENAI_API_TOKEN"""'], {}), "('OPENAI_API_TOKEN')\n", (474, 494), False, 'import os\n'), ((811, 842), 'twilio.rest.Client', 'Client', (['account_sid', 'auth_token'], {}), '(account_sid, auth_token)\n', (817, 842), False, 'from twilio.rest import Client\n'), ((854, 912), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-4"""', 'openai_api_key': 'OPENAI_API_TOKEN'}), "(model='gpt-4', openai_api_key=OPENAI_API_TOKEN)\n", (864, 912), False, 'from langchain.chat_models import ChatOpenAI\n'), ((953, 1126), 'pymysql.connect', 'pymysql.connect', ([], {'host': "os.environ['DB_HOST']", 'user': "os.environ['DB_USERNAME']", 'password': "os.environ['DB_PASSWORD']", 'database': "os.environ['DATABASE']", 'ssl_ca': '"""./cert.pem"""'}), "(host=os.environ['DB_HOST'], user=os.environ['DB_USERNAME'],\n password=os.environ['DB_PASSWORD'], database=os.environ['DATABASE'],\n ssl_ca='./cert.pem')\n", (968, 1126), False, 'import pymysql\n'), ((1385, 1428), 'gspread.service_account', 'gspread.service_account', (['"""credentials.json"""'], {}), "('credentials.json')\n", (1408, 1428), False, 'import gspread\n'), ((3246, 3291), 'langchain.PromptTemplate.from_template', 'PromptTemplate.from_template', (['prompt_template'], {}), '(prompt_template)\n', (3274, 3291), False, 'from langchain import PromptTemplate, LLMChain\n'), ((3312, 3349), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'chat_llm', 'prompt': 'prompt'}), '(llm=chat_llm, prompt=prompt)\n', (3320, 3349), False, 'from langchain import PromptTemplate, LLMChain\n'), ((3413, 3484), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'document_variable_name': '"""text"""'}), "(llm_chain=llm_chain, document_variable_name='text')\n", (3432, 3484), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((4272, 4338), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['context', 'name']", 'template': 'temp'}), "(input_variables=['context', 'name'], template=temp)\n", (4286, 4338), False, 'from langchain import PromptTemplate, LLMChain\n'), ((4422, 4459), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'chat_llm', 'prompt': 'prompt'}), '(llm=chat_llm, prompt=prompt)\n', (4430, 4459), False, 'from langchain import PromptTemplate, LLMChain\n'), ((2216, 2229), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (2226, 2229), False, 'import csv\n'), ((2376, 2417), 'langchain.document_loaders.csv_loader.UnstructuredCSVLoader', 'UnstructuredCSVLoader', (['"""/tmp/context.csv"""'], {}), "('/tmp/context.csv')\n", (2397, 2417), False, 'from langchain.document_loaders.csv_loader import UnstructuredCSVLoader\n'), ((4692, 4717), 'os.getenv', 'os.getenv', (['"""BOLIC_NUMBER"""'], {}), "('BOLIC_NUMBER')\n", (4701, 4717), False, 'import os\n')]
"""A bot that uses either GPT-4 or ChatGPT to generate responses without any hidden prompts.""" from typing import AsyncGenerator from langchain.chat_models import PromptLayerChatOpenAI from langchain.schema import ChatMessage from mergedbots import MergedMessage, MergedBot from mergedbots.ext.langchain_integration import LangChainParagraphStreamingCallback from experiments.common.bot_manager import SLOW_GPT_MODEL, bot_manager @bot_manager.create_bot( handle="PlainGPT", description=( "A bot that uses either GPT-4 or ChatGPT to generate responses. Useful when the user seeks information and " "needs factual answers." ), ) async def plain_gpt(bot: MergedBot, message: MergedMessage) -> AsyncGenerator[MergedMessage, None]: """A bot that uses either GPT-4 or ChatGPT to generate responses without any hidden prompts.""" if not message.previous_msg and not message.is_visible_to_bots: yield await message.service_followup_as_final_response(bot, "```\nCONVERSATION RESTARTED\n```") return model_name = SLOW_GPT_MODEL yield await message.service_followup_for_user(bot, f"`{model_name}`") print() paragraph_streaming = LangChainParagraphStreamingCallback(bot, message, verbose=True) chat_llm = PromptLayerChatOpenAI( model_name=model_name, temperature=0.0, streaming=True, callbacks=[paragraph_streaming], model_kwargs={"user": str(message.originator.uuid)}, pl_tags=["mb_plain_gpt"], ) conversation = await message.get_full_conversion() async for msg in paragraph_streaming.stream_from_coroutine( chat_llm.agenerate( [ [ ChatMessage( role="user" if msg.is_sent_by_originator else "assistant", content=msg.content, ) for msg in conversation ] ], ) ): yield msg print()
[
 "langchain.schema.ChatMessage"
]
[((436, 631), 'experiments.common.bot_manager.bot_manager.create_bot', 'bot_manager.create_bot', ([], {'handle': '"""PlainGPT"""', 'description': '"""A bot that uses either GPT-4 or ChatGPT to generate responses. Useful when the user seeks information and needs factual answers."""'}), "(handle='PlainGPT', description=\n 'A bot that uses either GPT-4 or ChatGPT to generate responses. Useful when the user seeks information and needs factual answers.'\n )\n", (458, 631), False, 'from experiments.common.bot_manager import SLOW_GPT_MODEL, bot_manager\n'), ((1193, 1256), 'mergedbots.ext.langchain_integration.LangChainParagraphStreamingCallback', 'LangChainParagraphStreamingCallback', (['bot', 'message'], {'verbose': '(True)'}), '(bot, message, verbose=True)\n', (1228, 1256), False, 'from mergedbots.ext.langchain_integration import LangChainParagraphStreamingCallback\n'), ((1716, 1811), 'langchain.schema.ChatMessage', 'ChatMessage', ([], {'role': "('user' if msg.is_sent_by_originator else 'assistant')", 'content': 'msg.content'}), "(role='user' if msg.is_sent_by_originator else 'assistant',\n content=msg.content)\n", (1727, 1811), False, 'from langchain.schema import ChatMessage\n')]
from flask import Flask, request, render_template, jsonify, make_response
import os
from llama_index import SimpleDirectoryReader, VectorStoreIndex, StorageContext, load_index_from_storage
from llama_index.embeddings import LangchainEmbedding
from llama_index.llms.palm import PaLM
from langchain.embeddings.gpt4all import GPT4AllEmbeddings
from llama_index import ServiceContext
from llama_index import Prompt
from flask_cors import CORS
from dotenv import load_dotenv

load_dotenv()

embed_model = LangchainEmbedding(GPT4AllEmbeddings())
palm_api_key = os.getenv('GOOGLE_API_KEY')
llm = PaLM(api_key=palm_api_key)

app = Flask(__name__)
CORS(app)

index = None
storage_context = StorageContext.from_defaults(persist_dir='./storage')
service_context = ServiceContext.from_defaults(
    llm=llm, chunk_size=8000, chunk_overlap=20, embed_model=embed_model)
# index = VectorStoreIndex.from_documents(documents, service_context=service_context)
# load index
index = load_index_from_storage(
    storage_context, service_context=service_context,)

# template = (
#     "We have provided context information below. \n"
#     "---------------------\n"
#     "{context_str}"
#     "\n---------------------\n"
#     "Given this information, please answer the question and if you don't know the answer then kindly reply 'I am not sure about it.': {query_str}\n"
#     "---------------------\n"
#     "Do not guess the answer from your side. \n"
# )
# qa_template = Prompt(template)


@app.route("/")
def home():
    return render_template('index.html')


@app.route("/query", methods=["GET"])
def query_index():
    global index
    query_text = request.args.get("text", None)
    if query_text is None:
        return "No text found, please include a ?text=blah parameter in the URL", 400
    query_engine = index.as_query_engine()
    response = query_engine.query(query_text)
    response_json = {
        "text": str(response)
    }
    return make_response(jsonify(response_json)), 200
    # return str(response), 200


if __name__ == "__main__":
    app.run(debug=True)
[
 "langchain.embeddings.gpt4all.GPT4AllEmbeddings"
]
[((470, 483), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (481, 483), False, 'from dotenv import load_dotenv\n'), ((555, 582), 'os.getenv', 'os.getenv', (['"""GOOGLE_API_KEY"""'], {}), "('GOOGLE_API_KEY')\n", (564, 582), False, 'import os\n'), ((590, 616), 'llama_index.llms.palm.PaLM', 'PaLM', ([], {'api_key': 'palm_api_key'}), '(api_key=palm_api_key)\n', (594, 616), False, 'from llama_index.llms.palm import PaLM\n'), ((623, 638), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (628, 638), False, 'from flask import Flask, request, render_template, jsonify, make_response\n'), ((639, 648), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (643, 648), False, 'from flask_cors import CORS\n'), ((683, 736), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (711, 736), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((755, 856), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'chunk_size': '(8000)', 'chunk_overlap': '(20)', 'embed_model': 'embed_model'}), '(llm=llm, chunk_size=8000, chunk_overlap=20,\n embed_model=embed_model)\n', (783, 856), False, 'from llama_index import ServiceContext\n'), ((965, 1038), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (988, 1038), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((518, 537), 'langchain.embeddings.gpt4all.GPT4AllEmbeddings', 'GPT4AllEmbeddings', ([], {}), '()\n', (535, 537), False, 'from langchain.embeddings.gpt4all import GPT4AllEmbeddings\n'), ((1520, 1549), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (1535, 1549), False, 'from flask import Flask, request, render_template, jsonify, make_response\n'), ((1643, 1673), 'flask.request.args.get', 'request.args.get', (['"""text"""', 'None'], {}), "('text', None)\n", (1659, 1673), False, 'from flask import Flask, request, render_template, jsonify, make_response\n'), ((1959, 1981), 'flask.jsonify', 'jsonify', (['response_json'], {}), '(response_json)\n', (1966, 1981), False, 'from flask import Flask, request, render_template, jsonify, make_response\n')]
import time  # ← import the time module to measure execution time
import langchain
from langchain.cache import InMemoryCache  # ← import InMemoryCache
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage

langchain.llm_cache = InMemoryCache()  # ← set an InMemoryCache as the llm_cache

chat = ChatOpenAI()

start = time.time()  # ← record the start time
result = chat([  # ← make the first call
    HumanMessage(content="こんにちは!")
])
end = time.time()  # ← record the end time
print(result.content)
print(f"実行時間: {end - start}秒")

start = time.time()  # ← record the start time
result = chat([  # ← running the same request a second time uses the cache, so it completes immediately
    HumanMessage(content="こんにちは!")
])
end = time.time()  # ← record the end time
print(result.content)
print(f"実行時間: {end - start}秒")
[
 "langchain.cache.InMemoryCache",
 "langchain.schema.HumanMessage",
 "langchain.chat_models.ChatOpenAI"
]
[((237, 252), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (250, 252), False, 'from langchain.cache import InMemoryCache\n'), ((291, 303), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (301, 303), False, 'from langchain.chat_models import ChatOpenAI\n'), ((312, 323), 'time.time', 'time.time', ([], {}), '()\n', (321, 323), False, 'import time\n'), ((411, 422), 'time.time', 'time.time', ([], {}), '()\n', (420, 422), False, 'import time\n'), ((498, 509), 'time.time', 'time.time', ([], {}), '()\n', (507, 509), False, 'import time\n'), ((627, 638), 'time.time', 'time.time', ([], {}), '()\n', (636, 638), False, 'import time\n'), ((370, 400), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""こんにちは!"""'}), "(content='こんにちは!')\n", (382, 400), False, 'from langchain.schema import HumanMessage\n'), ((586, 616), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""こんにちは!"""'}), "(content='こんにちは!')\n", (598, 616), False, 'from langchain.schema import HumanMessage\n')]
import langchain
import openai
from dotenv import load_dotenv
from langchain.chains import ConversationChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.schema import HumanMessage

load_dotenv()

langchain.verbose = True
# openai.log = "debug"

chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)

memory = ConversationBufferMemory()
memory.chat_memory.add_user_message("Hi! I'm Oshima.")
memory.chat_memory.add_ai_message("Whats up?")

# Using ConversationChain as in the Memory tutorial, like below, puts the entire history
# into the user role, which is not the proper way to use the Chat Completions API
conversation = ConversationChain(llm=chat, memory=memory)
conversation_result = conversation.predict(input="Do you know my name?")
print(conversation_result)

# Passing the history yourself produces a prompt that makes use of the assistant role
# and is appropriate for the Chat Completions API
# messages = memory.chat_memory.messages
# messages.append(HumanMessage(content="Do you know my name?"))
# raw_chat_result = chat(messages)
# print(raw_chat_result.content)
[
 "langchain.chains.ConversationChain",
 "langchain.memory.ConversationBufferMemory",
 "langchain.chat_models.ChatOpenAI"
]
[((251, 264), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (262, 264), False, 'from dotenv import load_dotenv\n'), ((322, 375), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model_name='gpt-3.5-turbo', temperature=0)\n", (332, 375), False, 'from langchain.chat_models import ChatOpenAI\n'), ((386, 412), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {}), '()\n', (410, 412), False, 'from langchain.memory import ConversationBufferMemory\n'), ((647, 689), 'langchain.chains.ConversationChain', 'ConversationChain', ([], {'llm': 'chat', 'memory': 'memory'}), '(llm=chat, memory=memory)\n', (664, 689), False, 'from langchain.chains import ConversationChain\n')]
from __future__ import annotations import asyncio import functools import logging import os import uuid from concurrent.futures import ThreadPoolExecutor from contextlib import asynccontextmanager, contextmanager from contextvars import ContextVar from typing import ( TYPE_CHECKING, Any, AsyncGenerator, Coroutine, Dict, Generator, List, Optional, Sequence, Type, TypeVar, Union, cast, ) from uuid import UUID from tenacity import RetryCallState import langchain from langchain.callbacks.base import ( BaseCallbackHandler, BaseCallbackManager, Callbacks, ChainManagerMixin, LLMManagerMixin, RetrieverManagerMixin, RunManagerMixin, ToolManagerMixin, ) from langchain.callbacks.openai_info import OpenAICallbackHandler from langchain.callbacks.stdout import StdOutCallbackHandler from langchain.callbacks.tracers import run_collector from langchain.callbacks.tracers.langchain import ( LangChainTracer, ) from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1 from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler from langchain.callbacks.tracers.wandb import WandbTracer from langchain.schema import ( AgentAction, AgentFinish, Document, LLMResult, ) from langchain.schema.messages import BaseMessage, get_buffer_string from langchain.schema.output import ChatGenerationChunk, GenerationChunk if TYPE_CHECKING: from langsmith import Client as LangSmithClient logger = logging.getLogger(__name__) openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar( "openai_callback", default=None ) tracing_callback_var: ContextVar[ Optional[LangChainTracerV1] ] = ContextVar( # noqa: E501 "tracing_callback", default=None ) wandb_tracing_callback_var: ContextVar[ Optional[WandbTracer] ] = ContextVar( # noqa: E501 "tracing_wandb_callback", default=None ) tracing_v2_callback_var: ContextVar[ Optional[LangChainTracer] ] = ContextVar( # noqa: E501 "tracing_callback_v2", default=None ) run_collector_var: ContextVar[ Optional[run_collector.RunCollectorCallbackHandler] ] = ContextVar( # noqa: E501 "run_collector", default=None ) def _get_debug() -> bool: return langchain.debug @contextmanager def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]: """Get the OpenAI callback handler in a context manager. which conveniently exposes token and cost information. Returns: OpenAICallbackHandler: The OpenAI callback handler. Example: >>> with get_openai_callback() as cb: ... # Use the OpenAI callback handler """ cb = OpenAICallbackHandler() openai_callback_var.set(cb) yield cb openai_callback_var.set(None) @contextmanager def tracing_enabled( session_name: str = "default", ) -> Generator[TracerSessionV1, None, None]: """Get the Deprecated LangChainTracer in a context manager. Args: session_name (str, optional): The name of the session. Defaults to "default". Returns: TracerSessionV1: The LangChainTracer session. Example: >>> with tracing_enabled() as session: ... # Use the LangChainTracer session """ cb = LangChainTracerV1() session = cast(TracerSessionV1, cb.load_session(session_name)) tracing_callback_var.set(cb) yield session tracing_callback_var.set(None) @contextmanager def wandb_tracing_enabled( session_name: str = "default", ) -> Generator[None, None, None]: """Get the WandbTracer in a context manager. Args: session_name (str, optional): The name of the session. Defaults to "default". Returns: None Example: >>> with wandb_tracing_enabled() as session: ... 
# Use the WandbTracer session """ cb = WandbTracer() wandb_tracing_callback_var.set(cb) yield None wandb_tracing_callback_var.set(None) @contextmanager def tracing_v2_enabled( project_name: Optional[str] = None, *, example_id: Optional[Union[str, UUID]] = None, tags: Optional[List[str]] = None, client: Optional[LangSmithClient] = None, ) -> Generator[LangChainTracer, None, None]: """Instruct LangChain to log all runs in context to LangSmith. Args: project_name (str, optional): The name of the project. Defaults to "default". example_id (str or UUID, optional): The ID of the example. Defaults to None. tags (List[str], optional): The tags to add to the run. Defaults to None. Returns: None Example: >>> with tracing_v2_enabled(): ... # LangChain code will automatically be traced You can use this to fetch the LangSmith run URL: >>> with tracing_v2_enabled() as cb: ... chain.invoke("foo") ... run_url = cb.get_run_url() """ if isinstance(example_id, str): example_id = UUID(example_id) cb = LangChainTracer( example_id=example_id, project_name=project_name, tags=tags, client=client, ) tracing_v2_callback_var.set(cb) yield cb tracing_v2_callback_var.set(None) @contextmanager def collect_runs() -> Generator[run_collector.RunCollectorCallbackHandler, None, None]: """Collect all run traces in context. Returns: run_collector.RunCollectorCallbackHandler: The run collector callback handler. Example: >>> with collect_runs() as runs_cb: chain.invoke("foo") run_id = runs_cb.traced_runs[0].id """ cb = run_collector.RunCollectorCallbackHandler() run_collector_var.set(cb) yield cb run_collector_var.set(None) @contextmanager def trace_as_chain_group( group_name: str, callback_manager: Optional[CallbackManager] = None, *, inputs: Optional[Dict[str, Any]] = None, project_name: Optional[str] = None, example_id: Optional[Union[str, UUID]] = None, run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, ) -> Generator[CallbackManagerForChainGroup, None, None]: """Get a callback manager for a chain group in a context manager. Useful for grouping different calls together as a single run even if they aren't composed in a single chain. Args: group_name (str): The name of the chain group. callback_manager (CallbackManager, optional): The callback manager to use. inputs (Dict[str, Any], optional): The inputs to the chain group. project_name (str, optional): The name of the project. Defaults to None. example_id (str or UUID, optional): The ID of the example. Defaults to None. run_id (UUID, optional): The ID of the run. tags (List[str], optional): The inheritable tags to apply to all runs. Defaults to None. Returns: CallbackManagerForChainGroup: The callback manager for the chain group. Example: .. 
code-block:: python llm_input = "Foo" with trace_as_chain_group("group_name", inputs={"input": llm_input}) as manager: # Use the callback manager for the chain group res = llm.predict(llm_input, callbacks=manager) manager.on_chain_end({"output": res}) """ # noqa: E501 cb = cast( Callbacks, [ LangChainTracer( project_name=project_name, example_id=example_id, ) ] if callback_manager is None else callback_manager, ) cm = CallbackManager.configure( inheritable_callbacks=cb, inheritable_tags=tags, ) run_manager = cm.on_chain_start({"name": group_name}, inputs or {}, run_id=run_id) child_cm = run_manager.get_child() group_cm = CallbackManagerForChainGroup( child_cm.handlers, child_cm.inheritable_handlers, child_cm.parent_run_id, parent_run_manager=run_manager, tags=child_cm.tags, inheritable_tags=child_cm.inheritable_tags, metadata=child_cm.metadata, inheritable_metadata=child_cm.inheritable_metadata, ) try: yield group_cm except Exception as e: if not group_cm.ended: run_manager.on_chain_error(e) raise e else: if not group_cm.ended: run_manager.on_chain_end({}) @asynccontextmanager async def atrace_as_chain_group( group_name: str, callback_manager: Optional[AsyncCallbackManager] = None, *, inputs: Optional[Dict[str, Any]] = None, project_name: Optional[str] = None, example_id: Optional[Union[str, UUID]] = None, run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, ) -> AsyncGenerator[AsyncCallbackManagerForChainGroup, None]: """Get an async callback manager for a chain group in a context manager. Useful for grouping different async calls together as a single run even if they aren't composed in a single chain. Args: group_name (str): The name of the chain group. callback_manager (AsyncCallbackManager, optional): The async callback manager to use, which manages tracing and other callback behavior. project_name (str, optional): The name of the project. Defaults to None. example_id (str or UUID, optional): The ID of the example. Defaults to None. run_id (UUID, optional): The ID of the run. tags (List[str], optional): The inheritable tags to apply to all runs. Defaults to None. Returns: AsyncCallbackManager: The async callback manager for the chain group. Example: .. 
code-block:: python llm_input = "Foo" async with atrace_as_chain_group("group_name", inputs={"input": llm_input}) as manager: # Use the async callback manager for the chain group res = await llm.apredict(llm_input, callbacks=manager) await manager.on_chain_end({"output": res}) """ # noqa: E501 cb = cast( Callbacks, [ LangChainTracer( project_name=project_name, example_id=example_id, ) ] if callback_manager is None else callback_manager, ) cm = AsyncCallbackManager.configure(inheritable_callbacks=cb, inheritable_tags=tags) run_manager = await cm.on_chain_start( {"name": group_name}, inputs or {}, run_id=run_id ) child_cm = run_manager.get_child() group_cm = AsyncCallbackManagerForChainGroup( child_cm.handlers, child_cm.inheritable_handlers, child_cm.parent_run_id, parent_run_manager=run_manager, tags=child_cm.tags, inheritable_tags=child_cm.inheritable_tags, metadata=child_cm.metadata, inheritable_metadata=child_cm.inheritable_metadata, ) try: yield group_cm except Exception as e: if not group_cm.ended: await run_manager.on_chain_error(e) raise e else: if not group_cm.ended: await run_manager.on_chain_end({}) def _handle_event( handlers: List[BaseCallbackHandler], event_name: str, ignore_condition_name: Optional[str], *args: Any, **kwargs: Any, ) -> None: """Generic event handler for CallbackManager.""" coros: List[Coroutine[Any, Any, Any]] = [] try: message_strings: Optional[List[str]] = None for handler in handlers: try: if ignore_condition_name is None or not getattr( handler, ignore_condition_name ): event = getattr(handler, event_name)(*args, **kwargs) if asyncio.iscoroutine(event): coros.append(event) except NotImplementedError as e: if event_name == "on_chat_model_start": if message_strings is None: message_strings = [get_buffer_string(m) for m in args[1]] _handle_event( [handler], "on_llm_start", "ignore_llm", args[0], message_strings, *args[2:], **kwargs, ) else: handler_name = handler.__class__.__name__ logger.warning( f"NotImplementedError in {handler_name}.{event_name}" f" callback: {e}" ) except Exception as e: logger.warning( f"Error in {handler.__class__.__name__}.{event_name} callback: {e}" ) if handler.raise_error: raise e finally: if coros: try: # Raises RuntimeError if there is no current event loop. asyncio.get_running_loop() loop_running = True except RuntimeError: loop_running = False if loop_running: # If we try to submit this coroutine to the running loop # we end up in a deadlock, as we'd have gotten here from a # running coroutine, which we cannot interrupt to run this one. # The solution is to create a new loop in a new thread. with ThreadPoolExecutor(1) as executor: executor.submit(_run_coros, coros).result() else: _run_coros(coros) def _run_coros(coros: List[Coroutine[Any, Any, Any]]) -> None: if hasattr(asyncio, "Runner"): # Python 3.11+ # Run the coroutines in a new event loop, taking care to # - install signal handlers # - run pending tasks scheduled by `coros` # - close asyncgens and executors # - close the loop with asyncio.Runner() as runner: # Run the coroutine, get the result for coro in coros: runner.run(coro) # Run pending tasks scheduled by coros until they are all done while pending := asyncio.all_tasks(runner.get_loop()): runner.run(asyncio.wait(pending)) else: # Before Python 3.11 we need to run each coroutine in a new event loop # as the Runner api is not available. 
for coro in coros: asyncio.run(coro) async def _ahandle_event_for_handler( handler: BaseCallbackHandler, event_name: str, ignore_condition_name: Optional[str], *args: Any, **kwargs: Any, ) -> None: try: if ignore_condition_name is None or not getattr(handler, ignore_condition_name): event = getattr(handler, event_name) if asyncio.iscoroutinefunction(event): await event(*args, **kwargs) else: if handler.run_inline: event(*args, **kwargs) else: await asyncio.get_event_loop().run_in_executor( None, functools.partial(event, *args, **kwargs) ) except NotImplementedError as e: if event_name == "on_chat_model_start": message_strings = [get_buffer_string(m) for m in args[1]] await _ahandle_event_for_handler( handler, "on_llm_start", "ignore_llm", args[0], message_strings, *args[2:], **kwargs, ) else: logger.warning( f"NotImplementedError in {handler.__class__.__name__}.{event_name}" f" callback: {e}" ) except Exception as e: logger.warning( f"Error in {handler.__class__.__name__}.{event_name} callback: {e}" ) if handler.raise_error: raise e async def _ahandle_event( handlers: List[BaseCallbackHandler], event_name: str, ignore_condition_name: Optional[str], *args: Any, **kwargs: Any, ) -> None: """Generic event handler for AsyncCallbackManager.""" for handler in [h for h in handlers if h.run_inline]: await _ahandle_event_for_handler( handler, event_name, ignore_condition_name, *args, **kwargs ) await asyncio.gather( *( _ahandle_event_for_handler( handler, event_name, ignore_condition_name, *args, **kwargs ) for handler in handlers if not handler.run_inline ) ) BRM = TypeVar("BRM", bound="BaseRunManager") class BaseRunManager(RunManagerMixin): """Base class for run manager (a bound callback manager).""" def __init__( self, *, run_id: UUID, handlers: List[BaseCallbackHandler], inheritable_handlers: List[BaseCallbackHandler], parent_run_id: Optional[UUID] = None, tags: Optional[List[str]] = None, inheritable_tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, inheritable_metadata: Optional[Dict[str, Any]] = None, ) -> None: """Initialize the run manager. Args: run_id (UUID): The ID of the run. handlers (List[BaseCallbackHandler]): The list of handlers. inheritable_handlers (List[BaseCallbackHandler]): The list of inheritable handlers. parent_run_id (UUID, optional): The ID of the parent run. Defaults to None. tags (Optional[List[str]]): The list of tags. inheritable_tags (Optional[List[str]]): The list of inheritable tags. metadata (Optional[Dict[str, Any]]): The metadata. inheritable_metadata (Optional[Dict[str, Any]]): The inheritable metadata. """ self.run_id = run_id self.handlers = handlers self.inheritable_handlers = inheritable_handlers self.parent_run_id = parent_run_id self.tags = tags or [] self.inheritable_tags = inheritable_tags or [] self.metadata = metadata or {} self.inheritable_metadata = inheritable_metadata or {} @classmethod def get_noop_manager(cls: Type[BRM]) -> BRM: """Return a manager that doesn't perform any operations. Returns: BaseRunManager: The noop manager. """ return cls( run_id=uuid.uuid4(), handlers=[], inheritable_handlers=[], tags=[], inheritable_tags=[], metadata={}, inheritable_metadata={}, ) class RunManager(BaseRunManager): """Sync Run Manager.""" def on_text( self, text: str, **kwargs: Any, ) -> Any: """Run when text is received. Args: text (str): The received text. Returns: Any: The result of the callback. 
""" _handle_event( self.handlers, "on_text", None, text, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) def on_retry( self, retry_state: RetryCallState, **kwargs: Any, ) -> None: _handle_event( self.handlers, "on_retry", "ignore_retry", retry_state, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) class ParentRunManager(RunManager): """Sync Parent Run Manager.""" def get_child(self, tag: Optional[str] = None) -> CallbackManager: """Get a child callback manager. Args: tag (str, optional): The tag for the child callback manager. Defaults to None. Returns: CallbackManager: The child callback manager. """ manager = CallbackManager(handlers=[], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) manager.add_tags(self.inheritable_tags) manager.add_metadata(self.inheritable_metadata) if tag is not None: manager.add_tags([tag], False) return manager class AsyncRunManager(BaseRunManager): """Async Run Manager.""" async def on_text( self, text: str, **kwargs: Any, ) -> Any: """Run when text is received. Args: text (str): The received text. Returns: Any: The result of the callback. """ await _ahandle_event( self.handlers, "on_text", None, text, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) async def on_retry( self, retry_state: RetryCallState, **kwargs: Any, ) -> None: await _ahandle_event( self.handlers, "on_retry", "ignore_retry", retry_state, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) class AsyncParentRunManager(AsyncRunManager): """Async Parent Run Manager.""" def get_child(self, tag: Optional[str] = None) -> AsyncCallbackManager: """Get a child callback manager. Args: tag (str, optional): The tag for the child callback manager. Defaults to None. Returns: AsyncCallbackManager: The child callback manager. """ manager = AsyncCallbackManager(handlers=[], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) manager.add_tags(self.inheritable_tags) manager.add_metadata(self.inheritable_metadata) if tag is not None: manager.add_tags([tag], False) return manager class CallbackManagerForLLMRun(RunManager, LLMManagerMixin): """Callback manager for LLM run.""" def on_llm_new_token( self, token: str, *, chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, **kwargs: Any, ) -> None: """Run when LLM generates a new token. Args: token (str): The new token. """ _handle_event( self.handlers, "on_llm_new_token", "ignore_llm", token=token, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, chunk=chunk, **kwargs, ) def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when LLM ends running. Args: response (LLMResult): The LLM result. """ _handle_event( self.handlers, "on_llm_end", "ignore_llm", response, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) def on_llm_error( self, error: BaseException, **kwargs: Any, ) -> None: """Run when LLM errors. Args: error (Exception or KeyboardInterrupt): The error. """ _handle_event( self.handlers, "on_llm_error", "ignore_llm", error, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin): """Async callback manager for LLM run.""" async def on_llm_new_token( self, token: str, *, chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, **kwargs: Any, ) -> None: """Run when LLM generates a new token. 
Args: token (str): The new token. """ await _ahandle_event( self.handlers, "on_llm_new_token", "ignore_llm", token, chunk=chunk, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when LLM ends running. Args: response (LLMResult): The LLM result. """ await _ahandle_event( self.handlers, "on_llm_end", "ignore_llm", response, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) async def on_llm_error( self, error: BaseException, **kwargs: Any, ) -> None: """Run when LLM errors. Args: error (Exception or KeyboardInterrupt): The error. """ await _ahandle_event( self.handlers, "on_llm_error", "ignore_llm", error, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) class CallbackManagerForChainRun(ParentRunManager, ChainManagerMixin): """Callback manager for chain run.""" def on_chain_end(self, outputs: Union[Dict[str, Any], Any], **kwargs: Any) -> None: """Run when chain ends running. Args: outputs (Union[Dict[str, Any], Any]): The outputs of the chain. """ _handle_event( self.handlers, "on_chain_end", "ignore_chain", outputs, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) def on_chain_error( self, error: BaseException, **kwargs: Any, ) -> None: """Run when chain errors. Args: error (Exception or KeyboardInterrupt): The error. """ _handle_event( self.handlers, "on_chain_error", "ignore_chain", error, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Run when agent action is received. Args: action (AgentAction): The agent action. Returns: Any: The result of the callback. """ _handle_event( self.handlers, "on_agent_action", "ignore_agent", action, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any: """Run when agent finish is received. Args: finish (AgentFinish): The agent finish. Returns: Any: The result of the callback. """ _handle_event( self.handlers, "on_agent_finish", "ignore_agent", finish, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) class AsyncCallbackManagerForChainRun(AsyncParentRunManager, ChainManagerMixin): """Async callback manager for chain run.""" async def on_chain_end( self, outputs: Union[Dict[str, Any], Any], **kwargs: Any ) -> None: """Run when chain ends running. Args: outputs (Union[Dict[str, Any], Any]): The outputs of the chain. """ await _ahandle_event( self.handlers, "on_chain_end", "ignore_chain", outputs, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) async def on_chain_error( self, error: BaseException, **kwargs: Any, ) -> None: """Run when chain errors. Args: error (Exception or KeyboardInterrupt): The error. """ await _ahandle_event( self.handlers, "on_chain_error", "ignore_chain", error, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Run when agent action is received. Args: action (AgentAction): The agent action. Returns: Any: The result of the callback. 
""" await _ahandle_event( self.handlers, "on_agent_action", "ignore_agent", action, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any: """Run when agent finish is received. Args: finish (AgentFinish): The agent finish. Returns: Any: The result of the callback. """ await _ahandle_event( self.handlers, "on_agent_finish", "ignore_agent", finish, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) class CallbackManagerForToolRun(ParentRunManager, ToolManagerMixin): """Callback manager for tool run.""" def on_tool_end( self, output: str, **kwargs: Any, ) -> None: """Run when tool ends running. Args: output (str): The output of the tool. """ _handle_event( self.handlers, "on_tool_end", "ignore_agent", output, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) def on_tool_error( self, error: BaseException, **kwargs: Any, ) -> None: """Run when tool errors. Args: error (Exception or KeyboardInterrupt): The error. """ _handle_event( self.handlers, "on_tool_error", "ignore_agent", error, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) class AsyncCallbackManagerForToolRun(AsyncParentRunManager, ToolManagerMixin): """Async callback manager for tool run.""" async def on_tool_end(self, output: str, **kwargs: Any) -> None: """Run when tool ends running. Args: output (str): The output of the tool. """ await _ahandle_event( self.handlers, "on_tool_end", "ignore_agent", output, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) async def on_tool_error( self, error: BaseException, **kwargs: Any, ) -> None: """Run when tool errors. Args: error (Exception or KeyboardInterrupt): The error. 
""" await _ahandle_event( self.handlers, "on_tool_error", "ignore_agent", error, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) class CallbackManagerForRetrieverRun(ParentRunManager, RetrieverManagerMixin): """Callback manager for retriever run.""" def on_retriever_end( self, documents: Sequence[Document], **kwargs: Any, ) -> None: """Run when retriever ends running.""" _handle_event( self.handlers, "on_retriever_end", "ignore_retriever", documents, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) def on_retriever_error( self, error: BaseException, **kwargs: Any, ) -> None: """Run when retriever errors.""" _handle_event( self.handlers, "on_retriever_error", "ignore_retriever", error, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) class AsyncCallbackManagerForRetrieverRun( AsyncParentRunManager, RetrieverManagerMixin, ): """Async callback manager for retriever run.""" async def on_retriever_end( self, documents: Sequence[Document], **kwargs: Any ) -> None: """Run when retriever ends running.""" await _ahandle_event( self.handlers, "on_retriever_end", "ignore_retriever", documents, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) async def on_retriever_error( self, error: BaseException, **kwargs: Any, ) -> None: """Run when retriever errors.""" await _ahandle_event( self.handlers, "on_retriever_error", "ignore_retriever", error, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs, ) class CallbackManager(BaseCallbackManager): """Callback manager that handles callbacks from LangChain.""" def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any, ) -> List[CallbackManagerForLLMRun]: """Run when LLM starts running. Args: serialized (Dict[str, Any]): The serialized LLM. prompts (List[str]): The list of prompts. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: List[CallbackManagerForLLMRun]: A callback manager for each prompt as an LLM run. """ managers = [] for prompt in prompts: run_id_ = uuid.uuid4() _handle_event( self.handlers, "on_llm_start", "ignore_llm", serialized, [prompt], run_id=run_id_, parent_run_id=self.parent_run_id, tags=self.tags, metadata=self.metadata, **kwargs, ) managers.append( CallbackManagerForLLMRun( run_id=run_id_, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, metadata=self.metadata, inheritable_metadata=self.inheritable_metadata, ) ) return managers def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs: Any, ) -> List[CallbackManagerForLLMRun]: """Run when LLM starts running. Args: serialized (Dict[str, Any]): The serialized LLM. messages (List[List[BaseMessage]]): The list of messages. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: List[CallbackManagerForLLMRun]: A callback manager for each list of messages as an LLM run. 
""" managers = [] for message_list in messages: run_id_ = uuid.uuid4() _handle_event( self.handlers, "on_chat_model_start", "ignore_chat_model", serialized, [message_list], run_id=run_id_, parent_run_id=self.parent_run_id, tags=self.tags, metadata=self.metadata, **kwargs, ) managers.append( CallbackManagerForLLMRun( run_id=run_id_, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, metadata=self.metadata, inheritable_metadata=self.inheritable_metadata, ) ) return managers def on_chain_start( self, serialized: Dict[str, Any], inputs: Union[Dict[str, Any], Any], run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForChainRun: """Run when chain starts running. Args: serialized (Dict[str, Any]): The serialized chain. inputs (Union[Dict[str, Any], Any]): The inputs to the chain. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: CallbackManagerForChainRun: The callback manager for the chain run. """ if run_id is None: run_id = uuid.uuid4() _handle_event( self.handlers, "on_chain_start", "ignore_chain", serialized, inputs, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags, metadata=self.metadata, **kwargs, ) return CallbackManagerForChainRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, metadata=self.metadata, inheritable_metadata=self.inheritable_metadata, ) def on_tool_start( self, serialized: Dict[str, Any], input_str: str, run_id: Optional[UUID] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForToolRun: """Run when tool starts running. Args: serialized (Dict[str, Any]): The serialized tool. input_str (str): The input to the tool. run_id (UUID, optional): The ID of the run. Defaults to None. parent_run_id (UUID, optional): The ID of the parent run. Defaults to None. Returns: CallbackManagerForToolRun: The callback manager for the tool run. 
""" if run_id is None: run_id = uuid.uuid4() _handle_event( self.handlers, "on_tool_start", "ignore_agent", serialized, input_str, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags, metadata=self.metadata, **kwargs, ) return CallbackManagerForToolRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, metadata=self.metadata, inheritable_metadata=self.inheritable_metadata, ) def on_retriever_start( self, serialized: Dict[str, Any], query: str, run_id: Optional[UUID] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForRetrieverRun: """Run when retriever starts running.""" if run_id is None: run_id = uuid.uuid4() _handle_event( self.handlers, "on_retriever_start", "ignore_retriever", serialized, query, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags, metadata=self.metadata, **kwargs, ) return CallbackManagerForRetrieverRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, metadata=self.metadata, inheritable_metadata=self.inheritable_metadata, ) @classmethod def configure( cls, inheritable_callbacks: Callbacks = None, local_callbacks: Callbacks = None, verbose: bool = False, inheritable_tags: Optional[List[str]] = None, local_tags: Optional[List[str]] = None, inheritable_metadata: Optional[Dict[str, Any]] = None, local_metadata: Optional[Dict[str, Any]] = None, ) -> CallbackManager: """Configure the callback manager. Args: inheritable_callbacks (Optional[Callbacks], optional): The inheritable callbacks. Defaults to None. local_callbacks (Optional[Callbacks], optional): The local callbacks. Defaults to None. verbose (bool, optional): Whether to enable verbose mode. Defaults to False. inheritable_tags (Optional[List[str]], optional): The inheritable tags. Defaults to None. local_tags (Optional[List[str]], optional): The local tags. Defaults to None. inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable metadata. Defaults to None. local_metadata (Optional[Dict[str, Any]], optional): The local metadata. Defaults to None. Returns: CallbackManager: The configured callback manager. """ return _configure( cls, inheritable_callbacks, local_callbacks, verbose, inheritable_tags, local_tags, inheritable_metadata, local_metadata, ) class CallbackManagerForChainGroup(CallbackManager): """Callback manager for the chain group.""" def __init__( self, handlers: List[BaseCallbackHandler], inheritable_handlers: List[BaseCallbackHandler] | None = None, parent_run_id: UUID | None = None, *, parent_run_manager: CallbackManagerForChainRun, **kwargs: Any, ) -> None: super().__init__( handlers, inheritable_handlers, parent_run_id, **kwargs, ) self.parent_run_manager = parent_run_manager self.ended = False def on_chain_end(self, outputs: Union[Dict[str, Any], Any], **kwargs: Any) -> None: """Run when traced chain group ends. Args: outputs (Union[Dict[str, Any], Any]): The outputs of the chain. """ self.ended = True return self.parent_run_manager.on_chain_end(outputs, **kwargs) def on_chain_error( self, error: BaseException, **kwargs: Any, ) -> None: """Run when chain errors. Args: error (Exception or KeyboardInterrupt): The error. 
""" self.ended = True return self.parent_run_manager.on_chain_error(error, **kwargs) class AsyncCallbackManager(BaseCallbackManager): """Async callback manager that handles callbacks from LangChain.""" @property def is_async(self) -> bool: """Return whether the handler is async.""" return True async def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any, ) -> List[AsyncCallbackManagerForLLMRun]: """Run when LLM starts running. Args: serialized (Dict[str, Any]): The serialized LLM. prompts (List[str]): The list of prompts. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: List[AsyncCallbackManagerForLLMRun]: The list of async callback managers, one for each LLM Run corresponding to each prompt. """ tasks = [] managers = [] for prompt in prompts: run_id_ = uuid.uuid4() tasks.append( _ahandle_event( self.handlers, "on_llm_start", "ignore_llm", serialized, [prompt], run_id=run_id_, parent_run_id=self.parent_run_id, tags=self.tags, metadata=self.metadata, **kwargs, ) ) managers.append( AsyncCallbackManagerForLLMRun( run_id=run_id_, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, metadata=self.metadata, inheritable_metadata=self.inheritable_metadata, ) ) await asyncio.gather(*tasks) return managers async def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs: Any, ) -> List[AsyncCallbackManagerForLLMRun]: """Run when LLM starts running. Args: serialized (Dict[str, Any]): The serialized LLM. messages (List[List[BaseMessage]]): The list of messages. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: List[AsyncCallbackManagerForLLMRun]: The list of async callback managers, one for each LLM Run corresponding to each inner message list. """ tasks = [] managers = [] for message_list in messages: run_id_ = uuid.uuid4() tasks.append( _ahandle_event( self.handlers, "on_chat_model_start", "ignore_chat_model", serialized, [message_list], run_id=run_id_, parent_run_id=self.parent_run_id, tags=self.tags, metadata=self.metadata, **kwargs, ) ) managers.append( AsyncCallbackManagerForLLMRun( run_id=run_id_, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, metadata=self.metadata, inheritable_metadata=self.inheritable_metadata, ) ) await asyncio.gather(*tasks) return managers async def on_chain_start( self, serialized: Dict[str, Any], inputs: Union[Dict[str, Any], Any], run_id: Optional[UUID] = None, **kwargs: Any, ) -> AsyncCallbackManagerForChainRun: """Run when chain starts running. Args: serialized (Dict[str, Any]): The serialized chain. inputs (Union[Dict[str, Any], Any]): The inputs to the chain. run_id (UUID, optional): The ID of the run. Defaults to None. Returns: AsyncCallbackManagerForChainRun: The async callback manager for the chain run. 
""" if run_id is None: run_id = uuid.uuid4() await _ahandle_event( self.handlers, "on_chain_start", "ignore_chain", serialized, inputs, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags, metadata=self.metadata, **kwargs, ) return AsyncCallbackManagerForChainRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, metadata=self.metadata, inheritable_metadata=self.inheritable_metadata, ) async def on_tool_start( self, serialized: Dict[str, Any], input_str: str, run_id: Optional[UUID] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> AsyncCallbackManagerForToolRun: """Run when tool starts running. Args: serialized (Dict[str, Any]): The serialized tool. input_str (str): The input to the tool. run_id (UUID, optional): The ID of the run. Defaults to None. parent_run_id (UUID, optional): The ID of the parent run. Defaults to None. Returns: AsyncCallbackManagerForToolRun: The async callback manager for the tool run. """ if run_id is None: run_id = uuid.uuid4() await _ahandle_event( self.handlers, "on_tool_start", "ignore_agent", serialized, input_str, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags, metadata=self.metadata, **kwargs, ) return AsyncCallbackManagerForToolRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, metadata=self.metadata, inheritable_metadata=self.inheritable_metadata, ) async def on_retriever_start( self, serialized: Dict[str, Any], query: str, run_id: Optional[UUID] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> AsyncCallbackManagerForRetrieverRun: """Run when retriever starts running.""" if run_id is None: run_id = uuid.uuid4() await _ahandle_event( self.handlers, "on_retriever_start", "ignore_retriever", serialized, query, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags, metadata=self.metadata, **kwargs, ) return AsyncCallbackManagerForRetrieverRun( run_id=run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, metadata=self.metadata, inheritable_metadata=self.inheritable_metadata, ) @classmethod def configure( cls, inheritable_callbacks: Callbacks = None, local_callbacks: Callbacks = None, verbose: bool = False, inheritable_tags: Optional[List[str]] = None, local_tags: Optional[List[str]] = None, inheritable_metadata: Optional[Dict[str, Any]] = None, local_metadata: Optional[Dict[str, Any]] = None, ) -> AsyncCallbackManager: """Configure the async callback manager. Args: inheritable_callbacks (Optional[Callbacks], optional): The inheritable callbacks. Defaults to None. local_callbacks (Optional[Callbacks], optional): The local callbacks. Defaults to None. verbose (bool, optional): Whether to enable verbose mode. Defaults to False. inheritable_tags (Optional[List[str]], optional): The inheritable tags. Defaults to None. local_tags (Optional[List[str]], optional): The local tags. Defaults to None. inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable metadata. Defaults to None. local_metadata (Optional[Dict[str, Any]], optional): The local metadata. Defaults to None. Returns: AsyncCallbackManager: The configured async callback manager. 
""" return _configure( cls, inheritable_callbacks, local_callbacks, verbose, inheritable_tags, local_tags, inheritable_metadata, local_metadata, ) class AsyncCallbackManagerForChainGroup(AsyncCallbackManager): """Async callback manager for the chain group.""" def __init__( self, handlers: List[BaseCallbackHandler], inheritable_handlers: List[BaseCallbackHandler] | None = None, parent_run_id: UUID | None = None, *, parent_run_manager: AsyncCallbackManagerForChainRun, **kwargs: Any, ) -> None: super().__init__( handlers, inheritable_handlers, parent_run_id, **kwargs, ) self.parent_run_manager = parent_run_manager self.ended = False async def on_chain_end( self, outputs: Union[Dict[str, Any], Any], **kwargs: Any ) -> None: """Run when traced chain group ends. Args: outputs (Union[Dict[str, Any], Any]): The outputs of the chain. """ self.ended = True await self.parent_run_manager.on_chain_end(outputs, **kwargs) async def on_chain_error( self, error: BaseException, **kwargs: Any, ) -> None: """Run when chain errors. Args: error (Exception or KeyboardInterrupt): The error. """ self.ended = True await self.parent_run_manager.on_chain_error(error, **kwargs) T = TypeVar("T", CallbackManager, AsyncCallbackManager) def env_var_is_set(env_var: str) -> bool: """Check if an environment variable is set. Args: env_var (str): The name of the environment variable. Returns: bool: True if the environment variable is set, False otherwise. """ return env_var in os.environ and os.environ[env_var] not in ( "", "0", "false", "False", ) def _configure( callback_manager_cls: Type[T], inheritable_callbacks: Callbacks = None, local_callbacks: Callbacks = None, verbose: bool = False, inheritable_tags: Optional[List[str]] = None, local_tags: Optional[List[str]] = None, inheritable_metadata: Optional[Dict[str, Any]] = None, local_metadata: Optional[Dict[str, Any]] = None, ) -> T: """Configure the callback manager. Args: callback_manager_cls (Type[T]): The callback manager class. inheritable_callbacks (Optional[Callbacks], optional): The inheritable callbacks. Defaults to None. local_callbacks (Optional[Callbacks], optional): The local callbacks. Defaults to None. verbose (bool, optional): Whether to enable verbose mode. Defaults to False. inheritable_tags (Optional[List[str]], optional): The inheritable tags. Defaults to None. local_tags (Optional[List[str]], optional): The local tags. Defaults to None. inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable metadata. Defaults to None. local_metadata (Optional[Dict[str, Any]], optional): The local metadata. Defaults to None. Returns: T: The configured callback manager. 
""" callback_manager = callback_manager_cls(handlers=[]) if inheritable_callbacks or local_callbacks: if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None: inheritable_callbacks_ = inheritable_callbacks or [] callback_manager = callback_manager_cls( handlers=inheritable_callbacks_.copy(), inheritable_handlers=inheritable_callbacks_.copy(), ) else: callback_manager = callback_manager_cls( handlers=inheritable_callbacks.handlers.copy(), inheritable_handlers=inheritable_callbacks.inheritable_handlers.copy(), parent_run_id=inheritable_callbacks.parent_run_id, tags=inheritable_callbacks.tags.copy(), inheritable_tags=inheritable_callbacks.inheritable_tags.copy(), metadata=inheritable_callbacks.metadata.copy(), inheritable_metadata=inheritable_callbacks.inheritable_metadata.copy(), ) local_handlers_ = ( local_callbacks if isinstance(local_callbacks, list) else (local_callbacks.handlers if local_callbacks else []) ) for handler in local_handlers_: callback_manager.add_handler(handler, False) if inheritable_tags or local_tags: callback_manager.add_tags(inheritable_tags or []) callback_manager.add_tags(local_tags or [], False) if inheritable_metadata or local_metadata: callback_manager.add_metadata(inheritable_metadata or {}) callback_manager.add_metadata(local_metadata or {}, False) tracer = tracing_callback_var.get() wandb_tracer = wandb_tracing_callback_var.get() open_ai = openai_callback_var.get() tracing_enabled_ = ( env_var_is_set("LANGCHAIN_TRACING") or tracer is not None or env_var_is_set("LANGCHAIN_HANDLER") ) wandb_tracing_enabled_ = ( env_var_is_set("LANGCHAIN_WANDB_TRACING") or wandb_tracer is not None ) tracer_v2 = tracing_v2_callback_var.get() tracing_v2_enabled_ = ( env_var_is_set("LANGCHAIN_TRACING_V2") or tracer_v2 is not None ) tracer_project = os.environ.get( "LANGCHAIN_PROJECT", os.environ.get("LANGCHAIN_SESSION", "default") ) run_collector_ = run_collector_var.get() debug = _get_debug() if ( verbose or debug or tracing_enabled_ or tracing_v2_enabled_ or wandb_tracing_enabled_ or open_ai is not None ): if verbose and not any( isinstance(handler, StdOutCallbackHandler) for handler in callback_manager.handlers ): if debug: pass else: callback_manager.add_handler(StdOutCallbackHandler(), False) if debug and not any( isinstance(handler, ConsoleCallbackHandler) for handler in callback_manager.handlers ): callback_manager.add_handler(ConsoleCallbackHandler(), True) if tracing_enabled_ and not any( isinstance(handler, LangChainTracerV1) for handler in callback_manager.handlers ): if tracer: callback_manager.add_handler(tracer, True) else: handler = LangChainTracerV1() handler.load_session(tracer_project) callback_manager.add_handler(handler, True) if wandb_tracing_enabled_ and not any( isinstance(handler, WandbTracer) for handler in callback_manager.handlers ): if wandb_tracer: callback_manager.add_handler(wandb_tracer, True) else: handler = WandbTracer() callback_manager.add_handler(handler, True) if tracing_v2_enabled_ and not any( isinstance(handler, LangChainTracer) for handler in callback_manager.handlers ): if tracer_v2: callback_manager.add_handler(tracer_v2, True) else: try: handler = LangChainTracer(project_name=tracer_project) callback_manager.add_handler(handler, True) except Exception as e: logger.warning( "Unable to load requested LangChainTracer." 
" To disable this warning," " unset the LANGCHAIN_TRACING_V2 environment variables.", e, ) if open_ai is not None and not any( handler is open_ai # direct pointer comparison for handler in callback_manager.handlers ): callback_manager.add_handler(open_ai, True) if run_collector_ is not None and not any( handler is run_collector_ # direct pointer comparison for handler in callback_manager.handlers ): callback_manager.add_handler(run_collector_, False) return callback_manager
[ "langchain.callbacks.stdout.StdOutCallbackHandler", "langchain.callbacks.tracers.wandb.WandbTracer", "langchain.callbacks.openai_info.OpenAICallbackHandler", "langchain.callbacks.tracers.run_collector.RunCollectorCallbackHandler", "langchain.callbacks.tracers.stdout.ConsoleCallbackHandler", "langchain.callbacks.tracers.langchain.LangChainTracer", "langchain.callbacks.tracers.langchain_v1.LangChainTracerV1", "langchain.schema.messages.get_buffer_string" ]
[((1530, 1557), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1547, 1557), False, 'import logging\n'), ((1626, 1669), 'contextvars.ContextVar', 'ContextVar', (['"""openai_callback"""'], {'default': 'None'}), "('openai_callback', default=None)\n", (1636, 1669), False, 'from contextvars import ContextVar\n'), ((1746, 1790), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_callback"""'], {'default': 'None'}), "('tracing_callback', default=None)\n", (1756, 1790), False, 'from contextvars import ContextVar\n'), ((1881, 1931), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_wandb_callback"""'], {'default': 'None'}), "('tracing_wandb_callback', default=None)\n", (1891, 1931), False, 'from contextvars import ContextVar\n'), ((2024, 2071), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_callback_v2"""'], {'default': 'None'}), "('tracing_callback_v2', default=None)\n", (2034, 2071), False, 'from contextvars import ContextVar\n'), ((2183, 2224), 'contextvars.ContextVar', 'ContextVar', (['"""run_collector"""'], {'default': 'None'}), "('run_collector', default=None)\n", (2193, 2224), False, 'from contextvars import ContextVar\n'), ((16857, 16895), 'typing.TypeVar', 'TypeVar', (['"""BRM"""'], {'bound': '"""BaseRunManager"""'}), "('BRM', bound='BaseRunManager')\n", (16864, 16895), False, 'from typing import TYPE_CHECKING, Any, AsyncGenerator, Coroutine, Dict, Generator, List, Optional, Sequence, Type, TypeVar, Union, cast\n'), ((55286, 55337), 'typing.TypeVar', 'TypeVar', (['"""T"""', 'CallbackManager', 'AsyncCallbackManager'], {}), "('T', CallbackManager, AsyncCallbackManager)\n", (55293, 55337), False, 'from typing import TYPE_CHECKING, Any, AsyncGenerator, Coroutine, Dict, Generator, List, Optional, Sequence, Type, TypeVar, Union, cast\n'), ((2714, 2737), 'langchain.callbacks.openai_info.OpenAICallbackHandler', 'OpenAICallbackHandler', ([], {}), '()\n', (2735, 2737), False, 'from langchain.callbacks.openai_info import OpenAICallbackHandler\n'), ((3303, 3322), 'langchain.callbacks.tracers.langchain_v1.LangChainTracerV1', 'LangChainTracerV1', ([], {}), '()\n', (3320, 3322), False, 'from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1\n'), ((3905, 3918), 'langchain.callbacks.tracers.wandb.WandbTracer', 'WandbTracer', ([], {}), '()\n', (3916, 3918), False, 'from langchain.callbacks.tracers.wandb import WandbTracer\n'), ((5066, 5161), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'example_id': 'example_id', 'project_name': 'project_name', 'tags': 'tags', 'client': 'client'}), '(example_id=example_id, project_name=project_name, tags=tags,\n client=client)\n', (5081, 5161), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((5695, 5738), 'langchain.callbacks.tracers.run_collector.RunCollectorCallbackHandler', 'run_collector.RunCollectorCallbackHandler', ([], {}), '()\n', (5736, 5738), False, 'from langchain.callbacks.tracers import run_collector\n'), ((5040, 5056), 'uuid.UUID', 'UUID', (['example_id'], {}), '(example_id)\n', (5044, 5056), False, 'from uuid import UUID\n'), ((59305, 59351), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_SESSION"""', '"""default"""'], {}), "('LANGCHAIN_SESSION', 'default')\n", (59319, 59351), False, 'import os\n'), ((14159, 14175), 'asyncio.Runner', 'asyncio.Runner', ([], {}), '()\n', (14173, 14175), False, 'import asyncio\n'), ((14666, 14683), 'asyncio.run', 'asyncio.run', (['coro'], {}), '(coro)\n', (14677, 14683), False, 
'import asyncio\n'), ((15029, 15063), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['event'], {}), '(event)\n', (15056, 15063), False, 'import asyncio\n'), ((34984, 34996), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (34994, 34996), False, 'import uuid\n'), ((36584, 36596), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (36594, 36596), False, 'import uuid\n'), ((38168, 38180), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (38178, 38180), False, 'import uuid\n'), ((39631, 39643), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (39641, 39643), False, 'import uuid\n'), ((40711, 40723), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (40721, 40723), False, 'import uuid\n'), ((45328, 45340), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (45338, 45340), False, 'import uuid\n'), ((46333, 46355), 'asyncio.gather', 'asyncio.gather', (['*tasks'], {}), '(*tasks)\n', (46347, 46355), False, 'import asyncio\n'), ((47153, 47165), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (47163, 47165), False, 'import uuid\n'), ((48178, 48200), 'asyncio.gather', 'asyncio.gather', (['*tasks'], {}), '(*tasks)\n', (48192, 48200), False, 'import asyncio\n'), ((48907, 48919), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (48917, 48919), False, 'import uuid\n'), ((50437, 50449), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (50447, 50449), False, 'import uuid\n'), ((51540, 51552), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (51550, 51552), False, 'import uuid\n'), ((7492, 7557), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'project_name': 'project_name', 'example_id': 'example_id'}), '(project_name=project_name, example_id=example_id)\n', (7507, 7557), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((10252, 10317), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'project_name': 'project_name', 'example_id': 'example_id'}), '(project_name=project_name, example_id=example_id)\n', (10267, 10317), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((13167, 13193), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (13191, 13193), False, 'import asyncio\n'), ((18728, 18740), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (18738, 18740), False, 'import uuid\n'), ((60081, 60105), 'langchain.callbacks.tracers.stdout.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (60103, 60105), False, 'from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler\n'), ((60395, 60414), 'langchain.callbacks.tracers.langchain_v1.LangChainTracerV1', 'LangChainTracerV1', ([], {}), '()\n', (60412, 60414), False, 'from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1\n'), ((60810, 60823), 'langchain.callbacks.tracers.wandb.WandbTracer', 'WandbTracer', ([], {}), '()\n', (60821, 60823), False, 'from langchain.callbacks.tracers.wandb import WandbTracer\n'), ((11911, 11937), 'asyncio.iscoroutine', 'asyncio.iscoroutine', (['event'], {}), '(event)\n', (11930, 11937), False, 'import asyncio\n'), ((13651, 13672), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', (['(1)'], {}), '(1)\n', (13669, 13672), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((14469, 14490), 'asyncio.wait', 'asyncio.wait', (['pending'], {}), '(pending)\n', (14481, 14490), False, 'import asyncio\n'), ((15510, 15530), 'langchain.schema.messages.get_buffer_string', 'get_buffer_string', (['m'], {}), '(m)\n', (15527, 15530), False, 
'from langchain.schema.messages import BaseMessage, get_buffer_string\n'), ((59858, 59881), 'langchain.callbacks.stdout.StdOutCallbackHandler', 'StdOutCallbackHandler', ([], {}), '()\n', (59879, 59881), False, 'from langchain.callbacks.stdout import StdOutCallbackHandler\n'), ((61198, 61242), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'project_name': 'tracer_project'}), '(project_name=tracer_project)\n', (61213, 61242), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((15330, 15371), 'functools.partial', 'functools.partial', (['event', '*args'], {}), '(event, *args, **kwargs)\n', (15347, 15371), False, 'import functools\n'), ((12175, 12195), 'langchain.schema.messages.get_buffer_string', 'get_buffer_string', (['m'], {}), '(m)\n', (12192, 12195), False, 'from langchain.schema.messages import BaseMessage, get_buffer_string\n'), ((15258, 15282), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (15280, 15282), False, 'import asyncio\n')]
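The entry above is langchain's callback manager module itself rather than an application script. As a quick orientation, the sketch below (not part of the dataset) shows one plausible way the configure() classmethod and the per-run managers it returns are driven; the handler choice, tag, and event arguments are illustrative assumptions based only on the signatures visible in the row.

from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.stdout import StdOutCallbackHandler

# configure() merges inheritable and local handlers, tags and metadata into one manager,
# the same path chains take internally.
manager = CallbackManager.configure(
    inheritable_callbacks=[StdOutCallbackHandler()],
    local_callbacks=None,
    verbose=False,
    inheritable_tags=["example-run"],
)

# Starting an LLM run returns one run manager per prompt; each fans events out to the handlers.
run_managers = manager.on_llm_start({"name": "fake-llm"}, ["Hello world"])
for run_manager in run_managers:
    run_manager.on_llm_new_token("Hi")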
import os import streamlit as st import time import langchain from langchain.chains import RetrievalQAWithSourcesChain, RetrievalQA from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain_community.document_loaders import UnstructuredURLLoader from langchain_community.vectorstores import FAISS from langchain_openai import OpenAI, OpenAIEmbeddings import configparser config = configparser.RawConfigParser() config.read('../config.config') openapi_key = config.get('Keys', 'openapi_key') os.environ['OPENAI_API_KEY'] = openapi_key st.title("URL Insighter 🔗🔍") st.sidebar.title("🔗URLs...") urls = [] for i in range(3): url = st.sidebar.text_input(f"URL {i+1}") urls.append(url) folder_name = st.sidebar.text_input('Title') process_url_clicked = st.sidebar.button("Process URLs") main_placeholder = st.empty() llm = OpenAI(temperature=0.9, max_tokens=500) if process_url_clicked: # load data loader = UnstructuredURLLoader(urls=urls) main_placeholder.text("Data Loading...Started...✅✅✅") data = loader.load() # split data text_splitter = RecursiveCharacterTextSplitter( separators=['\n\n', '\n', '.', ','], chunk_size=1000 ) main_placeholder.text("Text Splitter...Started...✅✅✅") docs = text_splitter.split_documents(data) embeddings = OpenAIEmbeddings() vectorstore_openai = FAISS.from_documents(docs, embeddings) main_placeholder.text("Embedding Vector Started Building...✅✅✅") time.sleep(2) vectorstore_openai.save_local(folder_name) query = main_placeholder.text_input("Question: ") if query: if os.path.exists(folder_name): vectorstore = FAISS.load_local(folder_name, OpenAIEmbeddings()) chain = RetrievalQAWithSourcesChain.from_llm(llm=llm, retriever=vectorstore.as_retriever()) result = chain({"question": query}, return_only_outputs=True) # result will be a dictionary of this format --> {"answer": "", "sources": [] } st.header("Answer") st.write(result["answer"]) sources = result.get("sources", "") if sources: st.subheader("Sources:") sources_list = sources.split("\n") # Split the sources by newline for source in sources_list: st.write(source)
[ "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain_openai.OpenAIEmbeddings", "langchain_community.document_loaders.UnstructuredURLLoader", "langchain_community.vectorstores.FAISS.from_documents", "langchain_openai.OpenAI" ]
[((485, 515), 'configparser.RawConfigParser', 'configparser.RawConfigParser', ([], {}), '()\n', (513, 515), False, 'import configparser\n'), ((640, 668), 'streamlit.title', 'st.title', (['"""URL Insighter 🔗🔍"""'], {}), "('URL Insighter 🔗🔍')\n", (648, 668), True, 'import streamlit as st\n'), ((669, 697), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""🔗URLs..."""'], {}), "('🔗URLs...')\n", (685, 697), True, 'import streamlit as st\n'), ((810, 840), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Title"""'], {}), "('Title')\n", (831, 840), True, 'import streamlit as st\n'), ((863, 896), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Process URLs"""'], {}), "('Process URLs')\n", (880, 896), True, 'import streamlit as st\n'), ((917, 927), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (925, 927), True, 'import streamlit as st\n'), ((934, 973), 'langchain_openai.OpenAI', 'OpenAI', ([], {'temperature': '(0.9)', 'max_tokens': '(500)'}), '(temperature=0.9, max_tokens=500)\n', (940, 973), False, 'from langchain_openai import OpenAI, OpenAIEmbeddings\n'), ((738, 775), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['f"""URL {i + 1}"""'], {}), "(f'URL {i + 1}')\n", (759, 775), True, 'import streamlit as st\n'), ((1028, 1060), 'langchain_community.document_loaders.UnstructuredURLLoader', 'UnstructuredURLLoader', ([], {'urls': 'urls'}), '(urls=urls)\n', (1049, 1060), False, 'from langchain_community.document_loaders import UnstructuredURLLoader\n'), ((1181, 1269), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'separators': "['\\n\\n', '\\n', '.', ',']", 'chunk_size': '(1000)'}), "(separators=['\\n\\n', '\\n', '.', ','],\n chunk_size=1000)\n", (1211, 1269), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1411, 1429), 'langchain_openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1427, 1429), False, 'from langchain_openai import OpenAI, OpenAIEmbeddings\n'), ((1455, 1493), 'langchain_community.vectorstores.FAISS.from_documents', 'FAISS.from_documents', (['docs', 'embeddings'], {}), '(docs, embeddings)\n', (1475, 1493), False, 'from langchain_community.vectorstores import FAISS\n'), ((1567, 1580), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1577, 1580), False, 'import time\n'), ((1697, 1724), 'os.path.exists', 'os.path.exists', (['folder_name'], {}), '(folder_name)\n', (1711, 1724), False, 'import os\n'), ((2064, 2083), 'streamlit.header', 'st.header', (['"""Answer"""'], {}), "('Answer')\n", (2073, 2083), True, 'import streamlit as st\n'), ((2092, 2118), 'streamlit.write', 'st.write', (["result['answer']"], {}), "(result['answer'])\n", (2100, 2118), True, 'import streamlit as st\n'), ((1778, 1796), 'langchain_openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1794, 1796), False, 'from langchain_openai import OpenAI, OpenAIEmbeddings\n'), ((2196, 2220), 'streamlit.subheader', 'st.subheader', (['"""Sources:"""'], {}), "('Sources:')\n", (2208, 2220), True, 'import streamlit as st\n'), ((2356, 2372), 'streamlit.write', 'st.write', (['source'], {}), '(source)\n', (2364, 2372), True, 'import streamlit as st\n')]
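To make the indexing/query round-trip in the script above concrete, here is a minimal, self-contained sketch of the FAISS save_local / load_local cycle it relies on. The folder name and sample text are made up, an OpenAI API key is assumed to be set, and on newer langchain_community versions load_local may additionally require allow_dangerous_deserialization=True.

from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings

embeddings = OpenAIEmbeddings()  # assumes OPENAI_API_KEY is set in the environment
store = FAISS.from_texts(["LangChain wraps FAISS for similarity search."], embeddings)
store.save_local("demo_index")                      # same persistence call the app uses
restored = FAISS.load_local("demo_index", embeddings)
print(restored.similarity_search("similarity search", k=1))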
from PyPDF2 import PdfReader import os import pandas as pd from langchain.embeddings.openai import OpenAIEmbeddings from langchain.text_splitter import CharacterTextSplitter from langchain.vectorstores import FAISS from langchain.chains.question_answering import load_qa_chain from langchain.llms import OpenAI from langchain.cache import InMemoryCache import langchain from ocr.ocr import process_pdf from docs.search import doc_search from extract.extract import knowledge_graph from data.processing import rowify from edi.edi_formatter import pandas_to_edi from openai.error import InvalidRequestError langchain.llm_cache = InMemoryCache() llm = OpenAI(temperature=0) embeddings = OpenAIEmbeddings() chain = load_qa_chain(llm, chain_type="stuff") pdf_inputs = [] key_ids = "" query = f""" Using the unique count of {key_ids} in this document, do the following: For each {key_ids}, extract the following information corresponding to the {key_ids}: """ rules_template = f"""Just give me the answer with {key_ids} line separated and nothing else.""" pdf_data = [] pdf_dir = '/pdfs/' fils = os.listdir(pdf_dir) for fil in fils: print("processing: " + fil) try: pdf_file = pdf_dir + fil texts = process_pdf(pdf_file) docsearch = doc_search(texts, embeddings) hwb_data = knowledge_graph( key_id="", docsearch=docsearch, pdf_inputs=pdf_inputs, query=query, rules_template=rules_template, chain=chain ) mwb = fil.split('-')[1] rows = rowify(hwb_data, extra=[mwb]) pdf_data.extend(rows) except InvalidRequestError: print(fil, "File needs handler.") cols = [] df = pd.DataFrame(columns=cols, data=pdf_data) edi_data = pandas_to_edi( edi_type='211', df=df, edi_key_col="", edi_data_col="", )
[ "langchain.chains.question_answering.load_qa_chain", "langchain.cache.InMemoryCache", "langchain.llms.OpenAI", "langchain.embeddings.openai.OpenAIEmbeddings" ]
[((634, 649), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (647, 649), False, 'from langchain.cache import InMemoryCache\n'), ((656, 677), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (662, 677), False, 'from langchain.llms import OpenAI\n'), ((692, 710), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (708, 710), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((720, 758), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['llm'], {'chain_type': '"""stuff"""'}), "(llm, chain_type='stuff')\n", (733, 758), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((1113, 1132), 'os.listdir', 'os.listdir', (['pdf_dir'], {}), '(pdf_dir)\n', (1123, 1132), False, 'import os\n'), ((1741, 1782), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'cols', 'data': 'pdf_data'}), '(columns=cols, data=pdf_data)\n', (1753, 1782), True, 'import pandas as pd\n'), ((1795, 1864), 'edi.edi_formatter.pandas_to_edi', 'pandas_to_edi', ([], {'edi_type': '"""211"""', 'df': 'df', 'edi_key_col': '""""""', 'edi_data_col': '""""""'}), "(edi_type='211', df=df, edi_key_col='', edi_data_col='')\n", (1808, 1864), False, 'from edi.edi_formatter import pandas_to_edi\n'), ((1241, 1262), 'ocr.ocr.process_pdf', 'process_pdf', (['pdf_file'], {}), '(pdf_file)\n', (1252, 1262), False, 'from ocr.ocr import process_pdf\n'), ((1283, 1312), 'docs.search.doc_search', 'doc_search', (['texts', 'embeddings'], {}), '(texts, embeddings)\n', (1293, 1312), False, 'from docs.search import doc_search\n'), ((1332, 1463), 'extract.extract.knowledge_graph', 'knowledge_graph', ([], {'key_id': '""""""', 'docsearch': 'docsearch', 'pdf_inputs': 'pdf_inputs', 'query': 'query', 'rules_template': 'rules_template', 'chain': 'chain'}), "(key_id='', docsearch=docsearch, pdf_inputs=pdf_inputs,\n query=query, rules_template=rules_template, chain=chain)\n", (1347, 1463), False, 'from extract.extract import knowledge_graph\n'), ((1590, 1619), 'data.processing.rowify', 'rowify', (['hwb_data'], {'extra': '[mwb]'}), '(hwb_data, extra=[mwb])\n', (1596, 1619), False, 'from data.processing import rowify\n')]
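The extraction pipeline above funnels OCR'd pages into a "stuff" QA chain through project-specific helpers (ocr, docs.search, extract, edi), so it cannot run standalone. The sketch below isolates just the load_qa_chain call pattern it depends on, with a hypothetical document and question; it assumes an OpenAI API key is available.

from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.schema import Document

llm = OpenAI(temperature=0)  # assumes OPENAI_API_KEY is set
chain = load_qa_chain(llm, chain_type="stuff")
docs = [Document(page_content="Master waybill 12345 covers 3 cartons of printed books.")]
answer = chain.run(input_documents=docs, question="How many cartons are on the waybill?")
print(answer)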
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2023/2/24 16:23 # @Author : Jack # @File : main.py # @Software: PyCharm import asyncio import logging import socket import sys import consul import langchain import os import grpc from langchain import PromptTemplate, LLMChain from langchain.chat_models import ChatOpenAI from proto import chatgpt_pb2_grpc, chatgpt_pb2 from callback import StreamingLLMCallbackHandler default_port = 8099 def get_host_ip(): """ 查询本机ip地址 :return: ip """ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: s.connect(('8.8.8.8', 80)) ip = s.getsockname()[0] finally: s.close() return ip def register_service(consul_addr: str, consul_port: int, srv_port: int) -> consul.Consul: local_ip = get_host_ip() client = consul.Consul(host=consul_addr, port=consul_port, verify=False) client.agent.service.register( name="chatgpt", address=local_ip, port=srv_port, service_id=f"chatgpt-{local_ip}", timeout=10 ) return client def init_chatgpt() -> langchain.LLMChain: llm = ChatOpenAI(streaming=True, verbose=True, temperature=0.6) # Get prompt template template = ("""Assistant is a large language model trained by OpenAI. Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand. Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics. Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist. {human_input} Assistant:""") chat_prompt = PromptTemplate( input_variables=["human_input"], template=template ) # Construct Chain chain = LLMChain(llm=llm, prompt=chat_prompt, callbacks=[], verbose=True) return chain # Fill your openai api key. os.environ["OPENAI_API_KEY"] = "" class ChatgptService(chatgpt_pb2_grpc.ChatgptServicer): def __init__(self, chain): self.chain = chain async def Send(self, request: chatgpt_pb2.Message, context: grpc.aio.ServicerContext): stream_handler = StreamingLLMCallbackHandler(context, chatgpt_pb2) await self.chain.acall( {"human_input": request.content}, callbacks=[stream_handler] ) async def serve() -> None: """ Run grpc service """ server = grpc.aio.server() chain = init_chatgpt() chatgpt_pb2_grpc.add_ChatgptServicer_to_server(ChatgptService(chain), server=server) server.add_insecure_port(f"[::]:{default_port}") await server.start() print(f"Server started, listening on {default_port}") await server.wait_for_termination() if __name__ == '__main__': logging.basicConfig(level=logging.INFO) # Fill your consul service address and port. 
client = register_service("127.0.0.1", 8500, default_port) try: asyncio.get_event_loop().run_until_complete(serve()) except KeyboardInterrupt: client.agent.service.deregister(f"chatgpt-{get_host_ip()}") print("\nExiting...") sys.exit()
[ "langchain.LLMChain", "langchain.PromptTemplate", "langchain.chat_models.ChatOpenAI" ]
[((558, 606), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (571, 606), False, 'import socket\n'), ((875, 938), 'consul.Consul', 'consul.Consul', ([], {'host': 'consul_addr', 'port': 'consul_port', 'verify': '(False)'}), '(host=consul_addr, port=consul_port, verify=False)\n', (888, 938), False, 'import consul\n'), ((1198, 1255), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'streaming': '(True)', 'verbose': '(True)', 'temperature': '(0.6)'}), '(streaming=True, verbose=True, temperature=0.6)\n', (1208, 1255), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2592, 2658), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['human_input']", 'template': 'template'}), "(input_variables=['human_input'], template=template)\n", (2606, 2658), False, 'from langchain import PromptTemplate, LLMChain\n'), ((2720, 2785), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'chat_prompt', 'callbacks': '[]', 'verbose': '(True)'}), '(llm=llm, prompt=chat_prompt, callbacks=[], verbose=True)\n', (2728, 2785), False, 'from langchain import PromptTemplate, LLMChain\n'), ((3378, 3395), 'grpc.aio.server', 'grpc.aio.server', ([], {}), '()\n', (3393, 3395), False, 'import grpc\n'), ((3731, 3770), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (3750, 3770), False, 'import logging\n'), ((3111, 3160), 'callback.StreamingLLMCallbackHandler', 'StreamingLLMCallbackHandler', (['context', 'chatgpt_pb2'], {}), '(context, chatgpt_pb2)\n', (3138, 3160), False, 'from callback import StreamingLLMCallbackHandler\n'), ((4097, 4107), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4105, 4107), False, 'import sys\n'), ((3904, 3928), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (3926, 3928), False, 'import asyncio\n')]
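The gRPC service above streams tokens by passing a custom callback handler into chain.acall. Since StreamingLLMCallbackHandler lives in the project's own callback module, here is a hedged, self-contained stand-in showing the same streaming hook; the class name and print behaviour are illustrative only.

from langchain.callbacks.base import AsyncCallbackHandler

class PrintTokenHandler(AsyncCallbackHandler):
    """Hypothetical stand-in for the project's StreamingLLMCallbackHandler:
    instead of writing tokens to a gRPC stream, it just prints them."""

    async def on_llm_new_token(self, token: str, **kwargs) -> None:
        print(token, end="", flush=True)

# Inside an async context, the chain built by init_chatgpt() would then be driven as:
#     await chain.acall({"human_input": "Hello"}, callbacks=[PrintTokenHandler()])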
import re from typing import Any, Dict, Literal, TypedDict, cast from dotenv import load_dotenv load_dotenv() import langchain import langchain.schema from langchain.chat_models import ChatOpenAI from langchain.llms import GPT4All from langchain.vectorstores import Chroma from langchain.chains import RetrievalQAWithSourcesChain, RetrievalQA from update_db import persist_directory, embedding from langchain import ( LLMChain, PromptTemplate, ) from langchain.memory import ConversationBufferWindowMemory from langchain.schema import BaseOutputParser, OutputParserException from langchain.callbacks.manager import Callbacks import chainlit as cl langchain.debug = True vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding) llm = ChatOpenAI(client=None, model="gpt-3.5-turbo-16k", temperature=0, streaming=True) # llm = GPT4All(model="./models/ggml-mpt-7b-instruct.bin", backend="mpt", verbose=True, temp=0.1, repeat_penalty=2) # type: ignore # llm = GPT4All(model="./models/ggml-gpt4all-j-v1.3-groovy.bin", backend="gptj", verbose=True, temp=0) # type: ignore memory = ConversationBufferWindowMemory(k=30, memory_key="history") class RoutingChainOutput(TypedDict): action: Literal["SEARCH", "REPLY"] param: str def simple_key_extract(key: str, output: str) -> str: found = re.search(f"{key}\\s?: (.*)", output, flags=re.IGNORECASE) if found is None: raise OutputParserException(f"Key '{key}:' not found on {output}") return found[1] class RoutingParser(BaseOutputParser[RoutingChainOutput]): def parse(self, output: str) -> Dict[str, Any]: return { "action": cast( Literal["SEARCH", "REPLY"], simple_key_extract("Action", output) ), "param": simple_key_extract("Param", output), } class RoutingChain(LLMChain): pass routing_chain = RoutingChain( llm=llm, memory=memory, prompt=PromptTemplate( template=""" You are a chatbot that primarly helps users to search on docs, but you may also chit-chat with them. Given a user input, choose the correction Action: SEARCH: if user is asking a question, it takes the search query as param REPLY: if user is just chit-chatting like greeting or asking how are you, it takes the reply from the chatbot as a param ==================== Input: hello there Action: REPLY Param: hey there, what are you looking for? Input: how does langchain work? Action: SEARCH Param: langchain how it works Input: code example of vector db Action: SEARCH Param: vector db code example Input: how is it going? Action: REPLY Param: I'm going well, how about you? {history} Input: {input} """, input_variables=["history", "input"], output_parser=RoutingParser(), ), ) qa_chain = RetrievalQA.from_chain_type( llm=llm, chain_type="stuff", retriever=vectordb.as_retriever(), ) def conversation(input: str, callbacks: Callbacks) -> str: route = cast( RoutingChainOutput, routing_chain.predict_and_parse(callbacks=callbacks, input=input), ) if route["action"] == "REPLY": return route["param"] elif route["action"] == "SEARCH": result = qa_chain.run(route["param"], callbacks=callbacks) return result else: return f"unknown action {route['action']}" @cl.langchain_factory(use_async=False) def factory(): return conversation
[ "langchain.schema.OutputParserException", "langchain.memory.ConversationBufferWindowMemory", "langchain.chat_models.ChatOpenAI", "langchain.vectorstores.Chroma" ]
[((97, 110), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (108, 110), False, 'from dotenv import load_dotenv\n'), ((694, 767), 'langchain.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': 'persist_directory', 'embedding_function': 'embedding'}), '(persist_directory=persist_directory, embedding_function=embedding)\n', (700, 767), False, 'from langchain.vectorstores import Chroma\n'), ((775, 861), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'client': 'None', 'model': '"""gpt-3.5-turbo-16k"""', 'temperature': '(0)', 'streaming': '(True)'}), "(client=None, model='gpt-3.5-turbo-16k', temperature=0, streaming\n =True)\n", (785, 861), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1118, 1176), 'langchain.memory.ConversationBufferWindowMemory', 'ConversationBufferWindowMemory', ([], {'k': '(30)', 'memory_key': '"""history"""'}), "(k=30, memory_key='history')\n", (1148, 1176), False, 'from langchain.memory import ConversationBufferWindowMemory\n'), ((3516, 3553), 'chainlit.langchain_factory', 'cl.langchain_factory', ([], {'use_async': '(False)'}), '(use_async=False)\n', (3536, 3553), True, 'import chainlit as cl\n'), ((1338, 1396), 're.search', 're.search', (['f"""{key}\\\\s?: (.*)"""', 'output'], {'flags': 're.IGNORECASE'}), "(f'{key}\\\\s?: (.*)', output, flags=re.IGNORECASE)\n", (1347, 1396), False, 'import re\n'), ((1433, 1493), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Key \'{key}:\' not found on {output}"""'], {}), '(f"Key \'{key}:\' not found on {output}")\n', (1454, 1493), False, 'from langchain.schema import BaseOutputParser, OutputParserException\n')]
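The routing chain above hinges on RoutingParser pulling "Action" and "Param" lines out of the model's reply. The self-contained sketch below restates that key-extraction step (with a plain ValueError standing in for OutputParserException) so the expected model output format is easy to see; the sample string is invented.

import re

def simple_key_extract(key: str, output: str) -> str:
    # Find "<key>: <value>" on its own line, case-insensitively.
    found = re.search(f"{key}\\s?: (.*)", output, flags=re.IGNORECASE)
    if found is None:
        raise ValueError(f"Key '{key}:' not found on {output}")
    return found[1]

sample = "Action: SEARCH\nParam: vector db code example"
print(simple_key_extract("Action", sample))  # SEARCH
print(simple_key_extract("Param", sample))   # vector db code example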
import langchain_visualizer # isort:skip # noqa: F401 import asyncio import vcr_langchain as vcr from langchain import PromptTemplate from langchain.chains import LLMChain from langchain.llms import OpenAI # ========================== Start of langchain example code ========================== # https://langchain.readthedocs.io/en/latest/modules/chains/getting_started.html llm = OpenAI(temperature=0) prompt = PromptTemplate( input_variables=["product"], template="What is a good name for a company that makes {product}?", ) chain = LLMChain(llm=llm, prompt=prompt) # ================================== Execute example ================================== @vcr.use_cassette() async def llm_chain_demo(): return chain.run("colorful socks") def test_llm_usage_succeeds(): """Check that the chain can run normally""" result = asyncio.get_event_loop().run_until_complete(llm_chain_demo()) assert result.strip() == "Socktastic!" if __name__ == "__main__": from langchain_visualizer import visualize visualize(llm_chain_demo)
[ "langchain.chains.LLMChain", "langchain_visualizer.visualize", "langchain.llms.OpenAI", "langchain.PromptTemplate" ]
[((387, 408), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (393, 408), False, 'from langchain.llms import OpenAI\n'), ((418, 534), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['product']", 'template': '"""What is a good name for a company that makes {product}?"""'}), "(input_variables=['product'], template=\n 'What is a good name for a company that makes {product}?')\n", (432, 534), False, 'from langchain import PromptTemplate\n'), ((550, 582), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (558, 582), False, 'from langchain.chains import LLMChain\n'), ((676, 694), 'vcr_langchain.use_cassette', 'vcr.use_cassette', ([], {}), '()\n', (692, 694), True, 'import vcr_langchain as vcr\n'), ((1042, 1067), 'langchain_visualizer.visualize', 'visualize', (['llm_chain_demo'], {}), '(llm_chain_demo)\n', (1051, 1067), False, 'from langchain_visualizer import visualize\n'), ((856, 880), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (878, 880), False, 'import asyncio\n')]
"""Test logic on base chain class.""" from typing import Any, Dict, List, Optional import pytest from langchain.callbacks.base import CallbackManager from langchain.chains.base import Chain from langchain.schema import BaseMemory from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler class FakeMemory(BaseMemory): """Fake memory class for testing purposes.""" @property def memory_variables(self) -> List[str]: """Return baz variable.""" return ["baz"] def load_memory_variables( self, inputs: Optional[Dict[str, Any]] = None ) -> Dict[str, str]: """Return baz variable.""" return {"baz": "foo"} def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Pass.""" pass def clear(self) -> None: """Pass.""" pass class FakeChain(Chain): """Fake chain class for testing purposes.""" be_correct: bool = True the_input_keys: List[str] = ["foo"] the_output_keys: List[str] = ["bar"] @property def input_keys(self) -> List[str]: """Input keys.""" return self.the_input_keys @property def output_keys(self) -> List[str]: """Output key of bar.""" return self.the_output_keys def _call(self, inputs: Dict[str, str]) -> Dict[str, str]: if self.be_correct: return {"bar": "baz"} else: return {"baz": "bar"} def test_bad_inputs() -> None: """Test errors are raised if input keys are not found.""" chain = FakeChain() with pytest.raises(ValueError): chain({"foobar": "baz"}) def test_bad_outputs() -> None: """Test errors are raised if outputs keys are not found.""" chain = FakeChain(be_correct=False) with pytest.raises(ValueError): chain({"foo": "baz"}) def test_correct_call() -> None: """Test correct call of fake chain.""" chain = FakeChain() output = chain({"foo": "bar"}) assert output == {"foo": "bar", "bar": "baz"} def test_single_input_correct() -> None: """Test passing single input works.""" chain = FakeChain() output = chain("bar") assert output == {"foo": "bar", "bar": "baz"} def test_single_input_error() -> None: """Test passing single input errors as expected.""" chain = FakeChain(the_input_keys=["foo", "bar"]) with pytest.raises(ValueError): chain("bar") def test_run_single_arg() -> None: """Test run method with single arg.""" chain = FakeChain() output = chain.run("bar") assert output == "baz" def test_run_multiple_args_error() -> None: """Test run method with multiple args errors as expected.""" chain = FakeChain() with pytest.raises(ValueError): chain.run("bar", "foo") def test_run_kwargs() -> None: """Test run method with kwargs.""" chain = FakeChain(the_input_keys=["foo", "bar"]) output = chain.run(foo="bar", bar="foo") assert output == "baz" def test_run_kwargs_error() -> None: """Test run method with kwargs errors as expected.""" chain = FakeChain(the_input_keys=["foo", "bar"]) with pytest.raises(ValueError): chain.run(foo="bar", baz="foo") def test_run_args_and_kwargs_error() -> None: """Test run method with args and kwargs.""" chain = FakeChain(the_input_keys=["foo", "bar"]) with pytest.raises(ValueError): chain.run("bar", foo="bar") def test_multiple_output_keys_error() -> None: """Test run with multiple output keys errors as expected.""" chain = FakeChain(the_output_keys=["foo", "bar"]) with pytest.raises(ValueError): chain.run("bar") def test_run_arg_with_memory() -> None: """Test run method works when arg is passed.""" chain = FakeChain(the_input_keys=["foo", "baz"], memory=FakeMemory()) chain.run("bar") def test_run_with_callback() -> None: """Test run method works when callback manager is passed.""" handler = FakeCallbackHandler() chain = FakeChain( 
callback_manager=CallbackManager(handlers=[handler]), verbose=True ) output = chain.run("bar") assert output == "baz" assert handler.starts == 1 assert handler.ends == 1 assert handler.errors == 0 def test_run_with_callback_not_verbose() -> None: """Test run method works when callback manager is passed and not verbose.""" import langchain langchain.verbose = False handler = FakeCallbackHandler() chain = FakeChain(callback_manager=CallbackManager(handlers=[handler])) output = chain.run("bar") assert output == "baz" assert handler.starts == 0 assert handler.ends == 0 assert handler.errors == 0
[ "langchain.callbacks.base.CallbackManager" ]
[((3986, 4007), 'tests.unit_tests.callbacks.fake_callback_handler.FakeCallbackHandler', 'FakeCallbackHandler', ([], {}), '()\n', (4005, 4007), False, 'from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler\n'), ((4460, 4481), 'tests.unit_tests.callbacks.fake_callback_handler.FakeCallbackHandler', 'FakeCallbackHandler', ([], {}), '()\n', (4479, 4481), False, 'from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler\n'), ((1597, 1622), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1610, 1622), False, 'import pytest\n'), ((1804, 1829), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1817, 1829), False, 'import pytest\n'), ((2393, 2418), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2406, 2418), False, 'import pytest\n'), ((2746, 2771), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2759, 2771), False, 'import pytest\n'), ((3161, 3186), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3174, 3186), False, 'import pytest\n'), ((3386, 3411), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3399, 3411), False, 'import pytest\n'), ((3626, 3651), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3639, 3651), False, 'import pytest\n'), ((4056, 4091), 'langchain.callbacks.base.CallbackManager', 'CallbackManager', ([], {'handlers': '[handler]'}), '(handlers=[handler])\n', (4071, 4091), False, 'from langchain.callbacks.base import CallbackManager\n'), ((4521, 4556), 'langchain.callbacks.base.CallbackManager', 'CallbackManager', ([], {'handlers': '[handler]'}), '(handlers=[handler])\n', (4536, 4556), False, 'from langchain.callbacks.base import CallbackManager\n')]
"""Test caching for LLMs and ChatModels.""" from typing import Dict, Generator, List, Union import pytest from _pytest.fixtures import FixtureRequest from sqlalchemy import create_engine from sqlalchemy.orm import Session import langchain from langchain.cache import ( InMemoryCache, SQLAlchemyCache, ) from langchain.chat_models import FakeListChatModel from langchain.chat_models.base import BaseChatModel, dumps from langchain.llms import FakeListLLM from langchain.llms.base import BaseLLM from langchain.schema import ( AIMessage, BaseMessage, ChatGeneration, Generation, HumanMessage, ) def get_sqlite_cache() -> SQLAlchemyCache: return SQLAlchemyCache(engine=create_engine("sqlite://")) CACHE_OPTIONS = [ InMemoryCache, get_sqlite_cache, ] @pytest.fixture(autouse=True, params=CACHE_OPTIONS) def set_cache_and_teardown(request: FixtureRequest) -> Generator[None, None, None]: # Will be run before each test cache_instance = request.param langchain.llm_cache = cache_instance() if langchain.llm_cache: langchain.llm_cache.clear() else: raise ValueError("Cache not set. This should never happen.") yield # Will be run after each test if langchain.llm_cache: langchain.llm_cache.clear() else: raise ValueError("Cache not set. This should never happen.") def test_llm_caching() -> None: prompt = "How are you?" response = "Test response" cached_response = "Cached test response" llm = FakeListLLM(responses=[response]) if langchain.llm_cache: langchain.llm_cache.update( prompt=prompt, llm_string=create_llm_string(llm), return_val=[Generation(text=cached_response)], ) assert llm(prompt) == cached_response else: raise ValueError( "The cache not set. This should never happen, as the pytest fixture " "`set_cache_and_teardown` always sets the cache." ) def test_old_sqlite_llm_caching() -> None: if isinstance(langchain.llm_cache, SQLAlchemyCache): prompt = "How are you?" response = "Test response" cached_response = "Cached test response" llm = FakeListLLM(responses=[response]) items = [ langchain.llm_cache.cache_schema( prompt=prompt, llm=create_llm_string(llm), response=cached_response, idx=0, ) ] with Session(langchain.llm_cache.engine) as session, session.begin(): for item in items: session.merge(item) assert llm(prompt) == cached_response def test_chat_model_caching() -> None: prompt: List[BaseMessage] = [HumanMessage(content="How are you?")] response = "Test response" cached_response = "Cached test response" cached_message = AIMessage(content=cached_response) llm = FakeListChatModel(responses=[response]) if langchain.llm_cache: langchain.llm_cache.update( prompt=dumps(prompt), llm_string=llm._get_llm_string(), return_val=[ChatGeneration(message=cached_message)], ) result = llm(prompt) assert isinstance(result, AIMessage) assert result.content == cached_response else: raise ValueError( "The cache not set. This should never happen, as the pytest fixture " "`set_cache_and_teardown` always sets the cache." 
) def test_chat_model_caching_params() -> None: prompt: List[BaseMessage] = [HumanMessage(content="How are you?")] response = "Test response" cached_response = "Cached test response" cached_message = AIMessage(content=cached_response) llm = FakeListChatModel(responses=[response]) if langchain.llm_cache: langchain.llm_cache.update( prompt=dumps(prompt), llm_string=llm._get_llm_string(functions=[]), return_val=[ChatGeneration(message=cached_message)], ) result = llm(prompt, functions=[]) assert isinstance(result, AIMessage) assert result.content == cached_response result_no_params = llm(prompt) assert isinstance(result_no_params, AIMessage) assert result_no_params.content == response else: raise ValueError( "The cache not set. This should never happen, as the pytest fixture " "`set_cache_and_teardown` always sets the cache." ) def create_llm_string(llm: Union[BaseLLM, BaseChatModel]) -> str: _dict: Dict = llm.dict() _dict["stop"] = None return str(sorted([(k, v) for k, v in _dict.items()]))
[ "langchain.llm_cache.clear", "langchain.schema.Generation", "langchain.chat_models.FakeListChatModel", "langchain.llms.FakeListLLM", "langchain.schema.ChatGeneration", "langchain.schema.HumanMessage", "langchain.schema.AIMessage", "langchain.chat_models.base.dumps" ]
[((796, 846), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)', 'params': 'CACHE_OPTIONS'}), '(autouse=True, params=CACHE_OPTIONS)\n', (810, 846), False, 'import pytest\n'), ((1524, 1557), 'langchain.llms.FakeListLLM', 'FakeListLLM', ([], {'responses': '[response]'}), '(responses=[response])\n', (1535, 1557), False, 'from langchain.llms import FakeListLLM\n'), ((2895, 2929), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'cached_response'}), '(content=cached_response)\n', (2904, 2929), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, Generation, HumanMessage\n'), ((2940, 2979), 'langchain.chat_models.FakeListChatModel', 'FakeListChatModel', ([], {'responses': '[response]'}), '(responses=[response])\n', (2957, 2979), False, 'from langchain.chat_models import FakeListChatModel\n'), ((3728, 3762), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'cached_response'}), '(content=cached_response)\n', (3737, 3762), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, Generation, HumanMessage\n'), ((3773, 3812), 'langchain.chat_models.FakeListChatModel', 'FakeListChatModel', ([], {'responses': '[response]'}), '(responses=[response])\n', (3790, 3812), False, 'from langchain.chat_models import FakeListChatModel\n'), ((1080, 1107), 'langchain.llm_cache.clear', 'langchain.llm_cache.clear', ([], {}), '()\n', (1105, 1107), False, 'import langchain\n'), ((1269, 1296), 'langchain.llm_cache.clear', 'langchain.llm_cache.clear', ([], {}), '()\n', (1294, 1296), False, 'import langchain\n'), ((2233, 2266), 'langchain.llms.FakeListLLM', 'FakeListLLM', ([], {'responses': '[response]'}), '(responses=[response])\n', (2244, 2266), False, 'from langchain.llms import FakeListLLM\n'), ((2760, 2796), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""How are you?"""'}), "(content='How are you?')\n", (2772, 2796), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, Generation, HumanMessage\n'), ((3593, 3629), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""How are you?"""'}), "(content='How are you?')\n", (3605, 3629), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, Generation, HumanMessage\n'), ((702, 728), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite://"""'], {}), "('sqlite://')\n", (715, 728), False, 'from sqlalchemy import create_engine\n'), ((2508, 2543), 'sqlalchemy.orm.Session', 'Session', (['langchain.llm_cache.engine'], {}), '(langchain.llm_cache.engine)\n', (2515, 2543), False, 'from sqlalchemy.orm import Session\n'), ((3063, 3076), 'langchain.chat_models.base.dumps', 'dumps', (['prompt'], {}), '(prompt)\n', (3068, 3076), False, 'from langchain.chat_models.base import BaseChatModel, dumps\n'), ((3896, 3909), 'langchain.chat_models.base.dumps', 'dumps', (['prompt'], {}), '(prompt)\n', (3901, 3909), False, 'from langchain.chat_models.base import BaseChatModel, dumps\n'), ((1720, 1752), 'langchain.schema.Generation', 'Generation', ([], {'text': 'cached_response'}), '(text=cached_response)\n', (1730, 1752), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, Generation, HumanMessage\n'), ((3148, 3186), 'langchain.schema.ChatGeneration', 'ChatGeneration', ([], {'message': 'cached_message'}), '(message=cached_message)\n', (3162, 3186), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, Generation, HumanMessage\n'), ((3993, 4031), 'langchain.schema.ChatGeneration', 
'ChatGeneration', ([], {'message': 'cached_message'}), '(message=cached_message)\n', (4007, 4031), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, Generation, HumanMessage\n')]
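The caching tests above exercise the global langchain.llm_cache hook; as a compact illustration of the behaviour they assert, here is a hedged sketch using the same FakeListLLM. The prompt and canned responses are made up.

import langchain
from langchain.cache import InMemoryCache
from langchain.llms import FakeListLLM

langchain.llm_cache = InMemoryCache()
llm = FakeListLLM(responses=["first answer", "second answer"])
print(llm("How are you?"))  # "first answer" -- generated and written to the cache
print(llm("How are you?"))  # "first answer" again -- served from the cache, not the second response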
import json import pytest from langchain.prompts import ChatPromptTemplate from langchain.schema.exceptions import LangChainException from langchain.schema.messages import HumanMessage from llm_api.backends.bedrock import BedrockCaller, BedrockModelCallError pytest_plugins = ("pytest_asyncio",) def test_bedrock_caller_load_settings(mocker, mock_settings): mocked_boto3_client = mocker.patch( "llm_api.backends.bedrock.BedrockCaller.get_boto3_client" ) mocked_bedrock_client = mocker.patch( "llm_api.backends.bedrock.BedrockCaller.get_client" ) caller = BedrockCaller(mock_settings) expected_test_key = mock_settings.aws_secret_access_key.get_secret_value() assert caller.settings.aws_secret_access_key.get_secret_value() == expected_test_key mocked_boto3_client.assert_called_once() mocked_bedrock_client.assert_called_once() def test_generate_openai_prompt_success(): user_input = "What day is it today?" prompt_output = BedrockCaller.generate_prompt() assert isinstance(prompt_output, ChatPromptTemplate) prompt_output = prompt_output.format_messages(text=user_input) expected_prompt_elements = 5 assert len(prompt_output) == expected_prompt_elements assert isinstance(prompt_output[-1], HumanMessage) assert prompt_output[-1].content == user_input @pytest.mark.asyncio async def test_call_model_success(mocker, mock_settings): caller = BedrockCaller(mock_settings) expected_entities = ["William Shakespeare", "Globe Theatre"] mocked_result = { "entities": [ { "uri": "William Shakespeare", "description": "English playwright, poet, and actor", "wikipedia_url": "https://en.wikipedia.org/wiki/William_Shakespeare", }, { "uri": "Globe Theatre", "description": "Theatre in London associated with William Shakespeare", "wikipedia_url": "https://en.wikipedia.org/wiki/Globe_Theatre", }, ], "connections": [ { "from": "William Shakespeare", "to": "Globe Theatre", "label": "performed plays at", }, ], "user_search": "Who is Shakespeare?", } mocked_result = "Test json ```json" + json.dumps(mocked_result) + "```" mocker.patch( "langchain.schema.runnable.base.RunnableSequence.ainvoke", return_value=mocked_result, ) user_template = "{text}" test_prompt = ChatPromptTemplate.from_messages( [ ("system", "You are a test system"), ("system", "Provide a valid JSON response to the user."), ("user", user_template), ] ) test_search = "Who is Shakespeare?" response = await caller.call_model(test_prompt, test_search) assert expected_entities == [entity["uri"] for entity in response["entities"]] @pytest.mark.asyncio async def test_call_model_failure_index_error(mocker, mock_settings): caller = BedrockCaller(mock_settings) mocked_result = { "entities": [ { "uri": "William Shakespeare", "description": "English playwright, poet, and actor", "wikipedia_url": "https://en.wikipedia.org/wiki/William_Shakespeare", }, { "uri": "Globe Theatre", "description": "Theatre in London associated with William Shakespeare", "wikipedia_url": "https://en.wikipedia.org/wiki/Globe_Theatre", }, ], "connections": [ { "from": "William Shakespeare", "to": "Globe Theatre", "label": "performed plays at", }, ], "user_search": "Who is Shakespeare?", } mocked_result = "Test json" + json.dumps(mocked_result) mocker.patch( "langchain.schema.runnable.base.RunnableSequence.ainvoke", return_value=mocked_result, ) expected_error_message = "Unable to parse model output as expected." 
user_template = "{text}" test_prompt = ChatPromptTemplate.from_messages( [ ("system", "You are a test system"), ("system", "Provide a valid JSON response to the user."), ("user", user_template), ] ) test_search = "Who is Shakespeare?" with pytest.raises(BedrockModelCallError) as exception: await caller.call_model(test_prompt, test_search) assert expected_error_message in str(exception.value) @pytest.mark.asyncio async def test_call_model_failure_json_decode_error(mocker, mock_settings): caller = BedrockCaller(mock_settings) mocked_result = { "entities": [ { "uri": "William Shakespeare", "description": "English playwright, poet, and actor", "wikipedia_url": "https://en.wikipedia.org/wiki/William_Shakespeare", }, { "uri": "Globe Theatre", "description": "Theatre in London associated with William Shakespeare", "wikipedia_url": "https://en.wikipedia.org/wiki/Globe_Theatre", }, ], "connections": [ { "from": "William Shakespeare", "to": "Globe Theatre", "label": "performed plays at", }, ], "user_search": "Who is Shakespeare?", } mocked_result = "Test ```json," + json.dumps(mocked_result) + "```" mocker.patch( "langchain.schema.runnable.base.RunnableSequence.ainvoke", return_value=mocked_result, ) expected_error_message = "Error decoding model output." user_template = "{text}" test_prompt = ChatPromptTemplate.from_messages( [ ("system", "You are a test system"), ("system", "Provide a valid JSON response to the user."), ("user", user_template), ] ) test_search = "Who is Shakespeare?" with pytest.raises(BedrockModelCallError) as exception: await caller.call_model(test_prompt, test_search) assert expected_error_message in str(exception.value) @pytest.mark.asyncio async def test_call_model_failure_api_error(mocker, mock_settings): caller = BedrockCaller(mock_settings) mocked_client_call = mocker.patch( "langchain.schema.runnable.base.RunnableSequence.ainvoke" ) expected_error_message = "Error calling model." mocked_client_call.side_effect = ValueError() user_template = "{text}" test_prompt = ChatPromptTemplate.from_messages( [ ("system", "You are a test system"), ("system", "Provide a valid JSON response to the user."), ("user", user_template), ] ) test_search = "Who is Shakespeare?" with pytest.raises(BedrockModelCallError) as exception: await caller.call_model(test_prompt, test_search) assert expected_error_message in str(exception.value) @pytest.mark.asyncio async def test_call_model_failure_langchain_error(mocker, mock_settings): caller = BedrockCaller(mock_settings) mocked_client_call = mocker.patch( "langchain.schema.runnable.base.RunnableSequence.ainvoke" ) expected_error_message = "Error sending prompt to LLM." mocked_client_call.side_effect = LangChainException() user_template = "{text}" test_prompt = ChatPromptTemplate.from_messages( [ ("system", "You are a test system"), ("system", "Provide a valid JSON response to the user."), ("user", user_template), ] ) test_search = "Who is Shakespeare?" with pytest.raises(BedrockModelCallError) as exception: await caller.call_model(test_prompt, test_search) assert expected_error_message in str(exception.value)
[ "langchain.prompts.ChatPromptTemplate.from_messages", "langchain.schema.exceptions.LangChainException" ]
[((597, 625), 'llm_api.backends.bedrock.BedrockCaller', 'BedrockCaller', (['mock_settings'], {}), '(mock_settings)\n', (610, 625), False, 'from llm_api.backends.bedrock import BedrockCaller, BedrockModelCallError\n'), ((994, 1025), 'llm_api.backends.bedrock.BedrockCaller.generate_prompt', 'BedrockCaller.generate_prompt', ([], {}), '()\n', (1023, 1025), False, 'from llm_api.backends.bedrock import BedrockCaller, BedrockModelCallError\n'), ((1443, 1471), 'llm_api.backends.bedrock.BedrockCaller', 'BedrockCaller', (['mock_settings'], {}), '(mock_settings)\n', (1456, 1471), False, 'from llm_api.backends.bedrock import BedrockCaller, BedrockModelCallError\n'), ((2561, 2724), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (["[('system', 'You are a test system'), ('system',\n 'Provide a valid JSON response to the user.'), ('user', user_template)]"], {}), "([('system', 'You are a test system'), (\n 'system', 'Provide a valid JSON response to the user.'), ('user',\n user_template)])\n", (2593, 2724), False, 'from langchain.prompts import ChatPromptTemplate\n'), ((3073, 3101), 'llm_api.backends.bedrock.BedrockCaller', 'BedrockCaller', (['mock_settings'], {}), '(mock_settings)\n', (3086, 3101), False, 'from llm_api.backends.bedrock import BedrockCaller, BedrockModelCallError\n'), ((4183, 4346), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (["[('system', 'You are a test system'), ('system',\n 'Provide a valid JSON response to the user.'), ('user', user_template)]"], {}), "([('system', 'You are a test system'), (\n 'system', 'Provide a valid JSON response to the user.'), ('user',\n user_template)])\n", (4215, 4346), False, 'from langchain.prompts import ChatPromptTemplate\n'), ((4728, 4756), 'llm_api.backends.bedrock.BedrockCaller', 'BedrockCaller', (['mock_settings'], {}), '(mock_settings)\n', (4741, 4756), False, 'from llm_api.backends.bedrock import BedrockCaller, BedrockModelCallError\n'), ((5837, 6000), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (["[('system', 'You are a test system'), ('system',\n 'Provide a valid JSON response to the user.'), ('user', user_template)]"], {}), "([('system', 'You are a test system'), (\n 'system', 'Provide a valid JSON response to the user.'), ('user',\n user_template)])\n", (5869, 6000), False, 'from langchain.prompts import ChatPromptTemplate\n'), ((6374, 6402), 'llm_api.backends.bedrock.BedrockCaller', 'BedrockCaller', (['mock_settings'], {}), '(mock_settings)\n', (6387, 6402), False, 'from llm_api.backends.bedrock import BedrockCaller, BedrockModelCallError\n'), ((6666, 6829), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (["[('system', 'You are a test system'), ('system',\n 'Provide a valid JSON response to the user.'), ('user', user_template)]"], {}), "([('system', 'You are a test system'), (\n 'system', 'Provide a valid JSON response to the user.'), ('user',\n user_template)])\n", (6698, 6829), False, 'from langchain.prompts import ChatPromptTemplate\n'), ((7210, 7238), 'llm_api.backends.bedrock.BedrockCaller', 'BedrockCaller', (['mock_settings'], {}), '(mock_settings)\n', (7223, 7238), False, 'from llm_api.backends.bedrock import BedrockCaller, BedrockModelCallError\n'), ((7449, 7469), 'langchain.schema.exceptions.LangChainException', 'LangChainException', ([], {}), '()\n', (7467, 7469), False, 'from langchain.schema.exceptions import LangChainException\n'), ((7518, 7681), 
'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (["[('system', 'You are a test system'), ('system',\n 'Provide a valid JSON response to the user.'), ('user', user_template)]"], {}), "([('system', 'You are a test system'), (\n 'system', 'Provide a valid JSON response to the user.'), ('user',\n user_template)])\n", (7550, 7681), False, 'from langchain.prompts import ChatPromptTemplate\n'), ((3909, 3934), 'json.dumps', 'json.dumps', (['mocked_result'], {}), '(mocked_result)\n', (3919, 3934), False, 'import json\n'), ((4449, 4485), 'pytest.raises', 'pytest.raises', (['BedrockModelCallError'], {}), '(BedrockModelCallError)\n', (4462, 4485), False, 'import pytest\n'), ((6103, 6139), 'pytest.raises', 'pytest.raises', (['BedrockModelCallError'], {}), '(BedrockModelCallError)\n', (6116, 6139), False, 'import pytest\n'), ((6932, 6968), 'pytest.raises', 'pytest.raises', (['BedrockModelCallError'], {}), '(BedrockModelCallError)\n', (6945, 6968), False, 'import pytest\n'), ((7784, 7820), 'pytest.raises', 'pytest.raises', (['BedrockModelCallError'], {}), '(BedrockModelCallError)\n', (7797, 7820), False, 'import pytest\n'), ((2353, 2378), 'json.dumps', 'json.dumps', (['mocked_result'], {}), '(mocked_result)\n', (2363, 2378), False, 'import json\n'), ((5568, 5593), 'json.dumps', 'json.dumps', (['mocked_result'], {}), '(mocked_result)\n', (5578, 5593), False, 'import json\n')]
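The Bedrock tests above all revolve around recovering a JSON payload from a fenced ```json block in the model's reply, and they distinguish a missing fence from malformed JSON. A minimal, self-contained sketch of that parsing step (the helper and error names below are illustrative and are not taken from the module under test):

import json

class ModelOutputParseError(ValueError):
    """Illustrative stand-in; the module under test raises its own BedrockModelCallError."""

def parse_fenced_json(raw: str) -> dict:
    # The payload is expected between a ```json opening fence and the next closing ```.
    start = raw.find("```json")
    if start == -1:
        raise ModelOutputParseError("Unable to parse model output as expected.")
    start += len("```json")
    end = raw.find("```", start)
    if end == -1:
        raise ModelOutputParseError("Unable to parse model output as expected.")
    try:
        return json.loads(raw[start:end])
    except json.JSONDecodeError as exc:
        raise ModelOutputParseError("Error decoding model output.") from exc

# Mirrors the happy-path fixture in test_call_model_success above.
print(parse_fenced_json('Test json ```json{"entities": []}```'))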
"""Test Tracer classes.""" from __future__ import annotations import json from datetime import datetime from typing import Tuple from unittest.mock import patch from uuid import UUID, uuid4 import pytest from freezegun import freeze_time from langchain.callbacks.tracers.langchain import LangChainTracer from langchain.callbacks.tracers.schemas import Run, RunTypeEnum, TracerSession from langchain.schema import LLMResult _SESSION_ID = UUID("4fbf7c55-2727-4711-8964-d821ed4d4e2a") _TENANT_ID = UUID("57a08cc4-73d2-4236-8378-549099d07fad") @pytest.fixture def lang_chain_tracer_v2(monkeypatch: pytest.MonkeyPatch) -> LangChainTracer: monkeypatch.setenv("LANGCHAIN_TENANT_ID", "test-tenant-id") monkeypatch.setenv("LANGCHAIN_ENDPOINT", "http://test-endpoint.com") monkeypatch.setenv("LANGCHAIN_API_KEY", "foo") tracer = LangChainTracer() return tracer # Mock a sample TracerSession object @pytest.fixture def sample_tracer_session_v2() -> TracerSession: return TracerSession(id=_SESSION_ID, name="Sample session", tenant_id=_TENANT_ID) @freeze_time("2023-01-01") @pytest.fixture def sample_runs() -> Tuple[Run, Run, Run]: llm_run = Run( id="57a08cc4-73d2-4236-8370-549099d07fad", name="llm_run", execution_order=1, child_execution_order=1, parent_run_id="57a08cc4-73d2-4236-8371-549099d07fad", start_time=datetime.utcnow(), end_time=datetime.utcnow(), session_id=1, inputs={"prompts": []}, outputs=LLMResult(generations=[[]]).dict(), serialized={}, extra={}, run_type=RunTypeEnum.llm, ) chain_run = Run( id="57a08cc4-73d2-4236-8371-549099d07fad", name="chain_run", execution_order=1, start_time=datetime.utcnow(), end_time=datetime.utcnow(), child_execution_order=1, serialized={}, inputs={}, outputs={}, child_runs=[llm_run], extra={}, run_type=RunTypeEnum.chain, ) tool_run = Run( id="57a08cc4-73d2-4236-8372-549099d07fad", name="tool_run", execution_order=1, child_execution_order=1, inputs={"input": "test"}, start_time=datetime.utcnow(), end_time=datetime.utcnow(), outputs=None, serialized={}, child_runs=[], extra={}, run_type=RunTypeEnum.tool, ) return llm_run, chain_run, tool_run def test_persist_run( lang_chain_tracer_v2: LangChainTracer, sample_tracer_session_v2: TracerSession, sample_runs: Tuple[Run, Run, Run], ) -> None: """Test that persist_run method calls requests.post once per method call.""" with patch("langchain.callbacks.tracers.langchain.requests.post") as post, patch( "langchain.callbacks.tracers.langchain.requests.get" ) as get: post.return_value.raise_for_status.return_value = None lang_chain_tracer_v2.session = sample_tracer_session_v2 for run in sample_runs: lang_chain_tracer_v2.run_map[str(run.id)] = run for run in sample_runs: lang_chain_tracer_v2._end_trace(run) assert post.call_count == 3 assert get.call_count == 0 def test_persist_run_with_example_id( lang_chain_tracer_v2: LangChainTracer, sample_tracer_session_v2: TracerSession, sample_runs: Tuple[Run, Run, Run], ) -> None: """Test the example ID is assigned only to the parent run and not the children.""" example_id = uuid4() llm_run, chain_run, tool_run = sample_runs chain_run.child_runs = [tool_run] tool_run.child_runs = [llm_run] with patch("langchain.callbacks.tracers.langchain.requests.post") as post, patch( "langchain.callbacks.tracers.langchain.requests.get" ) as get: post.return_value.raise_for_status.return_value = None lang_chain_tracer_v2.session = sample_tracer_session_v2 lang_chain_tracer_v2.example_id = example_id lang_chain_tracer_v2._persist_run(chain_run) assert post.call_count == 3 assert get.call_count == 0 posted_data = [ 
json.loads(call_args[1]["data"]) for call_args in post.call_args_list ] assert posted_data[0]["id"] == str(chain_run.id) assert posted_data[0]["reference_example_id"] == str(example_id) assert posted_data[1]["id"] == str(tool_run.id) assert not posted_data[1].get("reference_example_id") assert posted_data[2]["id"] == str(llm_run.id) assert not posted_data[2].get("reference_example_id")
[ "langchain.callbacks.tracers.langchain.LangChainTracer", "langchain.schema.LLMResult", "langchain.callbacks.tracers.schemas.TracerSession" ]
[((441, 485), 'uuid.UUID', 'UUID', (['"""4fbf7c55-2727-4711-8964-d821ed4d4e2a"""'], {}), "('4fbf7c55-2727-4711-8964-d821ed4d4e2a')\n", (445, 485), False, 'from uuid import UUID, uuid4\n'), ((499, 543), 'uuid.UUID', 'UUID', (['"""57a08cc4-73d2-4236-8378-549099d07fad"""'], {}), "('57a08cc4-73d2-4236-8378-549099d07fad')\n", (503, 543), False, 'from uuid import UUID, uuid4\n'), ((1070, 1095), 'freezegun.freeze_time', 'freeze_time', (['"""2023-01-01"""'], {}), "('2023-01-01')\n", (1081, 1095), False, 'from freezegun import freeze_time\n'), ((841, 858), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {}), '()\n', (856, 858), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((992, 1066), 'langchain.callbacks.tracers.schemas.TracerSession', 'TracerSession', ([], {'id': '_SESSION_ID', 'name': '"""Sample session"""', 'tenant_id': '_TENANT_ID'}), "(id=_SESSION_ID, name='Sample session', tenant_id=_TENANT_ID)\n", (1005, 1066), False, 'from langchain.callbacks.tracers.schemas import Run, RunTypeEnum, TracerSession\n'), ((3506, 3513), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (3511, 3513), False, 'from uuid import UUID, uuid4\n'), ((2700, 2760), 'unittest.mock.patch', 'patch', (['"""langchain.callbacks.tracers.langchain.requests.post"""'], {}), "('langchain.callbacks.tracers.langchain.requests.post')\n", (2705, 2760), False, 'from unittest.mock import patch\n'), ((2770, 2829), 'unittest.mock.patch', 'patch', (['"""langchain.callbacks.tracers.langchain.requests.get"""'], {}), "('langchain.callbacks.tracers.langchain.requests.get')\n", (2775, 2829), False, 'from unittest.mock import patch\n'), ((3644, 3704), 'unittest.mock.patch', 'patch', (['"""langchain.callbacks.tracers.langchain.requests.post"""'], {}), "('langchain.callbacks.tracers.langchain.requests.post')\n", (3649, 3704), False, 'from unittest.mock import patch\n'), ((3714, 3773), 'unittest.mock.patch', 'patch', (['"""langchain.callbacks.tracers.langchain.requests.get"""'], {}), "('langchain.callbacks.tracers.langchain.requests.get')\n", (3719, 3773), False, 'from unittest.mock import patch\n'), ((1390, 1407), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1405, 1407), False, 'from datetime import datetime\n'), ((1426, 1443), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1441, 1443), False, 'from datetime import datetime\n'), ((1776, 1793), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1791, 1793), False, 'from datetime import datetime\n'), ((1812, 1829), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1827, 1829), False, 'from datetime import datetime\n'), ((2226, 2243), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2241, 2243), False, 'from datetime import datetime\n'), ((2262, 2279), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2277, 2279), False, 'from datetime import datetime\n'), ((4137, 4169), 'json.loads', 'json.loads', (["call_args[1]['data']"], {}), "(call_args[1]['data'])\n", (4147, 4169), False, 'import json\n'), ((1515, 1542), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': '[[]]'}), '(generations=[[]])\n', (1524, 1542), False, 'from langchain.schema import LLMResult\n')]
import langchain_visualizer # isort:skip # noqa: F401 import asyncio from typing import Any, Dict, List, Optional import vcr_langchain as vcr from langchain import PromptTemplate from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains import LLMChain from langchain.chains.base import Chain from langchain.llms import OpenAI # ========================== Start of langchain example code ========================== # https://langchain.readthedocs.io/en/latest/modules/chains/getting_started.html class ConcatenateChain(Chain): chain_1: LLMChain chain_2: LLMChain @property def input_keys(self) -> List[str]: # Union of the input keys of the two chains. all_input_vars = set(self.chain_1.input_keys).union( set(self.chain_2.input_keys) ) return list(all_input_vars) @property def output_keys(self) -> List[str]: return ["concat_output"] def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: output_1 = self.chain_1.run(inputs) output_2 = self.chain_2.run(inputs) return {"concat_output": output_1 + output_2} llm = OpenAI() prompt_1 = PromptTemplate( input_variables=["product"], template="What is a good name for a company that makes {product}?", ) chain_1 = LLMChain(llm=llm, prompt=prompt_1) prompt_2 = PromptTemplate( input_variables=["product"], template="What is a good slogan for a company that makes {product}?", ) chain_2 = LLMChain(llm=llm, prompt=prompt_2) concat_chain = ConcatenateChain(chain_1=chain_1, chain_2=chain_2) chain = concat_chain # ================================== Execute example ================================== @vcr.use_cassette() async def custom_chain_demo(): return chain.run("colorful socks") def test_llm_usage_succeeds(): """Check that the chain can run normally""" result = asyncio.get_event_loop().run_until_complete(custom_chain_demo()) assert ( result.strip() == 'Sock Spectacular.\n\n"Step Up Your Style with Colorful Socks!"' ) if __name__ == "__main__": from langchain_visualizer import visualize visualize(custom_chain_demo)
[ "langchain.chains.LLMChain", "langchain_visualizer.visualize", "langchain.llms.OpenAI", "langchain.PromptTemplate" ]
[((1254, 1262), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (1260, 1262), False, 'from langchain.llms import OpenAI\n'), ((1275, 1391), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['product']", 'template': '"""What is a good name for a company that makes {product}?"""'}), "(input_variables=['product'], template=\n 'What is a good name for a company that makes {product}?')\n", (1289, 1391), False, 'from langchain import PromptTemplate\n'), ((1408, 1442), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt_1'}), '(llm=llm, prompt=prompt_1)\n', (1416, 1442), False, 'from langchain.chains import LLMChain\n'), ((1455, 1573), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['product']", 'template': '"""What is a good slogan for a company that makes {product}?"""'}), "(input_variables=['product'], template=\n 'What is a good slogan for a company that makes {product}?')\n", (1469, 1573), False, 'from langchain import PromptTemplate\n'), ((1590, 1624), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt_2'}), '(llm=llm, prompt=prompt_2)\n', (1598, 1624), False, 'from langchain.chains import LLMChain\n'), ((1806, 1824), 'vcr_langchain.use_cassette', 'vcr.use_cassette', ([], {}), '()\n', (1822, 1824), True, 'import vcr_langchain as vcr\n'), ((2253, 2281), 'langchain_visualizer.visualize', 'visualize', (['custom_chain_demo'], {}), '(custom_chain_demo)\n', (2262, 2281), False, 'from langchain_visualizer import visualize\n'), ((1989, 2013), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2011, 2013), False, 'import asyncio\n')]
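The ConcatenateChain record above composes two LLMChains over the same input. As a rough, self-contained sketch (not part of the dataset), the same concatenation can be exercised offline by swapping in a canned-response fake LLM; FakeListLLM is assumed here to be importable from langchain.llms.fake:

from langchain.chains import LLMChain
from langchain.llms.fake import FakeListLLM
from langchain.prompts import PromptTemplate

# The fake LLM replays canned answers in order, so the composition logic can be checked offline.
fake_llm = FakeListLLM(responses=["Sock Spectacular.", "Step Up Your Style!"])

name_chain = LLMChain(
    llm=fake_llm,
    prompt=PromptTemplate(
        input_variables=["product"],
        template="What is a good name for a company that makes {product}?",
    ),
)
slogan_chain = LLMChain(
    llm=fake_llm,
    prompt=PromptTemplate(
        input_variables=["product"],
        template="What is a good slogan for a company that makes {product}?",
    ),
)

# Equivalent to ConcatenateChain._call: run both chains and join the outputs.
combined = name_chain.run("colorful socks") + slogan_chain.run("colorful socks")
print(combined)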
"""Test Momento cache functionality. To run tests, set the environment variable MOMENTO_AUTH_TOKEN to a valid Momento auth token. This can be obtained by signing up for a free Momento account at https://gomomento.com/. """ from __future__ import annotations import uuid from datetime import timedelta from typing import Iterator import pytest import langchain from langchain.cache import MomentoCache from langchain.schema import Generation, LLMResult from tests.unit_tests.llms.fake_llm import FakeLLM def random_string() -> str: return str(uuid.uuid4()) @pytest.fixture(scope="module") def momento_cache() -> Iterator[MomentoCache]: from momento import CacheClient, Configurations, CredentialProvider cache_name = f"langchain-test-cache-{random_string()}" client = CacheClient( Configurations.Laptop.v1(), CredentialProvider.from_environment_variable("MOMENTO_AUTH_TOKEN"), default_ttl=timedelta(seconds=30), ) try: llm_cache = MomentoCache(client, cache_name) langchain.llm_cache = llm_cache yield llm_cache finally: client.delete_cache(cache_name) def test_invalid_ttl() -> None: from momento import CacheClient, Configurations, CredentialProvider client = CacheClient( Configurations.Laptop.v1(), CredentialProvider.from_environment_variable("MOMENTO_AUTH_TOKEN"), default_ttl=timedelta(seconds=30), ) with pytest.raises(ValueError): MomentoCache(client, cache_name=random_string(), ttl=timedelta(seconds=-1)) def test_momento_cache_miss(momento_cache: MomentoCache) -> None: llm = FakeLLM() stub_llm_output = LLMResult(generations=[[Generation(text="foo")]]) assert llm.generate([random_string()]) == stub_llm_output @pytest.mark.parametrize( "prompts, generations", [ # Single prompt, single generation ([random_string()], [[random_string()]]), # Single prompt, multiple generations ([random_string()], [[random_string(), random_string()]]), # Single prompt, multiple generations ([random_string()], [[random_string(), random_string(), random_string()]]), # Multiple prompts, multiple generations ( [random_string(), random_string()], [[random_string()], [random_string(), random_string()]], ), ], ) def test_momento_cache_hit( momento_cache: MomentoCache, prompts: list[str], generations: list[list[str]] ) -> None: llm = FakeLLM() params = llm.dict() params["stop"] = None llm_string = str(sorted([(k, v) for k, v in params.items()])) llm_generations = [ [ Generation(text=generation, generation_info=params) for generation in prompt_i_generations ] for prompt_i_generations in generations ] for prompt_i, llm_generations_i in zip(prompts, llm_generations): momento_cache.update(prompt_i, llm_string, llm_generations_i) assert llm.generate(prompts) == LLMResult( generations=llm_generations, llm_output={} )
[ "langchain.cache.MomentoCache", "langchain.schema.LLMResult", "langchain.schema.Generation" ]
[((569, 599), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (583, 599), False, 'import pytest\n'), ((1637, 1646), 'tests.unit_tests.llms.fake_llm.FakeLLM', 'FakeLLM', ([], {}), '()\n', (1644, 1646), False, 'from tests.unit_tests.llms.fake_llm import FakeLLM\n'), ((2507, 2516), 'tests.unit_tests.llms.fake_llm.FakeLLM', 'FakeLLM', ([], {}), '()\n', (2514, 2516), False, 'from tests.unit_tests.llms.fake_llm import FakeLLM\n'), ((552, 564), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (562, 564), False, 'import uuid\n'), ((813, 839), 'momento.Configurations.Laptop.v1', 'Configurations.Laptop.v1', ([], {}), '()\n', (837, 839), False, 'from momento import CacheClient, Configurations, CredentialProvider\n'), ((849, 915), 'momento.CredentialProvider.from_environment_variable', 'CredentialProvider.from_environment_variable', (['"""MOMENTO_AUTH_TOKEN"""'], {}), "('MOMENTO_AUTH_TOKEN')\n", (893, 915), False, 'from momento import CacheClient, Configurations, CredentialProvider\n'), ((995, 1027), 'langchain.cache.MomentoCache', 'MomentoCache', (['client', 'cache_name'], {}), '(client, cache_name)\n', (1007, 1027), False, 'from langchain.cache import MomentoCache\n'), ((1286, 1312), 'momento.Configurations.Laptop.v1', 'Configurations.Laptop.v1', ([], {}), '()\n', (1310, 1312), False, 'from momento import CacheClient, Configurations, CredentialProvider\n'), ((1322, 1388), 'momento.CredentialProvider.from_environment_variable', 'CredentialProvider.from_environment_variable', (['"""MOMENTO_AUTH_TOKEN"""'], {}), "('MOMENTO_AUTH_TOKEN')\n", (1366, 1388), False, 'from momento import CacheClient, Configurations, CredentialProvider\n'), ((1448, 1473), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1461, 1473), False, 'import pytest\n'), ((3024, 3077), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'llm_generations', 'llm_output': '{}'}), '(generations=llm_generations, llm_output={})\n', (3033, 3077), False, 'from langchain.schema import Generation, LLMResult\n'), ((937, 958), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(30)'}), '(seconds=30)\n', (946, 958), False, 'from datetime import timedelta\n'), ((1410, 1431), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(30)'}), '(seconds=30)\n', (1419, 1431), False, 'from datetime import timedelta\n'), ((2680, 2731), 'langchain.schema.Generation', 'Generation', ([], {'text': 'generation', 'generation_info': 'params'}), '(text=generation, generation_info=params)\n', (2690, 2731), False, 'from langchain.schema import Generation, LLMResult\n'), ((1536, 1557), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(-1)'}), '(seconds=-1)\n', (1545, 1557), False, 'from datetime import timedelta\n'), ((1693, 1715), 'langchain.schema.Generation', 'Generation', ([], {'text': '"""foo"""'}), "(text='foo')\n", (1703, 1715), False, 'from langchain.schema import Generation, LLMResult\n')]
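The Momento tests exercise the generic LLM-cache contract: update(prompt, llm_string, generations) writes an entry and lookup(prompt, llm_string) reads it back. A small sketch of that round trip against the in-process InMemoryCache, which needs no external service (the prompt and llm_string values are invented):

from langchain.cache import InMemoryCache
from langchain.schema import Generation

cache = InMemoryCache()

# The cache key is the prompt plus a string describing the LLM and its parameters.
prompt = "What is a good name for a company that makes colorful socks?"
llm_string = "[('model_name', 'text-davinci-003'), ('temperature', 0.0)]"

cache.update(prompt, llm_string, [Generation(text="Sock Spectacular.")])

# A later call with the same prompt/llm_string pair is served from the cache.
hit = cache.lookup(prompt, llm_string)
assert hit is not None and hit[0].text == "Sock Spectacular."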
import threading import time import unittest import unittest.mock from typing import Any, Dict from uuid import UUID import pytest from langchain_core.outputs import LLMResult from langchain_core.tracers.langchain import LangChainTracer from langchain_core.tracers.schemas import Run from langsmith import Client def test_example_id_assignment_threadsafe() -> None: """Test that example assigned at callback start/end is honored.""" example_ids = {} def mock_create_run(**kwargs: Any) -> Any: example_ids[kwargs.get("id")] = kwargs.get("reference_example_id") return unittest.mock.MagicMock() client = unittest.mock.MagicMock(spec=Client) client.create_run = mock_create_run tracer = LangChainTracer(client=client) old_persist_run_single = tracer._persist_run_single def new_persist_run_single(run: Run) -> None: time.sleep(0.01) old_persist_run_single(run) with unittest.mock.patch.object( tracer, "_persist_run_single", new=new_persist_run_single ): run_id_1 = UUID("9d878ab3-e5ca-4218-aef6-44cbdc90160a") run_id_2 = UUID("f1f9fa53-8b2f-4742-bdbc-38215f7bd1e1") example_id_1 = UUID("57e42c57-8c79-4d9f-8765-bf6cd3a98055") tracer.example_id = example_id_1 tracer.on_llm_start({"name": "example_1"}, ["foo"], run_id=run_id_1) tracer.on_llm_end(LLMResult(generations=[], llm_output={}), run_id=run_id_1) example_id_2 = UUID("4f31216e-7c26-4027-a5fd-0bbf9ace17dc") tracer.example_id = example_id_2 tracer.on_llm_start({"name": "example_2"}, ["foo"], run_id=run_id_2) tracer.on_llm_end(LLMResult(generations=[], llm_output={}), run_id=run_id_2) tracer.example_id = None expected_example_ids = { run_id_1: example_id_1, run_id_2: example_id_2, } tracer.wait_for_futures() assert example_ids == expected_example_ids def test_log_lock() -> None: """Test that example assigned at callback start/end is honored.""" client = unittest.mock.MagicMock(spec=Client) tracer = LangChainTracer(client=client) with unittest.mock.patch.object(tracer, "_persist_run_single", new=lambda _: _): run_id_1 = UUID("9d878ab3-e5ca-4218-aef6-44cbdc90160a") lock = threading.Lock() tracer.on_chain_start({"name": "example_1"}, {"input": lock}, run_id=run_id_1) tracer.on_chain_end({}, run_id=run_id_1) tracer.wait_for_futures() class LangChainProjectNameTest(unittest.TestCase): """ Test that the project name is set correctly for runs. 
""" class SetProperTracerProjectTestCase: def __init__( self, test_name: str, envvars: Dict[str, str], expected_project_name: str ): self.test_name = test_name self.envvars = envvars self.expected_project_name = expected_project_name def test_correct_get_tracer_project(self) -> None: cases = [ self.SetProperTracerProjectTestCase( test_name="default to 'default' when no project provided", envvars={}, expected_project_name="default", ), self.SetProperTracerProjectTestCase( test_name="use session_name for legacy tracers", envvars={"LANGCHAIN_SESSION": "old_timey_session"}, expected_project_name="old_timey_session", ), self.SetProperTracerProjectTestCase( test_name="use LANGCHAIN_PROJECT over SESSION_NAME", envvars={ "LANGCHAIN_SESSION": "old_timey_session", "LANGCHAIN_PROJECT": "modern_session", }, expected_project_name="modern_session", ), ] for case in cases: with self.subTest(msg=case.test_name): with pytest.MonkeyPatch.context() as mp: for k, v in case.envvars.items(): mp.setenv(k, v) client = unittest.mock.MagicMock(spec=Client) tracer = LangChainTracer(client=client) projects = [] def mock_create_run(**kwargs: Any) -> Any: projects.append(kwargs.get("project_name")) return unittest.mock.MagicMock() client.create_run = mock_create_run tracer.on_llm_start( {"name": "example_1"}, ["foo"], run_id=UUID("9d878ab3-e5ca-4218-aef6-44cbdc90160a"), ) tracer.wait_for_futures() assert ( len(projects) == 1 and projects[0] == case.expected_project_name )
[ "langchain_core.tracers.langchain.LangChainTracer", "langchain_core.outputs.LLMResult" ]
[((639, 675), 'unittest.mock.MagicMock', 'unittest.mock.MagicMock', ([], {'spec': 'Client'}), '(spec=Client)\n', (662, 675), False, 'import unittest\n'), ((729, 759), 'langchain_core.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'client': 'client'}), '(client=client)\n', (744, 759), False, 'from langchain_core.tracers.langchain import LangChainTracer\n'), ((2058, 2094), 'unittest.mock.MagicMock', 'unittest.mock.MagicMock', ([], {'spec': 'Client'}), '(spec=Client)\n', (2081, 2094), False, 'import unittest\n'), ((2108, 2138), 'langchain_core.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'client': 'client'}), '(client=client)\n', (2123, 2138), False, 'from langchain_core.tracers.langchain import LangChainTracer\n'), ((599, 624), 'unittest.mock.MagicMock', 'unittest.mock.MagicMock', ([], {}), '()\n', (622, 624), False, 'import unittest\n'), ((875, 891), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (885, 891), False, 'import time\n'), ((938, 1028), 'unittest.mock.patch.object', 'unittest.mock.patch.object', (['tracer', '"""_persist_run_single"""'], {'new': 'new_persist_run_single'}), "(tracer, '_persist_run_single', new=\n new_persist_run_single)\n", (964, 1028), False, 'import unittest\n'), ((1058, 1102), 'uuid.UUID', 'UUID', (['"""9d878ab3-e5ca-4218-aef6-44cbdc90160a"""'], {}), "('9d878ab3-e5ca-4218-aef6-44cbdc90160a')\n", (1062, 1102), False, 'from uuid import UUID\n'), ((1122, 1166), 'uuid.UUID', 'UUID', (['"""f1f9fa53-8b2f-4742-bdbc-38215f7bd1e1"""'], {}), "('f1f9fa53-8b2f-4742-bdbc-38215f7bd1e1')\n", (1126, 1166), False, 'from uuid import UUID\n'), ((1190, 1234), 'uuid.UUID', 'UUID', (['"""57e42c57-8c79-4d9f-8765-bf6cd3a98055"""'], {}), "('57e42c57-8c79-4d9f-8765-bf6cd3a98055')\n", (1194, 1234), False, 'from uuid import UUID\n'), ((1461, 1505), 'uuid.UUID', 'UUID', (['"""4f31216e-7c26-4027-a5fd-0bbf9ace17dc"""'], {}), "('4f31216e-7c26-4027-a5fd-0bbf9ace17dc')\n", (1465, 1505), False, 'from uuid import UUID\n'), ((2149, 2223), 'unittest.mock.patch.object', 'unittest.mock.patch.object', (['tracer', '"""_persist_run_single"""'], {'new': '(lambda _: _)'}), "(tracer, '_persist_run_single', new=lambda _: _)\n", (2175, 2223), False, 'import unittest\n'), ((2244, 2288), 'uuid.UUID', 'UUID', (['"""9d878ab3-e5ca-4218-aef6-44cbdc90160a"""'], {}), "('9d878ab3-e5ca-4218-aef6-44cbdc90160a')\n", (2248, 2288), False, 'from uuid import UUID\n'), ((2304, 2320), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (2318, 2320), False, 'import threading\n'), ((1379, 1419), 'langchain_core.outputs.LLMResult', 'LLMResult', ([], {'generations': '[]', 'llm_output': '{}'}), '(generations=[], llm_output={})\n', (1388, 1419), False, 'from langchain_core.outputs import LLMResult\n'), ((1650, 1690), 'langchain_core.outputs.LLMResult', 'LLMResult', ([], {'generations': '[]', 'llm_output': '{}'}), '(generations=[], llm_output={})\n', (1659, 1690), False, 'from langchain_core.outputs import LLMResult\n'), ((3928, 3956), 'pytest.MonkeyPatch.context', 'pytest.MonkeyPatch.context', ([], {}), '()\n', (3954, 3956), False, 'import pytest\n'), ((4088, 4124), 'unittest.mock.MagicMock', 'unittest.mock.MagicMock', ([], {'spec': 'Client'}), '(spec=Client)\n', (4111, 4124), False, 'import unittest\n'), ((4154, 4184), 'langchain_core.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'client': 'client'}), '(client=client)\n', (4169, 4184), False, 'from langchain_core.tracers.langchain import LangChainTracer\n'), ((4382, 4407), 'unittest.mock.MagicMock', 'unittest.mock.MagicMock', 
([], {}), '()\n', (4405, 4407), False, 'import unittest\n'), ((4618, 4662), 'uuid.UUID', 'UUID', (['"""9d878ab3-e5ca-4218-aef6-44cbdc90160a"""'], {}), "('9d878ab3-e5ca-4218-aef6-44cbdc90160a')\n", (4622, 4662), False, 'from uuid import UUID\n')]
import os import cassio import langchain from langchain.cache import CassandraCache from langchain_community.chat_models import ChatOpenAI from langchain_core.messages import BaseMessage from langchain_core.prompts import ChatPromptTemplate from langchain_core.runnables import RunnableLambda use_cassandra = int(os.environ.get("USE_CASSANDRA_CLUSTER", "0")) if use_cassandra: from .cassandra_cluster_init import get_cassandra_connection session, keyspace = get_cassandra_connection() cassio.init( session=session, keyspace=keyspace, ) else: cassio.init( token=os.environ["ASTRA_DB_APPLICATION_TOKEN"], database_id=os.environ["ASTRA_DB_ID"], keyspace=os.environ.get("ASTRA_DB_KEYSPACE"), ) # inits langchain.llm_cache = CassandraCache(session=None, keyspace=None) llm = ChatOpenAI() # custom runnables def msg_splitter(msg: BaseMessage): return [w.strip() for w in msg.content.split(",") if w.strip()] # synonym-route preparation synonym_prompt = ChatPromptTemplate.from_template( "List up to five comma-separated synonyms of this word: {word}" ) chain = synonym_prompt | llm | RunnableLambda(msg_splitter)
[ "langchain_core.prompts.ChatPromptTemplate.from_template", "langchain_community.chat_models.ChatOpenAI", "langchain_core.runnables.RunnableLambda", "langchain.cache.CassandraCache" ]
[((788, 831), 'langchain.cache.CassandraCache', 'CassandraCache', ([], {'session': 'None', 'keyspace': 'None'}), '(session=None, keyspace=None)\n', (802, 831), False, 'from langchain.cache import CassandraCache\n'), ((838, 850), 'langchain_community.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (848, 850), False, 'from langchain_community.chat_models import ChatOpenAI\n'), ((1023, 1125), 'langchain_core.prompts.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', (['"""List up to five comma-separated synonyms of this word: {word}"""'], {}), "(\n 'List up to five comma-separated synonyms of this word: {word}')\n", (1055, 1125), False, 'from langchain_core.prompts import ChatPromptTemplate\n'), ((315, 359), 'os.environ.get', 'os.environ.get', (['"""USE_CASSANDRA_CLUSTER"""', '"""0"""'], {}), "('USE_CASSANDRA_CLUSTER', '0')\n", (329, 359), False, 'import os\n'), ((500, 547), 'cassio.init', 'cassio.init', ([], {'session': 'session', 'keyspace': 'keyspace'}), '(session=session, keyspace=keyspace)\n', (511, 547), False, 'import cassio\n'), ((1159, 1187), 'langchain_core.runnables.RunnableLambda', 'RunnableLambda', (['msg_splitter'], {}), '(msg_splitter)\n', (1173, 1187), False, 'from langchain_core.runnables import RunnableLambda\n'), ((714, 749), 'os.environ.get', 'os.environ.get', (['"""ASTRA_DB_KEYSPACE"""'], {}), "('ASTRA_DB_KEYSPACE')\n", (728, 749), False, 'import os\n')]
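The tail of the synonym chain above is a plain function wrapped in RunnableLambda. A self-contained sketch of just that step, runnable without Cassandra or OpenAI credentials (the sample message text is invented); in the full template the same split happens after the cached ChatOpenAI call, roughly chain.invoke({"word": "happy"}):

from langchain_core.messages import AIMessage
from langchain_core.runnables import RunnableLambda

def msg_splitter(msg):
    return [w.strip() for w in msg.content.split(",") if w.strip()]

# RunnableLambda turns the plain function into a runnable that can close an LCEL pipeline;
# here it is invoked directly on a hand-built message instead of a live ChatOpenAI reply.
splitter = RunnableLambda(msg_splitter)
print(splitter.invoke(AIMessage(content="glad, joyful , cheerful,")))
# -> ['glad', 'joyful', 'cheerful']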
"""A tracer that runs evaluators over completed runs.""" from __future__ import annotations import logging from concurrent.futures import Future, ThreadPoolExecutor from typing import Any, Dict, List, Optional, Sequence, Set, Union from uuid import UUID import langsmith from langsmith.evaluation.evaluator import EvaluationResult from langchain.callbacks import manager from langchain.callbacks.tracers import langchain as langchain_tracer from langchain.callbacks.tracers.base import BaseTracer from langchain.callbacks.tracers.schemas import Run logger = logging.getLogger(__name__) class EvaluatorCallbackHandler(BaseTracer): """A tracer that runs a run evaluator whenever a run is persisted. Parameters ---------- evaluators : Sequence[RunEvaluator] The run evaluators to apply to all top level runs. max_workers : int, optional The maximum number of worker threads to use for running the evaluators. If not specified, it will default to the number of evaluators. client : LangSmith Client, optional The LangSmith client instance to use for evaluating the runs. If not specified, a new instance will be created. example_id : Union[UUID, str], optional The example ID to be associated with the runs. project_name : str, optional The LangSmith project name to be organize eval chain runs under. Attributes ---------- example_id : Union[UUID, None] The example ID associated with the runs. client : Client The LangSmith client instance used for evaluating the runs. evaluators : Sequence[RunEvaluator] The sequence of run evaluators to be executed. executor : ThreadPoolExecutor The thread pool executor used for running the evaluators. futures : Set[Future] The set of futures representing the running evaluators. skip_unfinished : bool Whether to skip runs that are not finished or raised an error. project_name : Optional[str] The LangSmith project name to be organize eval chain runs under. """ name = "evaluator_callback_handler" def __init__( self, evaluators: Sequence[langsmith.RunEvaluator], max_workers: Optional[int] = None, client: Optional[langsmith.Client] = None, example_id: Optional[Union[UUID, str]] = None, skip_unfinished: bool = True, project_name: Optional[str] = "evaluators", **kwargs: Any, ) -> None: super().__init__(**kwargs) self.example_id = ( UUID(example_id) if isinstance(example_id, str) else example_id ) self.client = client or langchain_tracer.get_client() self.evaluators = evaluators self.max_workers = max_workers or len(evaluators) self.futures: Set[Future] = set() self.skip_unfinished = skip_unfinished self.project_name = project_name self.logged_eval_results: Dict[str, List[EvaluationResult]] = {} def _evaluate_in_project(self, run: Run, evaluator: langsmith.RunEvaluator) -> None: """Evaluate the run in the project. Parameters ---------- run : Run The run to be evaluated. evaluator : RunEvaluator The evaluator to use for evaluating the run. """ try: if self.project_name is None: eval_result = self.client.evaluate_run(run, evaluator) with manager.tracing_v2_enabled( project_name=self.project_name, tags=["eval"], client=self.client ): eval_result = self.client.evaluate_run(run, evaluator) except Exception as e: logger.error( f"Error evaluating run {run.id} with " f"{evaluator.__class__.__name__}: {e}", exc_info=True, ) raise e example_id = str(run.reference_example_id) self.logged_eval_results.setdefault(example_id, []).append(eval_result) def _persist_run(self, run: Run) -> None: """Run the evaluator on the run. Parameters ---------- run : Run The run to be evaluated. 
""" if self.skip_unfinished and not run.outputs: logger.debug(f"Skipping unfinished run {run.id}") return run_ = run.copy() run_.reference_example_id = self.example_id if self.max_workers > 0: with ThreadPoolExecutor(max_workers=self.max_workers) as executor: list( executor.map( self._evaluate_in_project, [run_ for _ in range(len(self.evaluators))], self.evaluators, ) ) else: for evaluator in self.evaluators: self._evaluate_in_project(run_, evaluator)
[ "langchain.callbacks.tracers.langchain.get_client", "langchain.callbacks.manager.tracing_v2_enabled" ]
[((562, 589), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (579, 589), False, 'import logging\n'), ((2581, 2597), 'uuid.UUID', 'UUID', (['example_id'], {}), '(example_id)\n', (2585, 2597), False, 'from uuid import UUID\n'), ((2687, 2716), 'langchain.callbacks.tracers.langchain.get_client', 'langchain_tracer.get_client', ([], {}), '()\n', (2714, 2716), True, 'from langchain.callbacks.tracers import langchain as langchain_tracer\n'), ((3489, 3586), 'langchain.callbacks.manager.tracing_v2_enabled', 'manager.tracing_v2_enabled', ([], {'project_name': 'self.project_name', 'tags': "['eval']", 'client': 'self.client'}), "(project_name=self.project_name, tags=['eval'],\n client=self.client)\n", (3515, 3586), False, 'from langchain.callbacks import manager\n'), ((4506, 4554), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'self.max_workers'}), '(max_workers=self.max_workers)\n', (4524, 4554), False, 'from concurrent.futures import Future, ThreadPoolExecutor\n')]
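A hedged sketch of how a handler like EvaluatorCallbackHandler is typically wired up; the import path, the mocked client, and the placeholder evaluator below are assumptions for illustration, not something the record itself shows:

from unittest.mock import MagicMock

from langsmith import Client

# Assumed import path for the module shown above.
from langchain.callbacks.tracers.evaluation import EvaluatorCallbackHandler

handler = EvaluatorCallbackHandler(
    evaluators=[MagicMock()],        # placeholder for a real langsmith.RunEvaluator
    client=MagicMock(spec=Client),  # mocked so nothing reaches LangSmith
    project_name="evaluators",
    max_workers=1,
)

# The handler is then attached wherever the runs to evaluate are produced, for example
# some_chain.invoke(inputs, config={"callbacks": [handler]}); each persisted top-level run
# is handed to every configured evaluator under the given project name.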
"""A tracer that runs evaluators over completed runs.""" from __future__ import annotations import logging from concurrent.futures import Future, ThreadPoolExecutor from typing import Any, Dict, List, Optional, Sequence, Set, Union from uuid import UUID import langsmith from langsmith.evaluation.evaluator import EvaluationResult from langchain.callbacks import manager from langchain.callbacks.tracers import langchain as langchain_tracer from langchain.callbacks.tracers.base import BaseTracer from langchain.callbacks.tracers.schemas import Run logger = logging.getLogger(__name__) class EvaluatorCallbackHandler(BaseTracer): """A tracer that runs a run evaluator whenever a run is persisted. Parameters ---------- evaluators : Sequence[RunEvaluator] The run evaluators to apply to all top level runs. max_workers : int, optional The maximum number of worker threads to use for running the evaluators. If not specified, it will default to the number of evaluators. client : LangSmith Client, optional The LangSmith client instance to use for evaluating the runs. If not specified, a new instance will be created. example_id : Union[UUID, str], optional The example ID to be associated with the runs. project_name : str, optional The LangSmith project name to be organize eval chain runs under. Attributes ---------- example_id : Union[UUID, None] The example ID associated with the runs. client : Client The LangSmith client instance used for evaluating the runs. evaluators : Sequence[RunEvaluator] The sequence of run evaluators to be executed. executor : ThreadPoolExecutor The thread pool executor used for running the evaluators. futures : Set[Future] The set of futures representing the running evaluators. skip_unfinished : bool Whether to skip runs that are not finished or raised an error. project_name : Optional[str] The LangSmith project name to be organize eval chain runs under. """ name = "evaluator_callback_handler" def __init__( self, evaluators: Sequence[langsmith.RunEvaluator], max_workers: Optional[int] = None, client: Optional[langsmith.Client] = None, example_id: Optional[Union[UUID, str]] = None, skip_unfinished: bool = True, project_name: Optional[str] = "evaluators", **kwargs: Any, ) -> None: super().__init__(**kwargs) self.example_id = ( UUID(example_id) if isinstance(example_id, str) else example_id ) self.client = client or langchain_tracer.get_client() self.evaluators = evaluators self.max_workers = max_workers or len(evaluators) self.futures: Set[Future] = set() self.skip_unfinished = skip_unfinished self.project_name = project_name self.logged_eval_results: Dict[str, List[EvaluationResult]] = {} def _evaluate_in_project(self, run: Run, evaluator: langsmith.RunEvaluator) -> None: """Evaluate the run in the project. Parameters ---------- run : Run The run to be evaluated. evaluator : RunEvaluator The evaluator to use for evaluating the run. """ try: if self.project_name is None: eval_result = self.client.evaluate_run(run, evaluator) with manager.tracing_v2_enabled( project_name=self.project_name, tags=["eval"], client=self.client ): eval_result = self.client.evaluate_run(run, evaluator) except Exception as e: logger.error( f"Error evaluating run {run.id} with " f"{evaluator.__class__.__name__}: {e}", exc_info=True, ) raise e example_id = str(run.reference_example_id) self.logged_eval_results.setdefault(example_id, []).append(eval_result) def _persist_run(self, run: Run) -> None: """Run the evaluator on the run. Parameters ---------- run : Run The run to be evaluated. 
""" if self.skip_unfinished and not run.outputs: logger.debug(f"Skipping unfinished run {run.id}") return run_ = run.copy() run_.reference_example_id = self.example_id if self.max_workers > 0: with ThreadPoolExecutor(max_workers=self.max_workers) as executor: list( executor.map( self._evaluate_in_project, [run_ for _ in range(len(self.evaluators))], self.evaluators, ) ) else: for evaluator in self.evaluators: self._evaluate_in_project(run_, evaluator)
[ "langchain.callbacks.tracers.langchain.get_client", "langchain.callbacks.manager.tracing_v2_enabled" ]
[((562, 589), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (579, 589), False, 'import logging\n'), ((2581, 2597), 'uuid.UUID', 'UUID', (['example_id'], {}), '(example_id)\n', (2585, 2597), False, 'from uuid import UUID\n'), ((2687, 2716), 'langchain.callbacks.tracers.langchain.get_client', 'langchain_tracer.get_client', ([], {}), '()\n', (2714, 2716), True, 'from langchain.callbacks.tracers import langchain as langchain_tracer\n'), ((3489, 3586), 'langchain.callbacks.manager.tracing_v2_enabled', 'manager.tracing_v2_enabled', ([], {'project_name': 'self.project_name', 'tags': "['eval']", 'client': 'self.client'}), "(project_name=self.project_name, tags=['eval'],\n client=self.client)\n", (3515, 3586), False, 'from langchain.callbacks import manager\n'), ((4506, 4554), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'self.max_workers'}), '(max_workers=self.max_workers)\n', (4524, 4554), False, 'from concurrent.futures import Future, ThreadPoolExecutor\n')]
"""A tracer that runs evaluators over completed runs.""" from __future__ import annotations import logging from concurrent.futures import Future, ThreadPoolExecutor from typing import Any, Dict, List, Optional, Sequence, Set, Union from uuid import UUID import langsmith from langsmith.evaluation.evaluator import EvaluationResult from langchain.callbacks import manager from langchain.callbacks.tracers import langchain as langchain_tracer from langchain.callbacks.tracers.base import BaseTracer from langchain.callbacks.tracers.schemas import Run logger = logging.getLogger(__name__) class EvaluatorCallbackHandler(BaseTracer): """A tracer that runs a run evaluator whenever a run is persisted. Parameters ---------- evaluators : Sequence[RunEvaluator] The run evaluators to apply to all top level runs. max_workers : int, optional The maximum number of worker threads to use for running the evaluators. If not specified, it will default to the number of evaluators. client : LangSmith Client, optional The LangSmith client instance to use for evaluating the runs. If not specified, a new instance will be created. example_id : Union[UUID, str], optional The example ID to be associated with the runs. project_name : str, optional The LangSmith project name to be organize eval chain runs under. Attributes ---------- example_id : Union[UUID, None] The example ID associated with the runs. client : Client The LangSmith client instance used for evaluating the runs. evaluators : Sequence[RunEvaluator] The sequence of run evaluators to be executed. executor : ThreadPoolExecutor The thread pool executor used for running the evaluators. futures : Set[Future] The set of futures representing the running evaluators. skip_unfinished : bool Whether to skip runs that are not finished or raised an error. project_name : Optional[str] The LangSmith project name to be organize eval chain runs under. """ name = "evaluator_callback_handler" def __init__( self, evaluators: Sequence[langsmith.RunEvaluator], max_workers: Optional[int] = None, client: Optional[langsmith.Client] = None, example_id: Optional[Union[UUID, str]] = None, skip_unfinished: bool = True, project_name: Optional[str] = "evaluators", **kwargs: Any, ) -> None: super().__init__(**kwargs) self.example_id = ( UUID(example_id) if isinstance(example_id, str) else example_id ) self.client = client or langchain_tracer.get_client() self.evaluators = evaluators self.max_workers = max_workers or len(evaluators) self.futures: Set[Future] = set() self.skip_unfinished = skip_unfinished self.project_name = project_name self.logged_eval_results: Dict[str, List[EvaluationResult]] = {} def _evaluate_in_project(self, run: Run, evaluator: langsmith.RunEvaluator) -> None: """Evaluate the run in the project. Parameters ---------- run : Run The run to be evaluated. evaluator : RunEvaluator The evaluator to use for evaluating the run. """ try: if self.project_name is None: eval_result = self.client.evaluate_run(run, evaluator) with manager.tracing_v2_enabled( project_name=self.project_name, tags=["eval"], client=self.client ): eval_result = self.client.evaluate_run(run, evaluator) except Exception as e: logger.error( f"Error evaluating run {run.id} with " f"{evaluator.__class__.__name__}: {e}", exc_info=True, ) raise e example_id = str(run.reference_example_id) self.logged_eval_results.setdefault(example_id, []).append(eval_result) def _persist_run(self, run: Run) -> None: """Run the evaluator on the run. Parameters ---------- run : Run The run to be evaluated. 
""" if self.skip_unfinished and not run.outputs: logger.debug(f"Skipping unfinished run {run.id}") return run_ = run.copy() run_.reference_example_id = self.example_id if self.max_workers > 0: with ThreadPoolExecutor(max_workers=self.max_workers) as executor: list( executor.map( self._evaluate_in_project, [run_ for _ in range(len(self.evaluators))], self.evaluators, ) ) else: for evaluator in self.evaluators: self._evaluate_in_project(run_, evaluator)
[ "langchain.callbacks.tracers.langchain.get_client", "langchain.callbacks.manager.tracing_v2_enabled" ]
[((562, 589), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (579, 589), False, 'import logging\n'), ((2581, 2597), 'uuid.UUID', 'UUID', (['example_id'], {}), '(example_id)\n', (2585, 2597), False, 'from uuid import UUID\n'), ((2687, 2716), 'langchain.callbacks.tracers.langchain.get_client', 'langchain_tracer.get_client', ([], {}), '()\n', (2714, 2716), True, 'from langchain.callbacks.tracers import langchain as langchain_tracer\n'), ((3489, 3586), 'langchain.callbacks.manager.tracing_v2_enabled', 'manager.tracing_v2_enabled', ([], {'project_name': 'self.project_name', 'tags': "['eval']", 'client': 'self.client'}), "(project_name=self.project_name, tags=['eval'],\n client=self.client)\n", (3515, 3586), False, 'from langchain.callbacks import manager\n'), ((4506, 4554), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'self.max_workers'}), '(max_workers=self.max_workers)\n', (4524, 4554), False, 'from concurrent.futures import Future, ThreadPoolExecutor\n')]
"""Push and pull to the LangChain Hub.""" from __future__ import annotations from typing import TYPE_CHECKING, Any, Optional from langchain.load.dump import dumps from langchain.load.load import loads from langchain.utils import get_from_env if TYPE_CHECKING: from langchainhub import Client def _get_client(api_url: Optional[str] = None, api_key: Optional[str] = None) -> Client: try: from langchainhub import Client except ImportError as e: raise ImportError( "Could not import langchainhub, please install with `pip install " "langchainhub`." ) from e api_url = api_url or get_from_env("api_url", "LANGCHAIN_HUB_API_URL") api_key = api_key or get_from_env("api_key", "LANGCHAIN_HUB_API_KEY", default="") api_key = api_key or get_from_env("api_key", "LANGCHAIN_API_KEY") return Client(api_url, api_key=api_key) def push( repo_full_name: str, object: Any, *, api_url: Optional[str] = None, api_key: Optional[str] = None, parent_commit_hash: Optional[str] = "latest", ) -> str: """ Pushes an object to the hub and returns the URL. """ client = _get_client(api_url=api_url, api_key=api_key) manifest_json = dumps(object) resp = client.push( repo_full_name, manifest_json, parent_commit_hash=parent_commit_hash ) commit_hash: str = resp["commit"]["commit_hash"] return commit_hash def pull( owner_repo_commit: str, *, api_url: Optional[str] = None, api_key: Optional[str] = None, ) -> Any: """ Pulls an object from the hub and returns it. """ client = _get_client(api_url=api_url, api_key=api_key) resp: str = client.pull(owner_repo_commit) return loads(resp)
[ "langchain.load.load.loads", "langchainhub.Client", "langchain.load.dump.dumps", "langchain.utils.get_from_env" ]
[((862, 894), 'langchainhub.Client', 'Client', (['api_url'], {'api_key': 'api_key'}), '(api_url, api_key=api_key)\n', (868, 894), False, 'from langchainhub import Client\n'), ((1234, 1247), 'langchain.load.dump.dumps', 'dumps', (['object'], {}), '(object)\n', (1239, 1247), False, 'from langchain.load.dump import dumps\n'), ((1740, 1751), 'langchain.load.load.loads', 'loads', (['resp'], {}), '(resp)\n', (1745, 1751), False, 'from langchain.load.load import loads\n'), ((646, 694), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_url"""', '"""LANGCHAIN_HUB_API_URL"""'], {}), "('api_url', 'LANGCHAIN_HUB_API_URL')\n", (658, 694), False, 'from langchain.utils import get_from_env\n'), ((720, 780), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_key"""', '"""LANGCHAIN_HUB_API_KEY"""'], {'default': '""""""'}), "('api_key', 'LANGCHAIN_HUB_API_KEY', default='')\n", (732, 780), False, 'from langchain.utils import get_from_env\n'), ((806, 850), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_key"""', '"""LANGCHAIN_API_KEY"""'], {}), "('api_key', 'LANGCHAIN_API_KEY')\n", (818, 850), False, 'from langchain.utils import get_from_env\n')]
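A brief usage sketch for the push/pull entry points above; the repo handle and prompt are invented examples, and a hub API key must be present in LANGCHAIN_HUB_API_KEY or LANGCHAIN_API_KEY for the calls to go through:

from langchain import hub
from langchain.prompts import PromptTemplate

prompt = PromptTemplate.from_template("Summarize this for a {audience}: {text}")

# push() serializes the object with dumps() and returns the commit hash of the new version.
commit_hash = hub.push("my-org/my-prompt", prompt)

# pull() fetches the manifest for that handle and rebuilds the object with loads().
same_prompt = hub.pull("my-org/my-prompt")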
"""Push and pull to the LangChain Hub.""" from __future__ import annotations from typing import TYPE_CHECKING, Any, Optional from langchain.load.dump import dumps from langchain.load.load import loads from langchain.utils import get_from_env if TYPE_CHECKING: from langchainhub import Client def _get_client(api_url: Optional[str] = None, api_key: Optional[str] = None) -> Client: try: from langchainhub import Client except ImportError as e: raise ImportError( "Could not import langchainhub, please install with `pip install " "langchainhub`." ) from e api_url = api_url or get_from_env("api_url", "LANGCHAIN_HUB_API_URL") api_key = api_key or get_from_env("api_key", "LANGCHAIN_HUB_API_KEY", default="") api_key = api_key or get_from_env("api_key", "LANGCHAIN_API_KEY") return Client(api_url, api_key=api_key) def push( repo_full_name: str, object: Any, *, api_url: Optional[str] = None, api_key: Optional[str] = None, parent_commit_hash: Optional[str] = "latest", ) -> str: """ Pushes an object to the hub and returns the URL. """ client = _get_client(api_url=api_url, api_key=api_key) manifest_json = dumps(object) resp = client.push( repo_full_name, manifest_json, parent_commit_hash=parent_commit_hash ) commit_hash: str = resp["commit"]["commit_hash"] return commit_hash def pull( owner_repo_commit: str, *, api_url: Optional[str] = None, api_key: Optional[str] = None, ) -> Any: """ Pulls an object from the hub and returns it. """ client = _get_client(api_url=api_url, api_key=api_key) resp: str = client.pull(owner_repo_commit) return loads(resp)
[ "langchain.load.load.loads", "langchainhub.Client", "langchain.load.dump.dumps", "langchain.utils.get_from_env" ]
[((862, 894), 'langchainhub.Client', 'Client', (['api_url'], {'api_key': 'api_key'}), '(api_url, api_key=api_key)\n', (868, 894), False, 'from langchainhub import Client\n'), ((1234, 1247), 'langchain.load.dump.dumps', 'dumps', (['object'], {}), '(object)\n', (1239, 1247), False, 'from langchain.load.dump import dumps\n'), ((1740, 1751), 'langchain.load.load.loads', 'loads', (['resp'], {}), '(resp)\n', (1745, 1751), False, 'from langchain.load.load import loads\n'), ((646, 694), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_url"""', '"""LANGCHAIN_HUB_API_URL"""'], {}), "('api_url', 'LANGCHAIN_HUB_API_URL')\n", (658, 694), False, 'from langchain.utils import get_from_env\n'), ((720, 780), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_key"""', '"""LANGCHAIN_HUB_API_KEY"""'], {'default': '""""""'}), "('api_key', 'LANGCHAIN_HUB_API_KEY', default='')\n", (732, 780), False, 'from langchain.utils import get_from_env\n'), ((806, 850), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_key"""', '"""LANGCHAIN_API_KEY"""'], {}), "('api_key', 'LANGCHAIN_API_KEY')\n", (818, 850), False, 'from langchain.utils import get_from_env\n')]
import os import utils import traceback from langchain.chains.qa_with_sources import load_qa_with_sources_chain from langchain.chains import ConversationChain from langchain.llms import OpenAI import langchain from langchain.cache import InMemoryCache from langchain.llms import OpenAI from langchain.chains.conversation.memory import ConversationSummaryBufferMemory,ConversationBufferMemory,ConversationBufferWindowMemory from langchain.prompts import PromptTemplate from embeddings import EmbeddingsManager from flask import Flask, send_from_directory import json import time import threading import secrets import string import hashlib from flask import request from langchain.cache import InMemoryCache,SQLiteCache import re import requests from waitress import serve from translator import Translator import sys from query.discoursequery import DiscourseQuery from query.embeddingsquery import EmbeddingsQuery from Summary import Summary import uuid from langchain.llms import NLPCloud from langchain.llms import AI21 from langchain.llms import Cohere from SmartCache import SmartCache CONFIG=None QUERIERS=[] args=sys.argv confiFile=args[1] if len(args)>1 else "config.json" print("Use config file", confiFile) with open(confiFile, "r") as f: CONFIG=json.load(f) EmbeddingsManager.init(CONFIG) Summary.init(CONFIG) QUERIERS=[ EmbeddingsQuery(CONFIG), DiscourseQuery( CONFIG,CONFIG["JME_HUB_URL"], searchFilter=CONFIG["JME_HUB_SEARCH_FILTER"], knowledgeCutoff=CONFIG["JME_HUB_KNOWLEDGE_CUTOFF"] ) ] Translator.init(CONFIG) def getAffineDocs(question,context,keywords,shortQuestion, wordSalad=None, unitFilter=None, maxFragmentsToReturn=3, maxFragmentsToSelect=12,merge=False): affineDocs=[] for q in QUERIERS: print("Get affine docs from",q,"using question",question,"with context",context,"and keywords",keywords) t=time.time() v=q.getAffineDocs( question, context, keywords,shortQuestion, wordSalad, unitFilter, maxFragmentsToReturn=maxFragmentsToReturn, maxFragmentsToSelect=maxFragmentsToSelect, merge=merge ) print("Completed in",time.time()-t,"seconds.") if v!=None: affineDocs.extend(v) return affineDocs def rewriteError(error): if error.startswith("Rate limit reached ") : return "Rate limit." 
def rewrite(question): # replace app, applet, game, application with simple application question=re.sub(r"\b(app|applet|game|application)\b", "simple application", question, flags=re.IGNORECASE) return question def createChain(): # Backward compatibility model_name=CONFIG.get("OPENAI_MODEL","text-davinci-003") llm_name="openai" ######## llmx=CONFIG.get("LLM_MODEL",None) # "openai:text-davinci-003" "cohere:xlarge" if llmx!=None: if ":" in llmx: llm_name,model_name=llmx.split(":") else: llm_name,model_name=llmx.split(".") template = "" template_path="prompts/"+llm_name+"."+model_name+".txt" if not os.path.exists(template_path): template_path="prompts/openai.text-davinci-003.txt" with open(template_path, "r") as f: template=f.read() prompt = PromptTemplate( input_variables=[ "history", "question", "summaries"], template=template ) llm=None history_length=700 if llm_name=="openai": max_tokens=512 temperature=0.0 if model_name=="text-davinci-003": max_tokens=512 elif model_name=="code-davinci-002": max_tokens=1024 #history_length=1024 llm=OpenAI( temperature=temperature, model_name=model_name, max_tokens=max_tokens, ) elif llm_name=="cohere": llm=Cohere( model=model_name, max_tokens=700 ) history_length=200 elif llm_name=="ai21": llm=AI21( temperature=0.7, model=model_name, ) elif llm_name=="nlpcloud": llm=NLPCloud( model_name=model_name, ) else: raise Exception("Unknown LLM "+llm_name) print("Use model ",model_name,"from",llm_name) memory=ConversationSummaryBufferMemory(llm=llm, max_token_limit=history_length,human_prefix="QUESTION",ai_prefix="ANSWER", memory_key="history", input_key="question") chain = load_qa_with_sources_chain( llm, memory=memory, prompt=prompt, verbose=True, ) return chain def extractQuestionData(question,wordSalad): shortQuestion=Summary.summarizeMarkdown(question,min_length=100,max_length=1024,withCodeBlocks=False) context=Summary.summarizeText(wordSalad,min_length=20,max_length=32) keywords=[] keywords.extend(Summary.getKeywords(shortQuestion,2)) keywords.extend(Summary.getKeywords(Summary.summarizeText(wordSalad,min_length=10,max_length=20),3)) return [question,shortQuestion,context,keywords,wordSalad] def queryChain(chain,question): wordSalad="" for h in chain.memory.buffer: wordSalad+=h+" " wordSalad+=" "+question [question,shortQuestion,context,keywords,wordSalad]=utils.enqueue(lambda :extractQuestionData(question,wordSalad)) affineDocs=utils.enqueue(lambda :getAffineDocs(question,context,keywords,shortQuestion,wordSalad)) print("Found ",len(affineDocs), " affine docs") print("Q: ", shortQuestion) output=chain({"input_documents": affineDocs, "question": shortQuestion}, return_only_outputs=True) print("A :",output) return output sessions={} langchain.llm_cache = SmartCache(CONFIG)#SQLiteCache(database_path=CONFIG["CACHE_PATH"]+"/langchain.db") def clearSessions(): while True: time.sleep(60*5) for session in sessions: if sessions[session]["timeout"] < time.time(): del sessions[session] threading.Thread(target=clearSessions).start() def createSessionSecret(): hex_chars = string.hexdigits timeHash=hashlib.sha256(str(time.time()).encode("utf-8")).hexdigest()[:12] return ''.join(secrets.choice(hex_chars) for i in range(64))+timeHash app = Flask(__name__) @app.route("/langs") def langs(): return json.dumps(Translator.getLangs()) @app.route("/session",methods = ['POST']) def session(): body=request.get_json() lang=body["lang"] if "lang" in body else "en" if lang=="auto": lang="en" if not "sessionSecret" in body or body["sessionSecret"].strip()=="": 
sessionSecret=createSessionSecret() else: sessionSecret=body["sessionSecret"] if sessionSecret not in sessions: sessions[sessionSecret]={ "chain": createChain(), "timeout": time.time()+60*30 } else: sessions[sessionSecret]["timeout"]=time.time()+60*30 welcomeText="" welcomeText+=Translator.translate("en", lang,"Hi there! I'm an AI assistant for the open source game engine jMonkeyEngine. I can help you with questions related to the jMonkeyEngine source code, documentation, and other related topics.") welcomeText+="<br><br>" welcomeText+="<footer><span class=\"material-symbols-outlined\">tips_and_updates</span><span>"+Translator.translate("en", lang,"This chat bot is intended to provide helpful information, but accuracy is not guaranteed.")+"</span></footer>" return json.dumps( { "sessionSecret": sessionSecret, "helloText":Translator.translate("en",lang,"Who are you?"), "welcomeText":welcomeText }) @app.route("/query",methods = ['POST']) def query(): try: body=request.get_json() question=rewrite(body["question"]) lang=body["lang"] if "lang" in body else "en" if lang == "auto": lang=Translator.detect(question) if lang!="en": question=Translator.translate(lang,"en",question) if len(question)==0: raise Exception("Question is empty") sessionSecret=body["sessionSecret"] if sessionSecret not in sessions: return json.dumps({"error": "Session expired"}) chain=sessions[sessionSecret]["chain"] output=queryChain(chain,question) if lang!="en": output["output_text"]=Translator.translate("en",lang,output["output_text"]) #print(chain.memory.buffer) return json.dumps(output) except Exception as e: print(e) print(traceback.format_exc()) errorStr=str(e) errorStr=rewriteError(errorStr) return json.dumps({"error": errorStr}) @app.route('/<path:filename>') def serveFrontend(filename): return send_from_directory('frontend/', filename) @app.route('/') def serveIndex(): return send_from_directory('frontend/', "index.html") @app.route('/docs', methods=['POST']) def docs(): body=request.get_json() question=body["question"] maxFragmentsToReturn=int(body.get("maxFragmentsToReturn",3)) maxFragmentsToSelect=int(body.get("maxFragmentsToReturn",6)) wordSalad=body.get("context","")+" "+question [question,shortQuestion,context,keywords,wordSalad]=utils.enqueue(lambda : extractQuestionData(question,wordSalad)) affineDocs=utils.enqueue(lambda : getAffineDocs( question,context,keywords,shortQuestion,wordSalad, maxFragmentsToReturn=maxFragmentsToReturn, maxFragmentsToSelect=maxFragmentsToSelect )) plainDocs=[ { "content":doc.page_content, "metadata":doc.metadata } for doc in affineDocs ] return json.dumps(plainDocs) serve(app, host="0.0.0.0", port=8080, connection_limit=1000)
[ "langchain.chains.conversation.memory.ConversationSummaryBufferMemory", "langchain.llms.OpenAI", "langchain.llms.AI21", "langchain.llms.Cohere", "langchain.chains.qa_with_sources.load_qa_with_sources_chain", "langchain.llms.NLPCloud", "langchain.prompts.PromptTemplate" ]
[((5785, 5803), 'SmartCache.SmartCache', 'SmartCache', (['CONFIG'], {}), '(CONFIG)\n', (5795, 5803), False, 'from SmartCache import SmartCache\n'), ((6330, 6345), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (6335, 6345), False, 'from flask import Flask, send_from_directory\n'), ((9830, 9890), 'waitress.serve', 'serve', (['app'], {'host': '"""0.0.0.0"""', 'port': '(8080)', 'connection_limit': '(1000)'}), "(app, host='0.0.0.0', port=8080, connection_limit=1000)\n", (9835, 9890), False, 'from waitress import serve\n'), ((1263, 1275), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1272, 1275), False, 'import json\n'), ((1280, 1310), 'embeddings.EmbeddingsManager.init', 'EmbeddingsManager.init', (['CONFIG'], {}), '(CONFIG)\n', (1302, 1310), False, 'from embeddings import EmbeddingsManager\n'), ((1315, 1335), 'Summary.Summary.init', 'Summary.init', (['CONFIG'], {}), '(CONFIG)\n', (1327, 1335), False, 'from Summary import Summary\n'), ((1591, 1614), 'translator.Translator.init', 'Translator.init', (['CONFIG'], {}), '(CONFIG)\n', (1606, 1614), False, 'from translator import Translator\n'), ((2557, 2659), 're.sub', 're.sub', (['"""\\\\b(app|applet|game|application)\\\\b"""', '"""simple application"""', 'question'], {'flags': 're.IGNORECASE'}), "('\\\\b(app|applet|game|application)\\\\b', 'simple application',\n question, flags=re.IGNORECASE)\n", (2563, 2659), False, 'import re\n'), ((3341, 3432), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['history', 'question', 'summaries']", 'template': 'template'}), "(input_variables=['history', 'question', 'summaries'],\n template=template)\n", (3355, 3432), False, 'from langchain.prompts import PromptTemplate\n'), ((4371, 4540), 'langchain.chains.conversation.memory.ConversationSummaryBufferMemory', 'ConversationSummaryBufferMemory', ([], {'llm': 'llm', 'max_token_limit': 'history_length', 'human_prefix': '"""QUESTION"""', 'ai_prefix': '"""ANSWER"""', 'memory_key': '"""history"""', 'input_key': '"""question"""'}), "(llm=llm, max_token_limit=history_length,\n human_prefix='QUESTION', ai_prefix='ANSWER', memory_key='history',\n input_key='question')\n", (4402, 4540), False, 'from langchain.chains.conversation.memory import ConversationSummaryBufferMemory, ConversationBufferMemory, ConversationBufferWindowMemory\n'), ((4543, 4618), 'langchain.chains.qa_with_sources.load_qa_with_sources_chain', 'load_qa_with_sources_chain', (['llm'], {'memory': 'memory', 'prompt': 'prompt', 'verbose': '(True)'}), '(llm, memory=memory, prompt=prompt, verbose=True)\n', (4569, 4618), False, 'from langchain.chains.qa_with_sources import load_qa_with_sources_chain\n'), ((4748, 4842), 'Summary.Summary.summarizeMarkdown', 'Summary.summarizeMarkdown', (['question'], {'min_length': '(100)', 'max_length': '(1024)', 'withCodeBlocks': '(False)'}), '(question, min_length=100, max_length=1024,\n withCodeBlocks=False)\n', (4773, 4842), False, 'from Summary import Summary\n'), ((4849, 4911), 'Summary.Summary.summarizeText', 'Summary.summarizeText', (['wordSalad'], {'min_length': '(20)', 'max_length': '(32)'}), '(wordSalad, min_length=20, max_length=32)\n', (4870, 4911), False, 'from Summary import Summary\n'), ((6497, 6515), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (6513, 6515), False, 'from flask import request\n'), ((7046, 7280), 'translator.Translator.translate', 'Translator.translate', (['"""en"""', 'lang', '"""Hi there! I\'m an AI assistant for the open source game engine jMonkeyEngine. 
I can help you with questions related to the jMonkeyEngine source code, documentation, and other related topics."""'], {}), '(\'en\', lang,\n "Hi there! I\'m an AI assistant for the open source game engine jMonkeyEngine. I can help you with questions related to the jMonkeyEngine source code, documentation, and other related topics."\n )\n', (7066, 7280), False, 'from translator import Translator\n'), ((8890, 8932), 'flask.send_from_directory', 'send_from_directory', (['"""frontend/"""', 'filename'], {}), "('frontend/', filename)\n", (8909, 8932), False, 'from flask import Flask, send_from_directory\n'), ((8979, 9025), 'flask.send_from_directory', 'send_from_directory', (['"""frontend/"""', '"""index.html"""'], {}), "('frontend/', 'index.html')\n", (8998, 9025), False, 'from flask import Flask, send_from_directory\n'), ((9086, 9104), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (9102, 9104), False, 'from flask import request\n'), ((9806, 9827), 'json.dumps', 'json.dumps', (['plainDocs'], {}), '(plainDocs)\n', (9816, 9827), False, 'import json\n'), ((1359, 1382), 'query.embeddingsquery.EmbeddingsQuery', 'EmbeddingsQuery', (['CONFIG'], {}), '(CONFIG)\n', (1374, 1382), False, 'from query.embeddingsquery import EmbeddingsQuery\n'), ((1392, 1545), 'query.discoursequery.DiscourseQuery', 'DiscourseQuery', (['CONFIG', "CONFIG['JME_HUB_URL']"], {'searchFilter': "CONFIG['JME_HUB_SEARCH_FILTER']", 'knowledgeCutoff': "CONFIG['JME_HUB_KNOWLEDGE_CUTOFF']"}), "(CONFIG, CONFIG['JME_HUB_URL'], searchFilter=CONFIG[\n 'JME_HUB_SEARCH_FILTER'], knowledgeCutoff=CONFIG[\n 'JME_HUB_KNOWLEDGE_CUTOFF'])\n", (1406, 1545), False, 'from query.discoursequery import DiscourseQuery\n'), ((1943, 1954), 'time.time', 'time.time', ([], {}), '()\n', (1952, 1954), False, 'import time\n'), ((3165, 3194), 'os.path.exists', 'os.path.exists', (['template_path'], {}), '(template_path)\n', (3179, 3194), False, 'import os\n'), ((3764, 3841), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': 'temperature', 'model_name': 'model_name', 'max_tokens': 'max_tokens'}), '(temperature=temperature, model_name=model_name, max_tokens=max_tokens)\n', (3770, 3841), False, 'from langchain.llms import OpenAI\n'), ((4946, 4983), 'Summary.Summary.getKeywords', 'Summary.getKeywords', (['shortQuestion', '(2)'], {}), '(shortQuestion, 2)\n', (4965, 4983), False, 'from Summary import Summary\n'), ((5914, 5932), 'time.sleep', 'time.sleep', (['(60 * 5)'], {}), '(60 * 5)\n', (5924, 5932), False, 'import time\n'), ((6061, 6099), 'threading.Thread', 'threading.Thread', ([], {'target': 'clearSessions'}), '(target=clearSessions)\n', (6077, 6099), False, 'import threading\n'), ((6407, 6428), 'translator.Translator.getLangs', 'Translator.getLangs', ([], {}), '()\n', (6426, 6428), False, 'from translator import Translator\n'), ((7801, 7819), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (7817, 7819), False, 'from flask import request\n'), ((8605, 8623), 'json.dumps', 'json.dumps', (['output'], {}), '(output)\n', (8615, 8623), False, 'import json\n'), ((3930, 3970), 'langchain.llms.Cohere', 'Cohere', ([], {'model': 'model_name', 'max_tokens': '(700)'}), '(model=model_name, max_tokens=700)\n', (3936, 3970), False, 'from langchain.llms import Cohere\n'), ((5024, 5086), 'Summary.Summary.summarizeText', 'Summary.summarizeText', (['wordSalad'], {'min_length': '(10)', 'max_length': '(20)'}), '(wordSalad, min_length=10, max_length=20)\n', (5045, 5086), False, 'from Summary import Summary\n'), ((6992, 7003), 'time.time', 
'time.time', ([], {}), '()\n', (7001, 7003), False, 'import time\n'), ((7398, 7532), 'translator.Translator.translate', 'Translator.translate', (['"""en"""', 'lang', '"""This chat bot is intended to provide helpful information, but accuracy is not guaranteed."""'], {}), "('en', lang,\n 'This chat bot is intended to provide helpful information, but accuracy is not guaranteed.'\n )\n", (7418, 7532), False, 'from translator import Translator\n'), ((7636, 7684), 'translator.Translator.translate', 'Translator.translate', (['"""en"""', 'lang', '"""Who are you?"""'], {}), "('en', lang, 'Who are you?')\n", (7656, 7684), False, 'from translator import Translator\n'), ((7971, 7998), 'translator.Translator.detect', 'Translator.detect', (['question'], {}), '(question)\n', (7988, 7998), False, 'from translator import Translator\n'), ((8044, 8086), 'translator.Translator.translate', 'Translator.translate', (['lang', '"""en"""', 'question'], {}), "(lang, 'en', question)\n", (8064, 8086), False, 'from translator import Translator\n'), ((8290, 8330), 'json.dumps', 'json.dumps', (["{'error': 'Session expired'}"], {}), "({'error': 'Session expired'})\n", (8300, 8330), False, 'import json\n'), ((8499, 8554), 'translator.Translator.translate', 'Translator.translate', (['"""en"""', 'lang', "output['output_text']"], {}), "('en', lang, output['output_text'])\n", (8519, 8554), False, 'from translator import Translator\n'), ((8785, 8816), 'json.dumps', 'json.dumps', (["{'error': errorStr}"], {}), "({'error': errorStr})\n", (8795, 8816), False, 'import json\n'), ((2241, 2252), 'time.time', 'time.time', ([], {}), '()\n', (2250, 2252), False, 'import time\n'), ((4072, 4111), 'langchain.llms.AI21', 'AI21', ([], {'temperature': '(0.7)', 'model': 'model_name'}), '(temperature=0.7, model=model_name)\n', (4076, 4111), False, 'from langchain.llms import AI21\n'), ((6010, 6021), 'time.time', 'time.time', ([], {}), '()\n', (6019, 6021), False, 'import time\n'), ((6267, 6292), 'secrets.choice', 'secrets.choice', (['hex_chars'], {}), '(hex_chars)\n', (6281, 6292), False, 'import secrets\n'), ((6911, 6922), 'time.time', 'time.time', ([], {}), '()\n', (6920, 6922), False, 'import time\n'), ((8682, 8704), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (8702, 8704), False, 'import traceback\n'), ((4193, 4224), 'langchain.llms.NLPCloud', 'NLPCloud', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (4201, 4224), False, 'from langchain.llms import NLPCloud\n'), ((6201, 6212), 'time.time', 'time.time', ([], {}), '()\n', (6210, 6212), False, 'import time\n')]
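Stripped of the Flask, Redis, and translation plumbing, the heart of createChain()/queryChain() above is a qa-with-sources chain driven by a ConversationSummaryBufferMemory. A minimal sketch of just that wiring, assuming an OPENAI_API_KEY is set; the prompt text and the single Document are illustrative stand-ins for the real prompt files and affine documents.

# Illustrative sketch only: the chain/memory wiring used by createChain() above.
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.chains.conversation.memory import ConversationSummaryBufferMemory
from langchain.docstore.document import Document

llm = OpenAI(temperature=0.0, model_name="text-davinci-003", max_tokens=512)

prompt = PromptTemplate(
    input_variables=["history", "question", "summaries"],
    template=(
        "Conversation so far:\n{history}\n\n"
        "Relevant excerpts:\n{summaries}\n\n"
        "QUESTION: {question}\nANSWER:"
    ),
)

memory = ConversationSummaryBufferMemory(
    llm=llm,
    max_token_limit=700,
    human_prefix="QUESTION",
    ai_prefix="ANSWER",
    memory_key="history",
    input_key="question",
)

chain = load_qa_with_sources_chain(llm, memory=memory, prompt=prompt, verbose=True)

# The affine documents normally come from the embeddings/Discourse queriers.
docs = [Document(
    page_content="jMonkeyEngine applications usually extend SimpleApplication.",
    metadata={"source": "wiki/getting-started"},
)]
output = chain(
    {"input_documents": docs, "question": "How do I start a jME application?"},
    return_only_outputs=True,
)
print(output["output_text"])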
import csv from ctypes import Array from typing import Any, Coroutine, List, Tuple import io import time import re import os from fastapi import UploadFile import asyncio import langchain from langchain.chat_models import ChatOpenAI from langchain.agents import create_csv_agent, load_tools, initialize_agent, AgentType, create_pandas_dataframe_agent from langchain.tools import HumanInputRun, PythonAstREPLTool from langchain.callbacks.tracers import ConsoleCallbackHandler from langchain.callbacks import HumanApprovalCallbackHandler from langchain.memory import ConversationBufferMemory, ConversationSummaryBufferMemory from langchain import PromptTemplate import pandas as pd from langchain.output_parsers import PydanticOutputParser, OutputFixingParser from util.tools import SessionHumanInputRun import util.config as config from util.model import TemplateMappingList, TemplateMapping, TemplateMappingCode, TransformValue import redis r = redis.from_url(os.environ.get("REDIS_URL")) #r = redis.from_url('redis://:password@localhost:6379') class Processor: def __init__(self, session): self.session = session async def extract_csv_description(self, df: UploadFile|str, llm, memory) -> Coroutine[Any, Any, Tuple[pd.DataFrame, str]] : df = pd.read_csv(df) agent = create_pandas_dataframe_agent(llm=llm,df=df, agent_executor_kwargs={'handle_parsing_errors':True, 'memory':memory}, early_stopping_method="generate", verbose=True, temperature=0,agent_type=AgentType.OPENAI_FUNCTIONS,) descriptions = agent.run("""Describe what is the column name of each of the column table in detail in the following format: <name of column 1>: <description of column 1>\n <name of column 2>: <description of column 2>""", callbacks=[ConsoleCallbackHandler()]) return df, descriptions async def _human_prompt(prompt, session): r.publish(f'human_prompt_{session}', prompt) async def _human_input(session): p = r.pubsub(ignore_subscribe_messages=True) p.subscribe(f'human_input_{session}') message = None while True: message = p.get_message() if message and message['type']=='message': break print("waiting for human input") await asyncio.sleep(1) return message['data'].decode('utf-8') async def process_files(self, table_file, template_file, file_guid): table_string = table_file.decode('utf-8') template_string = template_file.decode('utf-8') llm = ChatOpenAI(openai_api_key=config.OPENAI_API_KEY, temperature=0, model="gpt-3.5-turbo-0613", ) memory = ConversationSummaryBufferMemory(llm=llm,memory_key="chat_history", return_messages=True, max_token_limit=1500) table_df, table_descriptions = await self.extract_csv_description(io.StringIO(table_string), llm, memory=memory) r.publish(f'{self.session}_response', 'table_descriptions') r.publish(f'{self.session}_response', table_descriptions) template_df, template_descriptions = await self.extract_csv_description(io.StringIO(template_string), llm, memory=memory) r.publish(f'{self.session}_response', 'template_descriptions') r.publish(f'{self.session}_response', template_descriptions) dfs =[table_df, template_df] human_tool = SessionHumanInputRun(session=self.session) human_tool.description = ''' Use this tool to take human input. If the mapping is ambiguous, ask 'human' a question with options in the following format. Make the human confirm the mapping by selecting the appropriate number. - Question: The template column <template column name> should be mapped to which one of the table columns (1: <table column name 1>, 2: <table column name 2> (Recommended), 3:<table column name 3>, ...)? 
Select the appropriate number or specify the column name. ''' human_tool.prompt_func= Processor._human_prompt human_tool.input_func = Processor._human_input mappings = await self.get_mappings(llm, table_descriptions, template_descriptions, human_tool) codes = await self.get_template_formatting_code(llm, table_df, template_df, human_tool, mappings, memory) new_table_df = table_df.loc[:,[code.table_column for code in codes]] for code in codes: new_table_df[code.table_column].apply(lambda x: self.format_value(x,code=code.code)) r.set(f"{self.session}_{file_guid}", new_table_df.to_msgpack(compress='zlib')) r.publish(f'{self.session}_response', f'file_guid:{file_guid}') def format_value(self, source_value, code): value = TransformValue(source=source_value,destination=source_value) try: exec(code, {'value':value}) except Exception as e: r.publish(f'{self.session}_response',f'ERROR: \nCode: \n {code} \n Failed with error: \n{e}') print(e) return value.destination async def get_mappings(self,llm, table_descriptions, template_descriptions, human_tool): parser = PydanticOutputParser(pydantic_object=TemplateMappingList) new_parser = OutputFixingParser.from_llm(parser=parser,llm=llm) agent = initialize_agent( [human_tool], llm, agent=AgentType.OPENAI_FUNCTIONS, handle_parsing_errors=True, early_stopping_method="force", temperature=0.3, output_parser=new_parser, ) descriptions = await agent.arun("""Map all the columns of the Template descriptions to columns of the table Descriptions: - Table Descriptions: """ + table_descriptions + """ - Template Descriptions: """ + template_descriptions + """ Use the table and template descriptions above to determine the mapping based on similarity, formats and distribution. If the table column names are ambiguous take human input. """,callbacks=[ConsoleCallbackHandler()],) print(descriptions) mappings = new_parser.parse(descriptions) return mappings async def get_template_formatting_code(self, llm, table_df, template_df, human_tool, mappings: TemplateMappingList, memory): dfs = [] dfs.append(table_df) dfs.append(template_df) df_locals = {} df_locals[f"table_df"] = table_df df_locals[f"template_df"] = template_df parser = PydanticOutputParser(pydantic_object=TemplateMappingCode) new_parser = OutputFixingParser.from_llm(parser=parser,llm=llm) codes=[] #The code should be in the format of a Python function taking as input a string and returning a string. for mapping in mappings.template_mappings: human_tool.description = f''' Use this tool to get human approval. Always show the samples and code. The human can edit the code and approve it. ''' table_df_samples = table_df[mapping.table_column].sample(5).to_list() template_df_samples = template_df[mapping.template_column].sample(5).to_list() agent = initialize_agent( [PythonAstREPLTool(locals=df_locals)], llm, agent=AgentType.OPENAI_FUNCTIONS, handle_parsing_errors=True, early_stopping_method="force", temperature=0.3, output_parser=new_parser, memory = memory, memory_key = 'chat_history' ) #The AI can determine the format of the column values only after sampling. #As shown in the output below, generate the code as a Python function taking as input a string and returning a string and also include a call to the generated function. code = agent.run(f'''Provide the code to bring the format of values in table_df column '{mapping.table_column}' to the format of values in template_df column '{mapping.template_column}' based off the values, data types and formats. 
Additional samples to be used to generate the code: '{mapping.table_column}' sample values: [{table_df_samples}] '{mapping.template_column}' samples values: [{template_df_samples}] The input to the code will be a value object with the following attributes: - source: The value of the table_df column '{mapping.table_column}'. - destination: The value of the template_df column '{mapping.template_column}'. Show the sample values using which the code is generated. For example, for date columns, they may be in different formats, and it is necessary to change the format from dd.mm.yyyy to mm.dd.yyyy. Final Answer: ``` ```python def format_value(source_value): <code to transform source_value into destination_value> return destination_value value.destination = format_value(value.source) ``` ``` Final Answer should contain the samples and code. ''', callbacks=[ConsoleCallbackHandler(), ]) print(code) human_code = await human_tool.arun(code + '\nSpecify the code with ```python``` tags.') regex = r"```python((.|\n|\t)*?)```" code = human_code if re.match(regex, human_code) else code matches = re.findall(regex, code) code = '' for match in matches: code = code + '\n'+ '\n'.join(match) codes.append(TemplateMappingCode(template_column=mapping.template_column, table_column=mapping.table_column, code=code)) return codes
[ "langchain.agents.initialize_agent", "langchain.memory.ConversationSummaryBufferMemory", "langchain.output_parsers.PydanticOutputParser", "langchain.tools.PythonAstREPLTool", "langchain.agents.create_pandas_dataframe_agent", "langchain.chat_models.ChatOpenAI", "langchain.callbacks.tracers.ConsoleCallbackHandler", "langchain.output_parsers.OutputFixingParser.from_llm" ]
[((963, 990), 'os.environ.get', 'os.environ.get', (['"""REDIS_URL"""'], {}), "('REDIS_URL')\n", (977, 990), False, 'import os\n'), ((1270, 1285), 'pandas.read_csv', 'pd.read_csv', (['df'], {}), '(df)\n', (1281, 1285), True, 'import pandas as pd\n'), ((1302, 1537), 'langchain.agents.create_pandas_dataframe_agent', 'create_pandas_dataframe_agent', ([], {'llm': 'llm', 'df': 'df', 'agent_executor_kwargs': "{'handle_parsing_errors': True, 'memory': memory}", 'early_stopping_method': '"""generate"""', 'verbose': '(True)', 'temperature': '(0)', 'agent_type': 'AgentType.OPENAI_FUNCTIONS'}), "(llm=llm, df=df, agent_executor_kwargs={\n 'handle_parsing_errors': True, 'memory': memory}, early_stopping_method\n ='generate', verbose=True, temperature=0, agent_type=AgentType.\n OPENAI_FUNCTIONS)\n", (1331, 1537), False, 'from langchain.agents import create_csv_agent, load_tools, initialize_agent, AgentType, create_pandas_dataframe_agent\n'), ((2683, 2779), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'config.OPENAI_API_KEY', 'temperature': '(0)', 'model': '"""gpt-3.5-turbo-0613"""'}), "(openai_api_key=config.OPENAI_API_KEY, temperature=0, model=\n 'gpt-3.5-turbo-0613')\n", (2693, 2779), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2794, 2909), 'langchain.memory.ConversationSummaryBufferMemory', 'ConversationSummaryBufferMemory', ([], {'llm': 'llm', 'memory_key': '"""chat_history"""', 'return_messages': '(True)', 'max_token_limit': '(1500)'}), "(llm=llm, memory_key='chat_history',\n return_messages=True, max_token_limit=1500)\n", (2825, 2909), False, 'from langchain.memory import ConversationBufferMemory, ConversationSummaryBufferMemory\n'), ((3498, 3540), 'util.tools.SessionHumanInputRun', 'SessionHumanInputRun', ([], {'session': 'self.session'}), '(session=self.session)\n', (3518, 3540), False, 'from util.tools import SessionHumanInputRun\n'), ((4868, 4929), 'util.model.TransformValue', 'TransformValue', ([], {'source': 'source_value', 'destination': 'source_value'}), '(source=source_value, destination=source_value)\n', (4882, 4929), False, 'from util.model import TemplateMappingList, TemplateMapping, TemplateMappingCode, TransformValue\n'), ((5288, 5345), 'langchain.output_parsers.PydanticOutputParser', 'PydanticOutputParser', ([], {'pydantic_object': 'TemplateMappingList'}), '(pydantic_object=TemplateMappingList)\n', (5308, 5345), False, 'from langchain.output_parsers import PydanticOutputParser, OutputFixingParser\n'), ((5367, 5418), 'langchain.output_parsers.OutputFixingParser.from_llm', 'OutputFixingParser.from_llm', ([], {'parser': 'parser', 'llm': 'llm'}), '(parser=parser, llm=llm)\n', (5394, 5418), False, 'from langchain.output_parsers import PydanticOutputParser, OutputFixingParser\n'), ((5434, 5614), 'langchain.agents.initialize_agent', 'initialize_agent', (['[human_tool]', 'llm'], {'agent': 'AgentType.OPENAI_FUNCTIONS', 'handle_parsing_errors': '(True)', 'early_stopping_method': '"""force"""', 'temperature': '(0.3)', 'output_parser': 'new_parser'}), "([human_tool], llm, agent=AgentType.OPENAI_FUNCTIONS,\n handle_parsing_errors=True, early_stopping_method='force', temperature=\n 0.3, output_parser=new_parser)\n", (5450, 5614), False, 'from langchain.agents import create_csv_agent, load_tools, initialize_agent, AgentType, create_pandas_dataframe_agent\n'), ((6899, 6956), 'langchain.output_parsers.PydanticOutputParser', 'PydanticOutputParser', ([], {'pydantic_object': 'TemplateMappingCode'}), '(pydantic_object=TemplateMappingCode)\n', (6919, 6956), 
False, 'from langchain.output_parsers import PydanticOutputParser, OutputFixingParser\n'), ((6978, 7029), 'langchain.output_parsers.OutputFixingParser.from_llm', 'OutputFixingParser.from_llm', ([], {'parser': 'parser', 'llm': 'llm'}), '(parser=parser, llm=llm)\n', (7005, 7029), False, 'from langchain.output_parsers import PydanticOutputParser, OutputFixingParser\n'), ((10532, 10555), 're.findall', 're.findall', (['regex', 'code'], {}), '(regex, code)\n', (10542, 10555), False, 'import re\n'), ((2425, 2441), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (2438, 2441), False, 'import asyncio\n'), ((2988, 3013), 'io.StringIO', 'io.StringIO', (['table_string'], {}), '(table_string)\n', (2999, 3013), False, 'import io\n'), ((3250, 3278), 'io.StringIO', 'io.StringIO', (['template_string'], {}), '(template_string)\n', (3261, 3278), False, 'import io\n'), ((10459, 10486), 're.match', 're.match', (['regex', 'human_code'], {}), '(regex, human_code)\n', (10467, 10486), False, 'import re\n'), ((10690, 10801), 'util.model.TemplateMappingCode', 'TemplateMappingCode', ([], {'template_column': 'mapping.template_column', 'table_column': 'mapping.table_column', 'code': 'code'}), '(template_column=mapping.template_column, table_column=\n mapping.table_column, code=code)\n', (10709, 10801), False, 'from util.model import TemplateMappingList, TemplateMapping, TemplateMappingCode, TransformValue\n'), ((1905, 1929), 'langchain.callbacks.tracers.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (1927, 1929), False, 'from langchain.callbacks.tracers import ConsoleCallbackHandler\n'), ((7662, 7697), 'langchain.tools.PythonAstREPLTool', 'PythonAstREPLTool', ([], {'locals': 'df_locals'}), '(locals=df_locals)\n', (7679, 7697), False, 'from langchain.tools import HumanInputRun, PythonAstREPLTool\n'), ((6408, 6432), 'langchain.callbacks.tracers.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (6430, 6432), False, 'from langchain.callbacks.tracers import ConsoleCallbackHandler\n'), ((10224, 10248), 'langchain.callbacks.tracers.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (10246, 10248), False, 'from langchain.callbacks.tracers import ConsoleCallbackHandler\n')]
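The first step of process_files() above only needs a pandas-dataframe agent that describes each column. Below is a self-contained sketch of that step, without the Redis pub/sub or the human-in-the-loop tool; the toy DataFrame, its column names, and the OPENAI_API_KEY assumption are illustrative only.

# Illustrative sketch only: column descriptions via a pandas-dataframe agent,
# mirroring extract_csv_description() above.
import pandas as pd
from langchain.chat_models import ChatOpenAI
from langchain.agents import create_pandas_dataframe_agent, AgentType

df = pd.DataFrame({
    "order_date": ["01.03.2023", "15.04.2023"],  # dd.mm.yyyy strings
    "amount_eur": [19.99, 5.50],
})

llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
agent = create_pandas_dataframe_agent(
    llm=llm,
    df=df,
    verbose=True,
    agent_type=AgentType.OPENAI_FUNCTIONS,
    agent_executor_kwargs={"handle_parsing_errors": True},
)

descriptions = agent.run(
    "Describe each column in the following format:\n"
    "<name of column>: <description of column>"
)
print(descriptions)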
from typing import Dict, List, Optional from langchain.agents.load_tools import ( _EXTRA_LLM_TOOLS, _EXTRA_OPTIONAL_TOOLS, _LLM_TOOLS, ) from langflow.custom import customs from langflow.interface.base import LangChainTypeCreator from langflow.interface.tools.constants import ( ALL_TOOLS_NAMES, CUSTOM_TOOLS, FILE_TOOLS, OTHER_TOOLS, ) from langflow.interface.tools.util import get_tool_params from langflow.settings import settings from langflow.template.field.base import TemplateField from langflow.template.template.base import Template from langflow.utils import util from langflow.utils.util import build_template_from_class TOOL_INPUTS = { "str": TemplateField( field_type="str", required=True, is_list=False, show=True, placeholder="", value="", ), "llm": TemplateField( field_type="BaseLanguageModel", required=True, is_list=False, show=True ), "func": TemplateField( field_type="function", required=True, is_list=False, show=True, multiline=True, ), "code": TemplateField( field_type="str", required=True, is_list=False, show=True, value="", multiline=True, ), "path": TemplateField( field_type="file", required=True, is_list=False, show=True, value="", suffixes=[".json", ".yaml", ".yml"], fileTypes=["json", "yaml", "yml"], ), } class ToolCreator(LangChainTypeCreator): type_name: str = "tools" tools_dict: Optional[Dict] = None @property def type_to_loader_dict(self) -> Dict: if self.tools_dict is None: all_tools = {} for tool, tool_fcn in ALL_TOOLS_NAMES.items(): tool_params = get_tool_params(tool_fcn) tool_name = tool_params.get("name") or tool if tool_name in settings.tools or settings.dev: if tool_name == "JsonSpec": tool_params["path"] = tool_params.pop("dict_") # type: ignore all_tools[tool_name] = { "type": tool, "params": tool_params, "fcn": tool_fcn, } self.tools_dict = all_tools return self.tools_dict def get_signature(self, name: str) -> Optional[Dict]: """Get the signature of a tool.""" base_classes = ["Tool", "BaseTool"] fields = [] params = [] tool_params = {} # Raise error if name is not in tools if name not in self.type_to_loader_dict.keys(): raise ValueError("Tool not found") tool_type: str = self.type_to_loader_dict[name]["type"] # type: ignore # if tool_type in _BASE_TOOLS.keys(): # params = [] if tool_type in _LLM_TOOLS.keys(): params = ["llm"] elif tool_type in _EXTRA_LLM_TOOLS.keys(): extra_keys = _EXTRA_LLM_TOOLS[tool_type][1] params = ["llm"] + extra_keys elif tool_type in _EXTRA_OPTIONAL_TOOLS.keys(): extra_keys = _EXTRA_OPTIONAL_TOOLS[tool_type][1] params = extra_keys # elif tool_type == "Tool": # params = ["name", "description", "func"] elif tool_type in CUSTOM_TOOLS: # Get custom tool params params = self.type_to_loader_dict[name]["params"] # type: ignore base_classes = ["function"] if node := customs.get_custom_nodes("tools").get(tool_type): return node elif tool_type in FILE_TOOLS: params = self.type_to_loader_dict[name]["params"] # type: ignore base_classes += [name] elif tool_type in OTHER_TOOLS: tool_dict = build_template_from_class(tool_type, OTHER_TOOLS) fields = tool_dict["template"] # Pop unnecessary fields and add name fields.pop("_type") # type: ignore fields.pop("return_direct") # type: ignore fields.pop("verbose") # type: ignore tool_params = { "name": fields.pop("name")["value"], # type: ignore "description": fields.pop("description")["value"], # type: ignore } fields = [ TemplateField(name=name, field_type=field["type"], **field) for name, field in fields.items() # type: ignore ] base_classes += tool_dict["base_classes"] # Copy the field and add the name 
for param in params: field = TOOL_INPUTS.get(param, TOOL_INPUTS["str"]).copy() field.name = param field.advanced = False if param == "aiosession": field.show = False field.required = False fields.append(field) template = Template(fields=fields, type_name=tool_type) tool_params = {**tool_params, **self.type_to_loader_dict[name]["params"]} return { "template": util.format_dict(template.to_dict()), **tool_params, "base_classes": base_classes, } def to_list(self) -> List[str]: """List all load tools""" return list(self.type_to_loader_dict.keys()) tool_creator = ToolCreator()
[ "langchain.agents.load_tools._LLM_TOOLS.keys", "langchain.agents.load_tools._EXTRA_LLM_TOOLS.keys", "langchain.agents.load_tools._EXTRA_OPTIONAL_TOOLS.keys" ]
[((690, 792), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""str"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)', 'placeholder': '""""""', 'value': '""""""'}), "(field_type='str', required=True, is_list=False, show=True,\n placeholder='', value='')\n", (703, 792), False, 'from langflow.template.field.base import TemplateField\n'), ((856, 946), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""BaseLanguageModel"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)'}), "(field_type='BaseLanguageModel', required=True, is_list=False,\n show=True)\n", (869, 946), False, 'from langflow.template.field.base import TemplateField\n'), ((970, 1068), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""function"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)', 'multiline': '(True)'}), "(field_type='function', required=True, is_list=False, show=\n True, multiline=True)\n", (983, 1068), False, 'from langflow.template.field.base import TemplateField\n'), ((1124, 1226), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""str"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)', 'value': '""""""', 'multiline': '(True)'}), "(field_type='str', required=True, is_list=False, show=True,\n value='', multiline=True)\n", (1137, 1226), False, 'from langflow.template.field.base import TemplateField\n'), ((1291, 1454), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""file"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)', 'value': '""""""', 'suffixes': "['.json', '.yaml', '.yml']", 'fileTypes': "['json', 'yaml', 'yml']"}), "(field_type='file', required=True, is_list=False, show=True,\n value='', suffixes=['.json', '.yaml', '.yml'], fileTypes=['json',\n 'yaml', 'yml'])\n", (1304, 1454), False, 'from langflow.template.field.base import TemplateField\n'), ((4975, 5019), 'langflow.template.template.base.Template', 'Template', ([], {'fields': 'fields', 'type_name': 'tool_type'}), '(fields=fields, type_name=tool_type)\n', (4983, 5019), False, 'from langflow.template.template.base import Template\n'), ((1779, 1802), 'langflow.interface.tools.constants.ALL_TOOLS_NAMES.items', 'ALL_TOOLS_NAMES.items', ([], {}), '()\n', (1800, 1802), False, 'from langflow.interface.tools.constants import ALL_TOOLS_NAMES, CUSTOM_TOOLS, FILE_TOOLS, OTHER_TOOLS\n'), ((2927, 2944), 'langchain.agents.load_tools._LLM_TOOLS.keys', '_LLM_TOOLS.keys', ([], {}), '()\n', (2942, 2944), False, 'from langchain.agents.load_tools import _EXTRA_LLM_TOOLS, _EXTRA_OPTIONAL_TOOLS, _LLM_TOOLS\n'), ((1834, 1859), 'langflow.interface.tools.util.get_tool_params', 'get_tool_params', (['tool_fcn'], {}), '(tool_fcn)\n', (1849, 1859), False, 'from langflow.interface.tools.util import get_tool_params\n'), ((3001, 3024), 'langchain.agents.load_tools._EXTRA_LLM_TOOLS.keys', '_EXTRA_LLM_TOOLS.keys', ([], {}), '()\n', (3022, 3024), False, 'from langchain.agents.load_tools import _EXTRA_LLM_TOOLS, _EXTRA_OPTIONAL_TOOLS, _LLM_TOOLS\n'), ((3150, 3178), 'langchain.agents.load_tools._EXTRA_OPTIONAL_TOOLS.keys', '_EXTRA_OPTIONAL_TOOLS.keys', ([], {}), '()\n', (3176, 3178), False, 'from langchain.agents.load_tools import _EXTRA_LLM_TOOLS, _EXTRA_OPTIONAL_TOOLS, _LLM_TOOLS\n'), ((3874, 3923), 'langflow.utils.util.build_template_from_class', 'build_template_from_class', (['tool_type', 'OTHER_TOOLS'], {}), '(tool_type, 
OTHER_TOOLS)\n', (3899, 3923), False, 'from langflow.utils.util import build_template_from_class\n'), ((3582, 3615), 'langflow.custom.customs.get_custom_nodes', 'customs.get_custom_nodes', (['"""tools"""'], {}), "('tools')\n", (3606, 3615), False, 'from langflow.custom import customs\n'), ((4407, 4466), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'name': 'name', 'field_type': "field['type']"}), "(name=name, field_type=field['type'], **field)\n", (4420, 4466), False, 'from langflow.template.field.base import TemplateField\n')]
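ToolCreator above relies on three private registries from langchain.agents.load_tools to decide which constructor parameters a tool needs. The sketch below merely enumerates those registries; they are internal dictionaries whose contents vary between langchain versions, so treat this as illustrative rather than a stable API.

# Illustrative sketch only: inspecting the private load_tools registries used above.
from langchain.agents.load_tools import (
    _EXTRA_LLM_TOOLS,
    _EXTRA_OPTIONAL_TOOLS,
    _LLM_TOOLS,
)

# Tools that only require an LLM instance.
print("llm-only tools:", sorted(_LLM_TOOLS))

# Tools that require an LLM plus extra keyword arguments (typically API keys).
for name, (_factory, extra_keys) in _EXTRA_LLM_TOOLS.items():
    print(f"{name}: llm + {extra_keys}")

# Tools whose extra keyword arguments are optional.
for name, (_factory, optional_keys) in _EXTRA_OPTIONAL_TOOLS.items():
    print(f"{name}: optional {optional_keys}")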
OTHER_TOOLS)\n', (3899, 3923), False, 'from langflow.utils.util import build_template_from_class\n'), ((3582, 3615), 'langflow.custom.customs.get_custom_nodes', 'customs.get_custom_nodes', (['"""tools"""'], {}), "('tools')\n", (3606, 3615), False, 'from langflow.custom import customs\n'), ((4407, 4466), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'name': 'name', 'field_type': "field['type']"}), "(name=name, field_type=field['type'], **field)\n", (4420, 4466), False, 'from langflow.template.field.base import TemplateField\n')]
# imports import os, shutil, json, re import pathlib from langchain.document_loaders.unstructured import UnstructuredFileLoader from langchain.document_loaders.unstructured import UnstructuredAPIFileLoader from langchain.document_loaders import UnstructuredURLLoader from langchain.docstore.document import Document from google.cloud import storage import base64 import langchain.text_splitter as text_splitter from dotenv import load_dotenv import tempfile import hashlib from langchain.schema import Document import logging from my_llm.pubsub_manager import PubSubManager import datetime from .database import setup_database from .database import delete_row_from_source from .database import return_sources_last24 load_dotenv() def contains_url(message_data): url_pattern = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+') if url_pattern.search(message_data): return True else: return False def extract_urls(text): url_pattern = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+') urls = url_pattern.findall(text) return urls # utility functions def convert_to_txt(file_path): file_dir, file_name = os.path.split(file_path) file_base, file_ext = os.path.splitext(file_name) txt_file = os.path.join(file_dir, f"{file_base}.txt") shutil.copyfile(file_path, txt_file) return txt_file def compute_sha1_from_file(file_path): with open(file_path, "rb") as file: bytes = file.read() readable_hash = hashlib.sha1(bytes).hexdigest() return readable_hash def compute_sha1_from_content(content): readable_hash = hashlib.sha1(content).hexdigest() return readable_hash def add_file_to_gcs(filename: str, vector_name:str, bucket_name: str=None, metadata:dict=None): storage_client = storage.Client() bucket_name = bucket_name if bucket_name is not None else os.getenv('GCS_BUCKET', None) if bucket_name is None: raise ValueError("No bucket found to upload to: GCS_BUCKET returned None") if bucket_name.startswith("gs://"): bucket_name = bucket_name.removeprefix("gs://") logging.info(f"Bucket_name: {bucket_name}") bucket = storage_client.get_bucket(bucket_name) now = datetime.datetime.now() year = now.strftime("%Y") month = now.strftime("%m") day = now.strftime("%d") hour = now.strftime("%H") bucket_filepath = f"{vector_name}/{year}/{month}/{day}/{hour}/{os.path.basename(filename)}" blob = bucket.blob(bucket_filepath) the_metadata = { "vector_name": vector_name, } if metadata is not None: the_metadata.update(metadata) blob.metadata = the_metadata #TODO: create cloud storage pubsub subscription? 
blob.upload_from_filename(filename) logging.info(f"File {filename} uploaded to gs://{bucket_name}/{bucket_filepath}") # create pubsub topic and subscription if necessary to receive notifications from cloud storage pubsub_manager = PubSubManager(vector_name, pubsub_topic=f"app_to_pubsub_{vector_name}") sub_name = f"pubsub_to_store_{vector_name}" sub_exists = pubsub_manager.subscription_exists(sub_name) if not sub_exists: pubsub_manager.create_subscription(sub_name, push_endpoint=f"/pubsub_to_store/{vector_name}") setup_database(vector_name) return f"gs://{bucket_name}/{bucket_filepath}" def read_url_to_document(url: str, metadata: dict = None): loader = UnstructuredURLLoader(urls=[url]) docs = loader.load() if metadata is not None: for doc in docs: doc.metadata.update(metadata) logging.info(f"UnstructuredURLLoader docs: {docs}") return docs def read_file_to_document(gs_file: pathlib.Path, split=False, metadata: dict = None): #file_sha1 = compute_sha1_from_file(gs_file.name) try: #TODO: Use UnstructuredAPIFileLoader instead? logging.info(f"Sending {gs_file} to UnstructuredAPIFileLoader") loader = UnstructuredAPIFileLoader(gs_file, mode="elements", api_key="FAKE_API_KEY") if split: # only supported for some file types docs = loader.load_and_split() else: docs = loader.load() logging.info(f"Loaded docs for {gs_file} from UnstructuredAPIFileLoader") except ValueError as e: logging.info(f"Error for {gs_file} from UnstructuredAPIFileLoader: {str(e)}") if "file type is not supported in partition" in str(e): logging.info("trying locally via .txt conversion") txt_file = None try: # Convert the file to .txt and try again txt_file = convert_to_txt(gs_file) loader = UnstructuredFileLoader(txt_file, mode="elements") if split: docs = loader.load_and_split() else: docs = loader.load() except Exception as inner_e: raise Exception("An error occurred during txt conversion or loading.") from inner_e finally: # Ensure cleanup happens if txt_file was created if txt_file is not None and os.path.exists(txt_file): os.remove(txt_file) else: raise except Exception as e: logging.error(f"An unexpected error occurred for {gs_file}: {str(e)}") raise for doc in docs: #doc.metadata["file_sha1"] = file_sha1 logging.info(f"doc_content: {doc.page_content[:30]}") if metadata is not None: doc.metadata.update(metadata) logging.info(f"gs_file: {gs_file} turned into {len(docs)} documents") return docs def choose_splitter(extension: str, chunk_size: int=1024, chunk_overlap:int=0): if extension == ".py": return text_splitter.PythonCodeTextSplitter() elif extension == ".md": return text_splitter.MarkdownTextSplitter() return text_splitter.RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap) def remove_whitespace(page_content: str): return page_content.replace("\n", " ").replace("\r", " ").replace("\t", " ").replace(" ", " ") def chunk_doc_to_docs(documents: list, extension: str = ".md"): """Turns a Document object into a list of many Document chunks""" source_chunks = [] for document in documents: splitter = choose_splitter(extension) for chunk in splitter.split_text(remove_whitespace(document.page_content)): source_chunks.append(Document(page_content=chunk, metadata=document.metadata)) return source_chunks def data_to_embed_pubsub(data: dict, vector_name:str="documents"): """Triggered from a message on a Cloud Pub/Sub topic. 
Args: data JSON """ #hash = data['message']['data'] message_data = base64.b64decode(data['message']['data']).decode('utf-8') attributes = data['message'].get('attributes', {}) messageId = data['message'].get('messageId') publishTime = data['message'].get('publishTime') logging.info(f"data_to_embed_pubsub was triggered by messageId {messageId} published at {publishTime}") logging.info(f"data_to_embed_pubsub data: {message_data}") # pubsub from a Google Cloud Storage push topic if attributes.get("eventType", None) is not None and attributes.get("payloadFormat", None) is not None: eventType = attributes.get("eventType") payloadFormat = attributes.get("payloadFormat") if eventType == "OBJECT_FINALIZE" and payloadFormat == "JSON_API_V1": logging.info("Got valid event from Google Cloud Storage") the_object = attributes.get("objectId", None) if the_object is None: logging.info("No object found") return attributes if the_object.endswith("/"): logging.info("GCS object is a directory only") return attributes # https://cloud.google.com/storage/docs/json_api/v1/objects#resource-representations message_data = 'gs://' + attributes.get("bucketId") + '/' + the_object if '/' in the_object: bucket_vector_name = the_object.split('/')[0] if len(bucket_vector_name) > 0 and vector_name != bucket_vector_name: logging.info(f"Overwriting vector_name {vector_name} with {bucket_vector_name}") vector_name = bucket_vector_name attributes["attrs"] = f"namespace:{vector_name}" logging.info(f"Constructed message_data: {message_data}") metadata = attributes logging.info(f"Found metadata in pubsub: {metadata}") chunks = [] if message_data.startswith('"gs://'): message_data = message_data.strip('\"') if message_data.startswith("gs://"): logging.info("Detected gs://") bucket_name, file_name = message_data[5:].split("/", 1) # Create a client storage_client = storage.Client() # Download the file from GCS bucket = storage_client.get_bucket(bucket_name) blob = bucket.blob(file_name) file_name=pathlib.Path(file_name) with tempfile.TemporaryDirectory() as temp_dir: tmp_file_path = os.path.join(temp_dir, file_name.name) blob.download_to_filename(tmp_file_path) the_metadata = { "source": message_data, "type": "file_load_gcs", "bucket_name": bucket_name } metadata.update(the_metadata) docs = read_file_to_document(tmp_file_path, metadata=metadata) chunks = chunk_doc_to_docs(docs, file_name.suffix) logging.info(f"Split {file_name} into {len(chunks)} chunks") elif message_data.startswith("http"): logging.info(f"Got http message: {message_data}") # just in case, extract the URL again urls = extract_urls(message_data) docs = [] for url in urls: metadata["source"] = url metadata["url"] = url metadata["type"] = "url_load" doc = read_url_to_document(url, metadata=metadata) docs.extend(doc) chunks = chunk_doc_to_docs(docs) logging.info(f"Split {url} into {len(chunks)} chunks") else: logging.info("No gs:// detected") the_json = json.loads(message_data) the_metadata = the_json.get("metadata", {}) metadata.update(the_metadata) the_content = the_json.get("page_content", None) if metadata.get("source", None) is not None: metadata["source"] = "No source embedded" if the_content is None: logging.info("No content found") return {"metadata": "No content found"} docs = [Document(page_content=the_content, metadata=metadata)] publish_if_urls(the_content, vector_name) chunks = chunk_doc_to_docs(docs) logging.info(f"Split content into {len(chunks)} chunks") publish_chunks(chunks, vector_name=vector_name) logging.info(f"data_to_embed_pubsub published chunks 
with metadata: {metadata}") pubsub_manager = PubSubManager(vector_name, pubsub_topic=f"pubsub_state_messages") pubsub_manager.publish_message(f"pubsub_chunk - Added doc with metadata: {metadata} to {vector_name}") return metadata def publish_if_urls(the_content, vector_name): """ Extracts URLs and puts them in a queue for processing on PubSub """ if contains_url(the_content): logging.info("Detected http://") urls = extract_urls(the_content) for url in urls: publish_text(url, vector_name) def publish_chunks(chunks: list[Document], vector_name: str): logging.info("Publishing chunks to embed_chunk") pubsub_manager = PubSubManager(vector_name, pubsub_topic=f"embed_chunk_{vector_name}") sub_name = f"pubsub_chunk_to_store_{vector_name}" sub_exists = pubsub_manager.subscription_exists(sub_name) if not sub_exists: pubsub_manager.create_subscription(sub_name, push_endpoint=f"/pubsub_chunk_to_store/{vector_name}") setup_database(vector_name) for chunk in chunks: # Convert chunk to string, as Pub/Sub messages must be strings or bytes chunk_str = chunk.json() pubsub_manager.publish_message(chunk_str) def publish_text(text:str, vector_name: str): logging.info(f"Publishing text to app_to_pubsub_{vector_name}") pubsub_manager = PubSubManager(vector_name, pubsub_topic=f"app_to_pubsub_{vector_name}") sub_name = f"pubsub_to_store_{vector_name}" sub_exists = pubsub_manager.subscription_exists(sub_name) if not sub_exists: pubsub_manager.create_subscription(sub_name, push_endpoint=f"/pubsub_chunk_to_store/{vector_name}") setup_database(vector_name) pubsub_manager.publish_message(text) def delete_source(source:str, vector_name:str): logging.info(f"Deleting source: {source} from {vector_name}") delete_row_from_source(source, vector_name) logging.info(f"Deleted source: {source} from {vector_name}") def return_sources_last24_(vector_name:str): logging.info(f"Returning sources last 24") rows = return_sources_last24(vector_name) return rows
[ "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.document_loaders.UnstructuredURLLoader", "langchain.document_loaders.unstructured.UnstructuredAPIFileLoader", "langchain.text_splitter.MarkdownTextSplitter", "langchain.schema.Document", "langchain.document_loaders.unstructured.UnstructuredFileLoader", "langchain.text_splitter.PythonCodeTextSplitter" ]
[((719, 732), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (730, 732), False, 'from dotenv import load_dotenv\n'), ((784, 892), 're.compile', 're.compile', (['"""http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\\\\\(\\\\\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"""'], {}), "(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\\\\\(\\\\\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n )\n", (794, 892), False, 'import os, shutil, json, re\n'), ((1015, 1123), 're.compile', 're.compile', (['"""http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\\\\\(\\\\\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"""'], {}), "(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\\\\\(\\\\\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n )\n", (1025, 1123), False, 'import os, shutil, json, re\n'), ((1242, 1266), 'os.path.split', 'os.path.split', (['file_path'], {}), '(file_path)\n', (1255, 1266), False, 'import os, shutil, json, re\n'), ((1293, 1320), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (1309, 1320), False, 'import os, shutil, json, re\n'), ((1336, 1378), 'os.path.join', 'os.path.join', (['file_dir', 'f"""{file_base}.txt"""'], {}), "(file_dir, f'{file_base}.txt')\n", (1348, 1378), False, 'import os, shutil, json, re\n'), ((1383, 1419), 'shutil.copyfile', 'shutil.copyfile', (['file_path', 'txt_file'], {}), '(file_path, txt_file)\n', (1398, 1419), False, 'import os, shutil, json, re\n'), ((1869, 1885), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (1883, 1885), False, 'from google.cloud import storage\n'), ((2200, 2243), 'logging.info', 'logging.info', (['f"""Bucket_name: {bucket_name}"""'], {}), "(f'Bucket_name: {bucket_name}')\n", (2212, 2243), False, 'import logging\n'), ((2306, 2329), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2327, 2329), False, 'import datetime\n'), ((2853, 2939), 'logging.info', 'logging.info', (['f"""File {filename} uploaded to gs://{bucket_name}/{bucket_filepath}"""'], {}), "(\n f'File {filename} uploaded to gs://{bucket_name}/{bucket_filepath}')\n", (2865, 2939), False, 'import logging\n'), ((3058, 3129), 'my_llm.pubsub_manager.PubSubManager', 'PubSubManager', (['vector_name'], {'pubsub_topic': 'f"""app_to_pubsub_{vector_name}"""'}), "(vector_name, pubsub_topic=f'app_to_pubsub_{vector_name}')\n", (3071, 3129), False, 'from my_llm.pubsub_manager import PubSubManager\n'), ((3584, 3617), 'langchain.document_loaders.UnstructuredURLLoader', 'UnstructuredURLLoader', ([], {'urls': '[url]'}), '(urls=[url])\n', (3605, 3617), False, 'from langchain.document_loaders import UnstructuredURLLoader\n'), ((3748, 3799), 'logging.info', 'logging.info', (['f"""UnstructuredURLLoader docs: {docs}"""'], {}), "(f'UnstructuredURLLoader docs: {docs}')\n", (3760, 3799), False, 'import logging\n'), ((6112, 6212), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'text_splitter.RecursiveCharacterTextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size,\n chunk_overlap=chunk_overlap)\n', (6156, 6212), True, 'import langchain.text_splitter as text_splitter\n'), ((7229, 7342), 'logging.info', 'logging.info', (['f"""data_to_embed_pubsub was triggered by messageId {messageId} published at {publishTime}"""'], {}), "(\n f'data_to_embed_pubsub was triggered by messageId {messageId} published at {publishTime}'\n )\n", (7241, 7342), False, 'import logging\n'), ((7337, 7395), 'logging.info', 'logging.info', (['f"""data_to_embed_pubsub data: {message_data}"""'], {}), "(f'data_to_embed_pubsub data: 
{message_data}')\n", (7349, 7395), False, 'import logging\n'), ((8835, 8888), 'logging.info', 'logging.info', (['f"""Found metadata in pubsub: {metadata}"""'], {}), "(f'Found metadata in pubsub: {metadata}')\n", (8847, 8888), False, 'import logging\n'), ((11324, 11409), 'logging.info', 'logging.info', (['f"""data_to_embed_pubsub published chunks with metadata: {metadata}"""'], {}), "(f'data_to_embed_pubsub published chunks with metadata: {metadata}'\n )\n", (11336, 11409), False, 'import logging\n'), ((11426, 11491), 'my_llm.pubsub_manager.PubSubManager', 'PubSubManager', (['vector_name'], {'pubsub_topic': 'f"""pubsub_state_messages"""'}), "(vector_name, pubsub_topic=f'pubsub_state_messages')\n", (11439, 11491), False, 'from my_llm.pubsub_manager import PubSubManager\n'), ((12019, 12067), 'logging.info', 'logging.info', (['"""Publishing chunks to embed_chunk"""'], {}), "('Publishing chunks to embed_chunk')\n", (12031, 12067), False, 'import logging\n'), ((12094, 12163), 'my_llm.pubsub_manager.PubSubManager', 'PubSubManager', (['vector_name'], {'pubsub_topic': 'f"""embed_chunk_{vector_name}"""'}), "(vector_name, pubsub_topic=f'embed_chunk_{vector_name}')\n", (12107, 12163), False, 'from my_llm.pubsub_manager import PubSubManager\n'), ((12749, 12812), 'logging.info', 'logging.info', (['f"""Publishing text to app_to_pubsub_{vector_name}"""'], {}), "(f'Publishing text to app_to_pubsub_{vector_name}')\n", (12761, 12812), False, 'import logging\n'), ((12834, 12905), 'my_llm.pubsub_manager.PubSubManager', 'PubSubManager', (['vector_name'], {'pubsub_topic': 'f"""app_to_pubsub_{vector_name}"""'}), "(vector_name, pubsub_topic=f'app_to_pubsub_{vector_name}')\n", (12847, 12905), False, 'from my_llm.pubsub_manager import PubSubManager\n'), ((13331, 13392), 'logging.info', 'logging.info', (['f"""Deleting source: {source} from {vector_name}"""'], {}), "(f'Deleting source: {source} from {vector_name}')\n", (13343, 13392), False, 'import logging\n'), ((13445, 13505), 'logging.info', 'logging.info', (['f"""Deleted source: {source} from {vector_name}"""'], {}), "(f'Deleted source: {source} from {vector_name}')\n", (13457, 13505), False, 'import logging\n'), ((13557, 13599), 'logging.info', 'logging.info', (['f"""Returning sources last 24"""'], {}), "(f'Returning sources last 24')\n", (13569, 13599), False, 'import logging\n'), ((1949, 1978), 'os.getenv', 'os.getenv', (['"""GCS_BUCKET"""', 'None'], {}), "('GCS_BUCKET', None)\n", (1958, 1978), False, 'import os, shutil, json, re\n'), ((4044, 4107), 'logging.info', 'logging.info', (['f"""Sending {gs_file} to UnstructuredAPIFileLoader"""'], {}), "(f'Sending {gs_file} to UnstructuredAPIFileLoader')\n", (4056, 4107), False, 'import logging\n'), ((4125, 4200), 'langchain.document_loaders.unstructured.UnstructuredAPIFileLoader', 'UnstructuredAPIFileLoader', (['gs_file'], {'mode': '"""elements"""', 'api_key': '"""FAKE_API_KEY"""'}), "(gs_file, mode='elements', api_key='FAKE_API_KEY')\n", (4150, 4200), False, 'from langchain.document_loaders.unstructured import UnstructuredAPIFileLoader\n'), ((5632, 5685), 'logging.info', 'logging.info', (['f"""doc_content: {doc.page_content[:30]}"""'], {}), "(f'doc_content: {doc.page_content[:30]}')\n", (5644, 5685), False, 'import logging\n'), ((5976, 6014), 'langchain.text_splitter.PythonCodeTextSplitter', 'text_splitter.PythonCodeTextSplitter', ([], {}), '()\n', (6012, 6014), True, 'import langchain.text_splitter as text_splitter\n'), ((9047, 9077), 'logging.info', 'logging.info', (['"""Detected gs://"""'], {}), "('Detected 
gs://')\n", (9059, 9077), False, 'import logging\n'), ((9194, 9210), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (9208, 9210), False, 'from google.cloud import storage\n'), ((9362, 9385), 'pathlib.Path', 'pathlib.Path', (['file_name'], {}), '(file_name)\n', (9374, 9385), False, 'import pathlib\n'), ((11795, 11827), 'logging.info', 'logging.info', (['"""Detected http://"""'], {}), "('Detected http://')\n", (11807, 11827), False, 'import logging\n'), ((1691, 1712), 'hashlib.sha1', 'hashlib.sha1', (['content'], {}), '(content)\n', (1703, 1712), False, 'import hashlib\n'), ((2518, 2544), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (2534, 2544), False, 'import os, shutil, json, re\n'), ((4379, 4452), 'logging.info', 'logging.info', (['f"""Loaded docs for {gs_file} from UnstructuredAPIFileLoader"""'], {}), "(f'Loaded docs for {gs_file} from UnstructuredAPIFileLoader')\n", (4391, 4452), False, 'import logging\n'), ((6059, 6095), 'langchain.text_splitter.MarkdownTextSplitter', 'text_splitter.MarkdownTextSplitter', ([], {}), '()\n', (6093, 6095), True, 'import langchain.text_splitter as text_splitter\n'), ((7009, 7050), 'base64.b64decode', 'base64.b64decode', (["data['message']['data']"], {}), "(data['message']['data'])\n", (7025, 7050), False, 'import base64\n'), ((7751, 7808), 'logging.info', 'logging.info', (['"""Got valid event from Google Cloud Storage"""'], {}), "('Got valid event from Google Cloud Storage')\n", (7763, 7808), False, 'import logging\n'), ((8741, 8798), 'logging.info', 'logging.info', (['f"""Constructed message_data: {message_data}"""'], {}), "(f'Constructed message_data: {message_data}')\n", (8753, 8798), False, 'import logging\n'), ((9400, 9429), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (9427, 9429), False, 'import tempfile\n'), ((9471, 9509), 'os.path.join', 'os.path.join', (['temp_dir', 'file_name.name'], {}), '(temp_dir, file_name.name)\n', (9483, 9509), False, 'import os, shutil, json, re\n'), ((10036, 10085), 'logging.info', 'logging.info', (['f"""Got http message: {message_data}"""'], {}), "(f'Got http message: {message_data}')\n", (10048, 10085), False, 'import logging\n'), ((10548, 10581), 'logging.info', 'logging.info', (['"""No gs:// detected"""'], {}), "('No gs:// detected')\n", (10560, 10581), False, 'import logging\n'), ((10610, 10634), 'json.loads', 'json.loads', (['message_data'], {}), '(message_data)\n', (10620, 10634), False, 'import os, shutil, json, re\n'), ((1573, 1592), 'hashlib.sha1', 'hashlib.sha1', (['bytes'], {}), '(bytes)\n', (1585, 1592), False, 'import hashlib\n'), ((4643, 4693), 'logging.info', 'logging.info', (['"""trying locally via .txt conversion"""'], {}), "('trying locally via .txt conversion')\n", (4655, 4693), False, 'import logging\n'), ((6705, 6761), 'langchain.schema.Document', 'Document', ([], {'page_content': 'chunk', 'metadata': 'document.metadata'}), '(page_content=chunk, metadata=document.metadata)\n', (6713, 6761), False, 'from langchain.schema import Document\n'), ((7919, 7950), 'logging.info', 'logging.info', (['"""No object found"""'], {}), "('No object found')\n", (7931, 7950), False, 'import logging\n'), ((8055, 8101), 'logging.info', 'logging.info', (['"""GCS object is a directory only"""'], {}), "('GCS object is a directory only')\n", (8067, 8101), False, 'import logging\n'), ((10935, 10967), 'logging.info', 'logging.info', (['"""No content found"""'], {}), "('No content found')\n", (10947, 10967), False, 'import logging\n'), 
((11045, 11098), 'langchain.schema.Document', 'Document', ([], {'page_content': 'the_content', 'metadata': 'metadata'}), '(page_content=the_content, metadata=metadata)\n', (11053, 11098), False, 'from langchain.schema import Document\n'), ((4872, 4921), 'langchain.document_loaders.unstructured.UnstructuredFileLoader', 'UnstructuredFileLoader', (['txt_file'], {'mode': '"""elements"""'}), "(txt_file, mode='elements')\n", (4894, 4921), False, 'from langchain.document_loaders.unstructured import UnstructuredFileLoader\n'), ((8533, 8618), 'logging.info', 'logging.info', (['f"""Overwriting vector_name {vector_name} with {bucket_vector_name}"""'], {}), "(f'Overwriting vector_name {vector_name} with {bucket_vector_name}'\n )\n", (8545, 8618), False, 'import logging\n'), ((5335, 5359), 'os.path.exists', 'os.path.exists', (['txt_file'], {}), '(txt_file)\n', (5349, 5359), False, 'import os, shutil, json, re\n'), ((5381, 5400), 'os.remove', 'os.remove', (['txt_file'], {}), '(txt_file)\n', (5390, 5400), False, 'import os, shutil, json, re\n')]
import langchain from langchain.llms import VertexAI from langchain.prompts import PromptTemplate, load_prompt import wandb from wandb.integration.langchain import WandbTracer import streamlit as st from google.oauth2 import service_account # account_info = dict(st.secrets["GOOGLE_APPLICATION_CREDENTIALS"]) # credentials = service_account.Credentials.from_service_account_info(account_info) def generate_prd_v3_palm(new_feature, new_feature_desc, wandb_name): wandb.login(key=st.secrets["WANDB_API_KEY"]) wandb.init( project="generate_prd_v3_palm", config={ "model": "text-bison-001", "temperature": 0.2 }, entity="arihantsheth", name=wandb_name, ) # llm = VertexAI(credentials=credentials, max_output_tokens=1024) llm = VertexAI(project="synap-labs-390404", location="us-central1", credentials=dict( st.secrets["GOOGLE_APPLICATION_CREDENTIALS"]), max_output_tokens=1024) prompt_template = load_prompt("prompt_templates/generate_prd_template_v2.json") # For deployment # prompt_template = load_prompt("../prompt_templates/generate_prd_template_v3.json") # For local testing prompt = prompt_template.format( new_feature=new_feature, new_feature_desc=new_feature_desc) try: output = llm(prompt, callbacks=[WandbTracer()]) except Exception as e: print("GCP Authentication error") print(e) return # with open(f"./generated_prds/{new_feature}_prd_v3_palm.md", "w") as f: # For deployment # # with open(f"../generated_prds/{new_feature}_prd_palm.md", "w") as f: # For local testing # f.write(output) wandb.finish() return output
[ "langchain.prompts.load_prompt" ]
[((469, 513), 'wandb.login', 'wandb.login', ([], {'key': "st.secrets['WANDB_API_KEY']"}), "(key=st.secrets['WANDB_API_KEY'])\n", (480, 513), False, 'import wandb\n'), ((519, 666), 'wandb.init', 'wandb.init', ([], {'project': '"""generate_prd_v3_palm"""', 'config': "{'model': 'text-bison-001', 'temperature': 0.2}", 'entity': '"""arihantsheth"""', 'name': 'wandb_name'}), "(project='generate_prd_v3_palm', config={'model':\n 'text-bison-001', 'temperature': 0.2}, entity='arihantsheth', name=\n wandb_name)\n", (529, 666), False, 'import wandb\n'), ((993, 1054), 'langchain.prompts.load_prompt', 'load_prompt', (['"""prompt_templates/generate_prd_template_v2.json"""'], {}), "('prompt_templates/generate_prd_template_v2.json')\n", (1004, 1054), False, 'from langchain.prompts import PromptTemplate, load_prompt\n'), ((1679, 1693), 'wandb.finish', 'wandb.finish', ([], {}), '()\n', (1691, 1693), False, 'import wandb\n'), ((1339, 1352), 'wandb.integration.langchain.WandbTracer', 'WandbTracer', ([], {}), '()\n', (1350, 1352), False, 'from wandb.integration.langchain import WandbTracer\n')]
import asyncio import inspect import warnings from abc import ABC, abstractmethod from functools import partial from typing import ( Any, AsyncIterator, Dict, Iterator, List, Optional, Sequence, cast, ) import langchain from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import ( AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks, ) from langchain.load.dump import dumpd, dumps from langchain.prompts.base import StringPromptValue from langchain.prompts.chat import ChatPromptValue from langchain.pydantic_v1 import Field, root_validator from langchain.schema import ( ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo, ) from langchain.schema.language_model import BaseLanguageModel, LanguageModelInput from langchain.schema.messages import ( AIMessage, BaseMessage, BaseMessageChunk, HumanMessage, ) from langchain.schema.output import ChatGenerationChunk from langchain.schema.runnable import RunnableConfig def _get_verbosity() -> bool: return langchain.verbose class BaseChatModel(BaseLanguageModel[BaseMessageChunk], ABC): """Base class for Chat models.""" cache: Optional[bool] = None """Whether to cache the response.""" verbose: bool = Field(default_factory=_get_verbosity) """Whether to print out response text.""" callbacks: Callbacks = Field(default=None, exclude=True) """Callbacks to add to the run trace.""" callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True) """Callback manager to add to the run trace.""" tags: Optional[List[str]] = Field(default=None, exclude=True) """Tags to add to the run trace.""" metadata: Optional[Dict[str, Any]] = Field(default=None, exclude=True) """Metadata to add to the run trace.""" @root_validator() def raise_deprecation(cls, values: Dict) -> Dict: """Raise deprecation warning if callback_manager is used.""" if values.get("callback_manager") is not None: warnings.warn( "callback_manager is deprecated. Please use callbacks instead.", DeprecationWarning, ) values["callbacks"] = values.pop("callback_manager", None) return values class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True # --- Runnable methods --- def _convert_input(self, input: LanguageModelInput) -> PromptValue: if isinstance(input, PromptValue): return input elif isinstance(input, str): return StringPromptValue(text=input) elif isinstance(input, list): return ChatPromptValue(messages=input) else: raise ValueError( f"Invalid input type {type(input)}. " "Must be a PromptValue, str, or list of BaseMessages." 
) def invoke( self, input: LanguageModelInput, config: Optional[RunnableConfig] = None, *, stop: Optional[List[str]] = None, **kwargs: Any, ) -> BaseMessageChunk: config = config or {} return cast( BaseMessageChunk, cast( ChatGeneration, self.generate_prompt( [self._convert_input(input)], stop=stop, callbacks=config.get("callbacks"), tags=config.get("tags"), metadata=config.get("metadata"), **kwargs, ).generations[0][0], ).message, ) async def ainvoke( self, input: LanguageModelInput, config: Optional[RunnableConfig] = None, *, stop: Optional[List[str]] = None, **kwargs: Any, ) -> BaseMessageChunk: if type(self)._agenerate == BaseChatModel._agenerate: # model doesn't implement async generation, so use default implementation return await asyncio.get_running_loop().run_in_executor( None, partial(self.invoke, input, config, stop=stop, **kwargs) ) config = config or {} llm_result = await self.agenerate_prompt( [self._convert_input(input)], stop=stop, callbacks=config.get("callbacks"), tags=config.get("tags"), metadata=config.get("metadata"), **kwargs, ) return cast( BaseMessageChunk, cast(ChatGeneration, llm_result.generations[0][0]).message ) def stream( self, input: LanguageModelInput, config: Optional[RunnableConfig] = None, *, stop: Optional[List[str]] = None, **kwargs: Any, ) -> Iterator[BaseMessageChunk]: if type(self)._stream == BaseChatModel._stream: # model doesn't implement streaming, so use default implementation yield self.invoke(input, config=config, stop=stop, **kwargs) else: config = config or {} messages = self._convert_input(input).to_messages() params = self._get_invocation_params(stop=stop, **kwargs) options = {"stop": stop, **kwargs} callback_manager = CallbackManager.configure( config.get("callbacks"), self.callbacks, self.verbose, config.get("tags"), self.tags, config.get("metadata"), self.metadata, ) (run_manager,) = callback_manager.on_chat_model_start( dumpd(self), [messages], invocation_params=params, options=options ) try: message: Optional[BaseMessageChunk] = None for chunk in self._stream( messages, stop=stop, run_manager=run_manager, **kwargs ): yield chunk.message if message is None: message = chunk.message else: message += chunk.message assert message is not None except (KeyboardInterrupt, Exception) as e: run_manager.on_llm_error(e) raise e else: run_manager.on_llm_end( LLMResult(generations=[[ChatGeneration(message=message)]]), ) async def astream( self, input: LanguageModelInput, config: Optional[RunnableConfig] = None, *, stop: Optional[List[str]] = None, **kwargs: Any, ) -> AsyncIterator[BaseMessageChunk]: if type(self)._astream == BaseChatModel._astream: # model doesn't implement streaming, so use default implementation yield self.invoke(input, config=config, stop=stop, **kwargs) else: config = config or {} messages = self._convert_input(input).to_messages() params = self._get_invocation_params(stop=stop, **kwargs) options = {"stop": stop, **kwargs} callback_manager = AsyncCallbackManager.configure( config.get("callbacks"), self.callbacks, self.verbose, config.get("tags"), self.tags, config.get("metadata"), self.metadata, ) (run_manager,) = await callback_manager.on_chat_model_start( dumpd(self), [messages], invocation_params=params, options=options ) try: message: Optional[BaseMessageChunk] = None async for chunk in self._astream( messages, stop=stop, run_manager=run_manager, **kwargs ): yield chunk.message if message is None: message = chunk.message else: message += chunk.message assert message is not None except (KeyboardInterrupt, Exception) as e: 
await run_manager.on_llm_error(e) raise e else: await run_manager.on_llm_end( LLMResult(generations=[[ChatGeneration(message=message)]]), ) # --- Custom methods --- def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict: return {} def _get_invocation_params( self, stop: Optional[List[str]] = None, **kwargs: Any, ) -> dict: params = self.dict() params["stop"] = stop return {**params, **kwargs} def _get_llm_string(self, stop: Optional[List[str]] = None, **kwargs: Any) -> str: if self.lc_serializable: params = {**kwargs, **{"stop": stop}} param_string = str(sorted([(k, v) for k, v in params.items()])) llm_string = dumps(self) return llm_string + "---" + param_string else: params = self._get_invocation_params(stop=stop, **kwargs) params = {**params, **kwargs} return str(sorted([(k, v) for k, v in params.items()])) def generate( self, messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Callbacks = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> LLMResult: """Top Level call""" params = self._get_invocation_params(stop=stop, **kwargs) options = {"stop": stop} callback_manager = CallbackManager.configure( callbacks, self.callbacks, self.verbose, tags, self.tags, metadata, self.metadata, ) run_managers = callback_manager.on_chat_model_start( dumpd(self), messages, invocation_params=params, options=options ) results = [] for i, m in enumerate(messages): try: results.append( self._generate_with_cache( m, stop=stop, run_manager=run_managers[i] if run_managers else None, **kwargs, ) ) except (KeyboardInterrupt, Exception) as e: if run_managers: run_managers[i].on_llm_error(e) raise e flattened_outputs = [ LLMResult(generations=[res.generations], llm_output=res.llm_output) for res in results ] llm_output = self._combine_llm_outputs([res.llm_output for res in results]) generations = [res.generations for res in results] output = LLMResult(generations=generations, llm_output=llm_output) if run_managers: run_infos = [] for manager, flattened_output in zip(run_managers, flattened_outputs): manager.on_llm_end(flattened_output) run_infos.append(RunInfo(run_id=manager.run_id)) output.run = run_infos return output async def agenerate( self, messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Callbacks = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> LLMResult: """Top Level call""" params = self._get_invocation_params(stop=stop, **kwargs) options = {"stop": stop} callback_manager = AsyncCallbackManager.configure( callbacks, self.callbacks, self.verbose, tags, self.tags, metadata, self.metadata, ) run_managers = await callback_manager.on_chat_model_start( dumpd(self), messages, invocation_params=params, options=options ) results = await asyncio.gather( *[ self._agenerate_with_cache( m, stop=stop, run_manager=run_managers[i] if run_managers else None, **kwargs, ) for i, m in enumerate(messages) ], return_exceptions=True, ) exceptions = [] for i, res in enumerate(results): if isinstance(res, Exception): if run_managers: await run_managers[i].on_llm_error(res) exceptions.append(res) if exceptions: if run_managers: await asyncio.gather( *[ run_manager.on_llm_end( LLMResult( generations=[res.generations], llm_output=res.llm_output ) ) for run_manager, res in zip(run_managers, results) if not isinstance(res, Exception) ] ) raise exceptions[0] flattened_outputs = [ LLMResult(generations=[res.generations], llm_output=res.llm_output) 
for res in results ] llm_output = self._combine_llm_outputs([res.llm_output for res in results]) generations = [res.generations for res in results] output = LLMResult(generations=generations, llm_output=llm_output) await asyncio.gather( *[ run_manager.on_llm_end(flattened_output) for run_manager, flattened_output in zip( run_managers, flattened_outputs ) ] ) if run_managers: output.run = [ RunInfo(run_id=run_manager.run_id) for run_manager in run_managers ] return output def generate_prompt( self, prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> LLMResult: prompt_messages = [p.to_messages() for p in prompts] return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs) async def agenerate_prompt( self, prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> LLMResult: prompt_messages = [p.to_messages() for p in prompts] return await self.agenerate( prompt_messages, stop=stop, callbacks=callbacks, **kwargs ) def _generate_with_cache( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: new_arg_supported = inspect.signature(self._generate).parameters.get( "run_manager" ) disregard_cache = self.cache is not None and not self.cache if langchain.llm_cache is None or disregard_cache: # This happens when langchain.cache is None, but self.cache is True if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but no cache found at `langchain.cache`." ) if new_arg_supported: return self._generate( messages, stop=stop, run_manager=run_manager, **kwargs ) else: return self._generate(messages, stop=stop, **kwargs) else: llm_string = self._get_llm_string(stop=stop, **kwargs) prompt = dumps(messages) cache_val = langchain.llm_cache.lookup(prompt, llm_string) if isinstance(cache_val, list): return ChatResult(generations=cache_val) else: if new_arg_supported: result = self._generate( messages, stop=stop, run_manager=run_manager, **kwargs ) else: result = self._generate(messages, stop=stop, **kwargs) langchain.llm_cache.update(prompt, llm_string, result.generations) return result async def _agenerate_with_cache( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: new_arg_supported = inspect.signature(self._agenerate).parameters.get( "run_manager" ) disregard_cache = self.cache is not None and not self.cache if langchain.llm_cache is None or disregard_cache: # This happens when langchain.cache is None, but self.cache is True if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but no cache found at `langchain.cache`." 
) if new_arg_supported: return await self._agenerate( messages, stop=stop, run_manager=run_manager, **kwargs ) else: return await self._agenerate(messages, stop=stop, **kwargs) else: llm_string = self._get_llm_string(stop=stop, **kwargs) prompt = dumps(messages) cache_val = langchain.llm_cache.lookup(prompt, llm_string) if isinstance(cache_val, list): return ChatResult(generations=cache_val) else: if new_arg_supported: result = await self._agenerate( messages, stop=stop, run_manager=run_manager, **kwargs ) else: result = await self._agenerate(messages, stop=stop, **kwargs) langchain.llm_cache.update(prompt, llm_string, result.generations) return result @abstractmethod def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: """Top Level call""" async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: """Top Level call""" raise NotImplementedError() def _stream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[ChatGenerationChunk]: raise NotImplementedError() def _astream( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> AsyncIterator[ChatGenerationChunk]: raise NotImplementedError() def __call__( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> BaseMessage: generation = self.generate( [messages], stop=stop, callbacks=callbacks, **kwargs ).generations[0][0] if isinstance(generation, ChatGeneration): return generation.message else: raise ValueError("Unexpected generation type") async def _call_async( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> BaseMessage: result = await self.agenerate( [messages], stop=stop, callbacks=callbacks, **kwargs ) generation = result.generations[0][0] if isinstance(generation, ChatGeneration): return generation.message else: raise ValueError("Unexpected generation type") def call_as_llm( self, message: str, stop: Optional[List[str]] = None, **kwargs: Any ) -> str: return self.predict(message, stop=stop, **kwargs) def predict( self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any ) -> str: if stop is None: _stop = None else: _stop = list(stop) result = self([HumanMessage(content=text)], stop=_stop, **kwargs) return result.content def predict_messages( self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any, ) -> BaseMessage: if stop is None: _stop = None else: _stop = list(stop) return self(messages, stop=_stop, **kwargs) async def apredict( self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any ) -> str: if stop is None: _stop = None else: _stop = list(stop) result = await self._call_async( [HumanMessage(content=text)], stop=_stop, **kwargs ) return result.content async def apredict_messages( self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any, ) -> BaseMessage: if stop is None: _stop = None else: _stop = list(stop) return await self._call_async(messages, stop=_stop, **kwargs) @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return 
{} @property @abstractmethod def _llm_type(self) -> str: """Return type of chat model.""" def dict(self, **kwargs: Any) -> Dict: """Return a dictionary of the LLM.""" starter_dict = dict(self._identifying_params) starter_dict["_type"] = self._llm_type return starter_dict class SimpleChatModel(BaseChatModel): """Simple Chat Model.""" def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: output_str = self._call(messages, stop=stop, run_manager=run_manager, **kwargs) message = AIMessage(content=output_str) generation = ChatGeneration(message=message) return ChatResult(generations=[generation]) @abstractmethod def _call( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Simpler interface.""" async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: func = partial( self._generate, messages, stop=stop, run_manager=run_manager, **kwargs ) return await asyncio.get_event_loop().run_in_executor(None, func)
[ "langchain.pydantic_v1.Field", "langchain.callbacks.manager.AsyncCallbackManager.configure", "langchain.schema.messages.AIMessage", "langchain.schema.ChatResult", "langchain.load.dump.dumps", "langchain.callbacks.manager.CallbackManager.configure", "langchain.load.dump.dumpd", "langchain.schema.RunInfo", "langchain.schema.messages.HumanMessage", "langchain.prompts.chat.ChatPromptValue", "langchain.schema.ChatGeneration", "langchain.prompts.base.StringPromptValue", "langchain.llm_cache.lookup", "langchain.llm_cache.update", "langchain.schema.LLMResult", "langchain.pydantic_v1.root_validator" ]
[((1364, 1401), 'langchain.pydantic_v1.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (1369, 1401), False, 'from langchain.pydantic_v1 import Field, root_validator\n'), ((1475, 1508), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (1480, 1508), False, 'from langchain.pydantic_v1 import Field, root_validator\n'), ((1608, 1641), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (1613, 1641), False, 'from langchain.pydantic_v1 import Field, root_validator\n'), ((1726, 1759), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (1731, 1759), False, 'from langchain.pydantic_v1 import Field, root_validator\n'), ((1841, 1874), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (1846, 1874), False, 'from langchain.pydantic_v1 import Field, root_validator\n'), ((1925, 1941), 'langchain.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (1939, 1941), False, 'from langchain.pydantic_v1 import Field, root_validator\n'), ((9835, 9947), 'langchain.callbacks.manager.CallbackManager.configure', 'CallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose', 'tags', 'self.tags', 'metadata', 'self.metadata'], {}), '(callbacks, self.callbacks, self.verbose, tags,\n self.tags, metadata, self.metadata)\n', (9860, 9947), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((11036, 11093), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output'}), '(generations=generations, llm_output=llm_output)\n', (11045, 11093), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((11869, 11986), 'langchain.callbacks.manager.AsyncCallbackManager.configure', 'AsyncCallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose', 'tags', 'self.tags', 'metadata', 'self.metadata'], {}), '(callbacks, self.callbacks, self.verbose,\n tags, self.tags, metadata, self.metadata)\n', (11899, 11986), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((13721, 13778), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output'}), '(generations=generations, llm_output=llm_output)\n', (13730, 13778), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((22871, 22900), 'langchain.schema.messages.AIMessage', 'AIMessage', ([], {'content': 'output_str'}), '(content=output_str)\n', (22880, 22900), False, 'from langchain.schema.messages import AIMessage, BaseMessage, BaseMessageChunk, HumanMessage\n'), ((22922, 22953), 'langchain.schema.ChatGeneration', 'ChatGeneration', ([], {'message': 'message'}), '(message=message)\n', (22936, 22953), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((22969, 23005), 'langchain.schema.ChatResult', 'ChatResult', ([], {'generations': '[generation]'}), '(generations=[generation])\n', (22979, 23005), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, 
RunInfo\n'), ((23517, 23596), 'functools.partial', 'partial', (['self._generate', 'messages'], {'stop': 'stop', 'run_manager': 'run_manager'}), '(self._generate, messages, stop=stop, run_manager=run_manager, **kwargs)\n', (23524, 23596), False, 'from functools import partial\n'), ((2132, 2234), 'warnings.warn', 'warnings.warn', (['"""callback_manager is deprecated. Please use callbacks instead."""', 'DeprecationWarning'], {}), "('callback_manager is deprecated. Please use callbacks instead.',\n DeprecationWarning)\n", (2145, 2234), False, 'import warnings\n'), ((9118, 9129), 'langchain.load.dump.dumps', 'dumps', (['self'], {}), '(self)\n', (9123, 9129), False, 'from langchain.load.dump import dumpd, dumps\n'), ((10112, 10123), 'langchain.load.dump.dumpd', 'dumpd', (['self'], {}), '(self)\n', (10117, 10123), False, 'from langchain.load.dump import dumpd, dumps\n'), ((10767, 10834), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': '[res.generations]', 'llm_output': 'res.llm_output'}), '(generations=[res.generations], llm_output=res.llm_output)\n', (10776, 10834), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((13452, 13519), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': '[res.generations]', 'llm_output': 'res.llm_output'}), '(generations=[res.generations], llm_output=res.llm_output)\n', (13461, 13519), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((16027, 16042), 'langchain.load.dump.dumps', 'dumps', (['messages'], {}), '(messages)\n', (16032, 16042), False, 'from langchain.load.dump import dumpd, dumps\n'), ((16067, 16113), 'langchain.llm_cache.lookup', 'langchain.llm_cache.lookup', (['prompt', 'llm_string'], {}), '(prompt, llm_string)\n', (16093, 16113), False, 'import langchain\n'), ((17747, 17762), 'langchain.load.dump.dumps', 'dumps', (['messages'], {}), '(messages)\n', (17752, 17762), False, 'from langchain.load.dump import dumpd, dumps\n'), ((17787, 17833), 'langchain.llm_cache.lookup', 'langchain.llm_cache.lookup', (['prompt', 'llm_string'], {}), '(prompt, llm_string)\n', (17813, 17833), False, 'import langchain\n'), ((2713, 2742), 'langchain.prompts.base.StringPromptValue', 'StringPromptValue', ([], {'text': 'input'}), '(text=input)\n', (2730, 2742), False, 'from langchain.prompts.base import StringPromptValue\n'), ((4629, 4679), 'typing.cast', 'cast', (['ChatGeneration', 'llm_result.generations[0][0]'], {}), '(ChatGeneration, llm_result.generations[0][0])\n', (4633, 4679), False, 'from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Sequence, cast\n'), ((5755, 5766), 'langchain.load.dump.dumpd', 'dumpd', (['self'], {}), '(self)\n', (5760, 5766), False, 'from langchain.load.dump import dumpd, dumps\n'), ((12158, 12169), 'langchain.load.dump.dumpd', 'dumpd', (['self'], {}), '(self)\n', (12163, 12169), False, 'from langchain.load.dump import dumpd, dumps\n'), ((14101, 14135), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (14108, 14135), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((16181, 16214), 'langchain.schema.ChatResult', 'ChatResult', ([], {'generations': 'cache_val'}), '(generations=cache_val)\n', (16191, 16214), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((16530, 16596), 'langchain.llm_cache.update', 'langchain.llm_cache.update', 
(['prompt', 'llm_string', 'result.generations'], {}), '(prompt, llm_string, result.generations)\n', (16556, 16596), False, 'import langchain\n'), ((17901, 17934), 'langchain.schema.ChatResult', 'ChatResult', ([], {'generations': 'cache_val'}), '(generations=cache_val)\n', (17911, 17934), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((18264, 18330), 'langchain.llm_cache.update', 'langchain.llm_cache.update', (['prompt', 'llm_string', 'result.generations'], {}), '(prompt, llm_string, result.generations)\n', (18290, 18330), False, 'import langchain\n'), ((20898, 20924), 'langchain.schema.messages.HumanMessage', 'HumanMessage', ([], {'content': 'text'}), '(content=text)\n', (20910, 20924), False, 'from langchain.schema.messages import AIMessage, BaseMessage, BaseMessageChunk, HumanMessage\n'), ((2800, 2831), 'langchain.prompts.chat.ChatPromptValue', 'ChatPromptValue', ([], {'messages': 'input'}), '(messages=input)\n', (2815, 2831), False, 'from langchain.prompts.chat import ChatPromptValue\n'), ((4200, 4256), 'functools.partial', 'partial', (['self.invoke', 'input', 'config'], {'stop': 'stop'}), '(self.invoke, input, config, stop=stop, **kwargs)\n', (4207, 4256), False, 'from functools import partial\n'), ((7657, 7668), 'langchain.load.dump.dumpd', 'dumpd', (['self'], {}), '(self)\n', (7662, 7668), False, 'from langchain.load.dump import dumpd, dumps\n'), ((11315, 11345), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'manager.run_id'}), '(run_id=manager.run_id)\n', (11322, 11345), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((15194, 15227), 'inspect.signature', 'inspect.signature', (['self._generate'], {}), '(self._generate)\n', (15211, 15227), False, 'import inspect\n'), ((16899, 16933), 'inspect.signature', 'inspect.signature', (['self._agenerate'], {}), '(self._agenerate)\n', (16916, 16933), False, 'import inspect\n'), ((21574, 21600), 'langchain.schema.messages.HumanMessage', 'HumanMessage', ([], {'content': 'text'}), '(content=text)\n', (21586, 21600), False, 'from langchain.schema.messages import AIMessage, BaseMessage, BaseMessageChunk, HumanMessage\n'), ((23640, 23664), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (23662, 23664), False, 'import asyncio\n'), ((4134, 4160), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (4158, 4160), False, 'import asyncio\n'), ((6521, 6552), 'langchain.schema.ChatGeneration', 'ChatGeneration', ([], {'message': 'message'}), '(message=message)\n', (6535, 6552), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((13049, 13116), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': '[res.generations]', 'llm_output': 'res.llm_output'}), '(generations=[res.generations], llm_output=res.llm_output)\n', (13058, 13116), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((8442, 8473), 'langchain.schema.ChatGeneration', 'ChatGeneration', ([], {'message': 'message'}), '(message=message)\n', (8456, 8473), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n')]
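The row above is langchain's chat-model base module: BaseChatModel.generate drives callbacks and the llm_cache path, and SimpleChatModel only asks subclasses for a _call that returns a string. A minimal sketch of a custom chat model built on that machinery, assuming the same langchain version as the row (the EchoChatModel class and its echoed reply are illustrative, not a real model):

from typing import Any, List, Optional

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chat_models.base import SimpleChatModel
from langchain.schema.messages import BaseMessage, HumanMessage


class EchoChatModel(SimpleChatModel):
    """Toy model: _call returns a plain string; SimpleChatModel._generate wraps it into a ChatResult."""

    @property
    def _llm_type(self) -> str:
        return "echo-chat-model"

    def _call(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        # BaseChatModel.generate/__call__ handle callbacks and llm_cache around this call.
        return f"echo: {messages[-1].content}"


model = EchoChatModel()
print(model([HumanMessage(content="hello")]).content)  # -> echo: hello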
import os from transformers import AutoTokenizer from configs import ( EMBEDDING_MODEL, KB_ROOT_PATH, CHUNK_SIZE, OVERLAP_SIZE, ZH_TITLE_ENHANCE, logger, log_verbose, text_splitter_dict, LLM_MODEL, TEXT_SPLITTER_NAME, ) import importlib from text_splitter import zh_title_enhance as func_zh_title_enhance import langchain.document_loaders from langchain.docstore.document import Document from langchain.text_splitter import TextSplitter from pathlib import Path import json from concurrent.futures import ThreadPoolExecutor from server.utils import run_in_thread_pool, embedding_device, get_model_worker_config import io from typing import List, Union, Callable, Dict, Optional, Tuple, Generator import chardet def validate_kb_name(knowledge_base_id: str) -> bool: # 检查是否包含预期外的字符或路径攻击关键字 if "../" in knowledge_base_id: return False return True def get_kb_path(knowledge_base_name: str): return os.path.join(KB_ROOT_PATH, knowledge_base_name) def get_doc_path(knowledge_base_name: str): return os.path.join(get_kb_path(knowledge_base_name), "content") def get_vs_path(knowledge_base_name: str, vector_name: str): return os.path.join(get_kb_path(knowledge_base_name), vector_name) def get_file_path(knowledge_base_name: str, doc_name: str): return os.path.join(get_doc_path(knowledge_base_name), doc_name) def list_kbs_from_folder(): return [f for f in os.listdir(KB_ROOT_PATH) if os.path.isdir(os.path.join(KB_ROOT_PATH, f))] def list_files_from_folder(kb_name: str): doc_path = get_doc_path(kb_name) return [file for file in os.listdir(doc_path) if os.path.isfile(os.path.join(doc_path, file))] def load_embeddings(model: str = EMBEDDING_MODEL, device: str = embedding_device()): ''' 从缓存中加载embeddings,可以避免多线程时竞争加载。 ''' from server.knowledge_base.kb_cache.base import embeddings_pool return embeddings_pool.load_embeddings(model=model, device=device) LOADER_DICT = {"UnstructuredHTMLLoader": ['.html'], "UnstructuredMarkdownLoader": ['.md'], "CustomJSONLoader": [".json"], "CSVLoader": [".csv"], "RapidOCRPDFLoader": [".pdf"], "RapidOCRLoader": ['.png', '.jpg', '.jpeg', '.bmp'], "UnstructuredFileLoader": ['.eml', '.msg', '.rst', '.rtf', '.txt', '.xml', '.docx', '.epub', '.odt', '.ppt', '.pptx', '.tsv'], } SUPPORTED_EXTS = [ext for sublist in LOADER_DICT.values() for ext in sublist] class CustomJSONLoader(langchain.document_loaders.JSONLoader): ''' langchain的JSONLoader需要jq,在win上使用不便,进行替代。针对langchain==0.0.286 ''' def __init__( self, file_path: Union[str, Path], content_key: Optional[str] = None, metadata_func: Optional[Callable[[Dict, Dict], Dict]] = None, text_content: bool = True, json_lines: bool = False, ): """Initialize the JSONLoader. Args: file_path (Union[str, Path]): The path to the JSON or JSON Lines file. content_key (str): The key to use to extract the content from the JSON if results to a list of objects (dict). metadata_func (Callable[Dict, Dict]): A function that takes in the JSON object extracted by the jq_schema and the default metadata and returns a dict of the updated metadata. text_content (bool): Boolean flag to indicate whether the content is in string format, default to True. json_lines (bool): Boolean flag to indicate whether the input is in JSON Lines format. 
""" self.file_path = Path(file_path).resolve() self._content_key = content_key self._metadata_func = metadata_func self._text_content = text_content self._json_lines = json_lines def _parse(self, content: str, docs: List[Document]) -> None: """Convert given content to documents.""" data = json.loads(content) # Perform some validation # This is not a perfect validation, but it should catch most cases # and prevent the user from getting a cryptic error later on. if self._content_key is not None: self._validate_content_key(data) if self._metadata_func is not None: self._validate_metadata_func(data) for i, sample in enumerate(data, len(docs) + 1): text = self._get_text(sample=sample) metadata = self._get_metadata( sample=sample, source=str(self.file_path), seq_num=i ) docs.append(Document(page_content=text, metadata=metadata)) langchain.document_loaders.CustomJSONLoader = CustomJSONLoader def get_LoaderClass(file_extension): for LoaderClass, extensions in LOADER_DICT.items(): if file_extension in extensions: return LoaderClass # 把一些向量化共用逻辑从KnowledgeFile抽取出来,等langchain支持内存文件的时候,可以将非磁盘文件向量化 def get_loader(loader_name: str, file_path_or_content: Union[str, bytes, io.StringIO, io.BytesIO]): ''' 根据loader_name和文件路径或内容返回文档加载器。 ''' try: if loader_name in ["RapidOCRPDFLoader", "RapidOCRLoader"]: document_loaders_module = importlib.import_module('document_loaders') else: document_loaders_module = importlib.import_module('langchain.document_loaders') DocumentLoader = getattr(document_loaders_module, loader_name) except Exception as e: msg = f"为文件{file_path_or_content}查找加载器{loader_name}时出错:{e}" logger.error(f'{e.__class__.__name__}: {msg}', exc_info=e if log_verbose else None) document_loaders_module = importlib.import_module('langchain.document_loaders') DocumentLoader = getattr(document_loaders_module, "UnstructuredFileLoader") if loader_name == "UnstructuredFileLoader": loader = DocumentLoader(file_path_or_content, autodetect_encoding=True) elif loader_name == "CSVLoader": # 自动识别文件编码类型,避免langchain loader 加载文件报编码错误 with open(file_path_or_content, 'rb') as struct_file: encode_detect = chardet.detect(struct_file.read()) if encode_detect: loader = DocumentLoader(file_path_or_content, encoding=encode_detect["encoding"]) else: loader = DocumentLoader(file_path_or_content, encoding="utf-8") elif loader_name == "JSONLoader": loader = DocumentLoader(file_path_or_content, jq_schema=".", text_content=False) elif loader_name == "CustomJSONLoader": loader = DocumentLoader(file_path_or_content, text_content=False) elif loader_name == "UnstructuredMarkdownLoader": loader = DocumentLoader(file_path_or_content, mode="elements") elif loader_name == "UnstructuredHTMLLoader": loader = DocumentLoader(file_path_or_content, mode="elements") else: loader = DocumentLoader(file_path_or_content) return loader def make_text_splitter( splitter_name: str = TEXT_SPLITTER_NAME, chunk_size: int = CHUNK_SIZE, chunk_overlap: int = OVERLAP_SIZE, llm_model: str = LLM_MODEL, ): """ 根据参数获取特定的分词器 """ splitter_name = splitter_name or "SpacyTextSplitter" try: if splitter_name == "MarkdownHeaderTextSplitter": # MarkdownHeaderTextSplitter特殊判定 headers_to_split_on = text_splitter_dict[splitter_name]['headers_to_split_on'] text_splitter = langchain.text_splitter.MarkdownHeaderTextSplitter( headers_to_split_on=headers_to_split_on) else: try: ## 优先使用用户自定义的text_splitter text_splitter_module = importlib.import_module('text_splitter') TextSplitter = getattr(text_splitter_module, splitter_name) except: ## 否则使用langchain的text_splitter text_splitter_module 
= importlib.import_module('langchain.text_splitter') TextSplitter = getattr(text_splitter_module, splitter_name) if text_splitter_dict[splitter_name]["source"] == "tiktoken": ## 从tiktoken加载 try: text_splitter = TextSplitter.from_tiktoken_encoder( encoding_name=text_splitter_dict[splitter_name]["tokenizer_name_or_path"], pipeline="zh_core_web_sm", chunk_size=chunk_size, chunk_overlap=chunk_overlap ) except: text_splitter = TextSplitter.from_tiktoken_encoder( encoding_name=text_splitter_dict[splitter_name]["tokenizer_name_or_path"], chunk_size=chunk_size, chunk_overlap=chunk_overlap ) elif text_splitter_dict[splitter_name]["source"] == "huggingface": ## 从huggingface加载 if text_splitter_dict[splitter_name]["tokenizer_name_or_path"] == "": config = get_model_worker_config(llm_model) text_splitter_dict[splitter_name]["tokenizer_name_or_path"] = \ config.get("model_path") if text_splitter_dict[splitter_name]["tokenizer_name_or_path"] == "gpt2": from transformers import GPT2TokenizerFast from langchain.text_splitter import CharacterTextSplitter tokenizer = GPT2TokenizerFast.from_pretrained("gpt2") else: ## 字符长度加载 tokenizer = AutoTokenizer.from_pretrained( text_splitter_dict[splitter_name]["tokenizer_name_or_path"], trust_remote_code=True) text_splitter = TextSplitter.from_huggingface_tokenizer( tokenizer=tokenizer, chunk_size=chunk_size, chunk_overlap=chunk_overlap ) else: try: text_splitter = TextSplitter( pipeline="zh_core_web_sm", chunk_size=chunk_size, chunk_overlap=chunk_overlap ) except: text_splitter = TextSplitter( chunk_size=chunk_size, chunk_overlap=chunk_overlap ) except Exception as e: print(e) text_splitter_module = importlib.import_module('langchain.text_splitter') TextSplitter = getattr(text_splitter_module, "RecursiveCharacterTextSplitter") text_splitter = TextSplitter(chunk_size=250, chunk_overlap=50) return text_splitter class KnowledgeFile: def __init__( self, filename: str, knowledge_base_name: str ): ''' 对应知识库目录中的文件,必须是磁盘上存在的才能进行向量化等操作。 ''' self.kb_name = knowledge_base_name self.filename = filename self.ext = os.path.splitext(filename)[-1].lower() if self.ext not in SUPPORTED_EXTS: raise ValueError(f"暂未支持的文件格式 {self.ext}") self.filepath = get_file_path(knowledge_base_name, filename) self.docs = None self.splited_docs = None self.document_loader_name = get_LoaderClass(self.ext) self.text_splitter_name = TEXT_SPLITTER_NAME def file2docs(self, refresh: bool=False): if self.docs is None or refresh: logger.info(f"{self.document_loader_name} used for {self.filepath}") loader = get_loader(self.document_loader_name, self.filepath) self.docs = loader.load() return self.docs def docs2texts( self, docs: List[Document] = None, zh_title_enhance: bool = ZH_TITLE_ENHANCE, refresh: bool = False, chunk_size: int = CHUNK_SIZE, chunk_overlap: int = OVERLAP_SIZE, text_splitter: TextSplitter = None, ): docs = docs or self.file2docs(refresh=refresh) if not docs: return [] if self.ext not in [".csv"]: if text_splitter is None: text_splitter = make_text_splitter(splitter_name=self.text_splitter_name, chunk_size=chunk_size, chunk_overlap=chunk_overlap) if self.text_splitter_name == "MarkdownHeaderTextSplitter": docs = text_splitter.split_text(docs[0].page_content) for doc in docs: # 如果文档有元数据 if doc.metadata: doc.metadata["source"] = os.path.basename(self.filepath) else: docs = text_splitter.split_documents(docs) print(f"文档切分示例:{docs[0]}") if zh_title_enhance: docs = func_zh_title_enhance(docs) self.splited_docs = docs return self.splited_docs def file2text( self, zh_title_enhance: bool = 
ZH_TITLE_ENHANCE, refresh: bool = False, chunk_size: int = CHUNK_SIZE, chunk_overlap: int = OVERLAP_SIZE, text_splitter: TextSplitter = None, ): if self.splited_docs is None or refresh: docs = self.file2docs() self.splited_docs = self.docs2texts(docs=docs, zh_title_enhance=zh_title_enhance, refresh=refresh, chunk_size=chunk_size, chunk_overlap=chunk_overlap, text_splitter=text_splitter) return self.splited_docs def file_exist(self): return os.path.isfile(self.filepath) def get_mtime(self): return os.path.getmtime(self.filepath) def get_size(self): return os.path.getsize(self.filepath) def files2docs_in_thread( files: List[Union[KnowledgeFile, Tuple[str, str], Dict]], chunk_size: int = CHUNK_SIZE, chunk_overlap: int = OVERLAP_SIZE, zh_title_enhance: bool = ZH_TITLE_ENHANCE, pool: ThreadPoolExecutor = None, ) -> Generator: ''' 利用多线程批量将磁盘文件转化成langchain Document. 如果传入参数是Tuple,形式为(filename, kb_name) 生成器返回值为 status, (kb_name, file_name, docs | error) ''' def file2docs(*, file: KnowledgeFile, **kwargs) -> Tuple[bool, Tuple[str, str, List[Document]]]: try: return True, (file.kb_name, file.filename, file.file2text(**kwargs)) except Exception as e: msg = f"从文件 {file.kb_name}/{file.filename} 加载文档时出错:{e}" logger.error(f'{e.__class__.__name__}: {msg}', exc_info=e if log_verbose else None) return False, (file.kb_name, file.filename, msg) kwargs_list = [] for i, file in enumerate(files): kwargs = {} try: if isinstance(file, tuple) and len(file) >= 2: filename=file[0] kb_name=file[1] file = KnowledgeFile(filename=filename, knowledge_base_name=kb_name) elif isinstance(file, dict): filename = file.pop("filename") kb_name = file.pop("kb_name") kwargs.update(file) file = KnowledgeFile(filename=filename, knowledge_base_name=kb_name) kwargs["file"] = file kwargs["chunk_size"] = chunk_size kwargs["chunk_overlap"] = chunk_overlap kwargs["zh_title_enhance"] = zh_title_enhance kwargs_list.append(kwargs) except Exception as e: yield False, (kb_name, filename, str(e)) for result in run_in_thread_pool(func=file2docs, params=kwargs_list, pool=pool): yield result if __name__ == "__main__": from pprint import pprint kb_file = KnowledgeFile(filename="test.txt", knowledge_base_name="samples") # kb_file.text_splitter_name = "RecursiveCharacterTextSplitter" docs = kb_file.file2docs() pprint(docs[-1]) docs = kb_file.file2text() pprint(docs[-1])
[ "langchain.text_splitter.TextSplitter.from_huggingface_tokenizer", "langchain.text_splitter.TextSplitter.from_tiktoken_encoder", "langchain.docstore.document.Document", "langchain.text_splitter.TextSplitter" ]
[((964, 1011), 'os.path.join', 'os.path.join', (['KB_ROOT_PATH', 'knowledge_base_name'], {}), '(KB_ROOT_PATH, knowledge_base_name)\n', (976, 1011), False, 'import os\n'), ((1789, 1807), 'server.utils.embedding_device', 'embedding_device', ([], {}), '()\n', (1805, 1807), False, 'from server.utils import run_in_thread_pool, embedding_device, get_model_worker_config\n'), ((1940, 1999), 'server.knowledge_base.kb_cache.base.embeddings_pool.load_embeddings', 'embeddings_pool.load_embeddings', ([], {'model': 'model', 'device': 'device'}), '(model=model, device=device)\n', (1971, 1999), False, 'from server.knowledge_base.kb_cache.base import embeddings_pool\n'), ((15816, 15881), 'server.utils.run_in_thread_pool', 'run_in_thread_pool', ([], {'func': 'file2docs', 'params': 'kwargs_list', 'pool': 'pool'}), '(func=file2docs, params=kwargs_list, pool=pool)\n', (15834, 15881), False, 'from server.utils import run_in_thread_pool, embedding_device, get_model_worker_config\n'), ((16147, 16163), 'pprint.pprint', 'pprint', (['docs[-1]'], {}), '(docs[-1])\n', (16153, 16163), False, 'from pprint import pprint\n'), ((16200, 16216), 'pprint.pprint', 'pprint', (['docs[-1]'], {}), '(docs[-1])\n', (16206, 16216), False, 'from pprint import pprint\n'), ((4174, 4193), 'json.loads', 'json.loads', (['content'], {}), '(content)\n', (4184, 4193), False, 'import json\n'), ((13844, 13873), 'os.path.isfile', 'os.path.isfile', (['self.filepath'], {}), '(self.filepath)\n', (13858, 13873), False, 'import os\n'), ((13915, 13946), 'os.path.getmtime', 'os.path.getmtime', (['self.filepath'], {}), '(self.filepath)\n', (13931, 13946), False, 'import os\n'), ((13987, 14017), 'os.path.getsize', 'os.path.getsize', (['self.filepath'], {}), '(self.filepath)\n', (14002, 14017), False, 'import os\n'), ((1445, 1469), 'os.listdir', 'os.listdir', (['KB_ROOT_PATH'], {}), '(KB_ROOT_PATH)\n', (1455, 1469), False, 'import os\n'), ((1641, 1661), 'os.listdir', 'os.listdir', (['doc_path'], {}), '(doc_path)\n', (1651, 1661), False, 'import os\n'), ((5418, 5461), 'importlib.import_module', 'importlib.import_module', (['"""document_loaders"""'], {}), "('document_loaders')\n", (5441, 5461), False, 'import importlib\n'), ((5514, 5567), 'importlib.import_module', 'importlib.import_module', (['"""langchain.document_loaders"""'], {}), "('langchain.document_loaders')\n", (5537, 5567), False, 'import importlib\n'), ((5742, 5829), 'configs.logger.error', 'logger.error', (['f"""{e.__class__.__name__}: {msg}"""'], {'exc_info': '(e if log_verbose else None)'}), "(f'{e.__class__.__name__}: {msg}', exc_info=e if log_verbose else\n None)\n", (5754, 5829), False, 'from configs import EMBEDDING_MODEL, KB_ROOT_PATH, CHUNK_SIZE, OVERLAP_SIZE, ZH_TITLE_ENHANCE, logger, log_verbose, text_splitter_dict, LLM_MODEL, TEXT_SPLITTER_NAME\n'), ((5881, 5934), 'importlib.import_module', 'importlib.import_module', (['"""langchain.document_loaders"""'], {}), "('langchain.document_loaders')\n", (5904, 5934), False, 'import importlib\n'), ((10617, 10667), 'importlib.import_module', 'importlib.import_module', (['"""langchain.text_splitter"""'], {}), "('langchain.text_splitter')\n", (10640, 10667), False, 'import importlib\n'), ((10779, 10825), 'langchain.text_splitter.TextSplitter', 'TextSplitter', ([], {'chunk_size': '(250)', 'chunk_overlap': '(50)'}), '(chunk_size=250, chunk_overlap=50)\n', (10791, 10825), False, 'from langchain.text_splitter import TextSplitter\n'), ((11618, 11686), 'configs.logger.info', 'logger.info', (['f"""{self.document_loader_name} used for 
{self.filepath}"""'], {}), "(f'{self.document_loader_name} used for {self.filepath}')\n", (11629, 11686), False, 'from configs import EMBEDDING_MODEL, KB_ROOT_PATH, CHUNK_SIZE, OVERLAP_SIZE, ZH_TITLE_ENHANCE, logger, log_verbose, text_splitter_dict, LLM_MODEL, TEXT_SPLITTER_NAME\n'), ((12910, 12937), 'text_splitter.zh_title_enhance', 'func_zh_title_enhance', (['docs'], {}), '(docs)\n', (12931, 12937), True, 'from text_splitter import zh_title_enhance as func_zh_title_enhance\n'), ((1499, 1528), 'os.path.join', 'os.path.join', (['KB_ROOT_PATH', 'f'], {}), '(KB_ROOT_PATH, f)\n', (1511, 1528), False, 'import os\n'), ((1692, 1720), 'os.path.join', 'os.path.join', (['doc_path', 'file'], {}), '(doc_path, file)\n', (1704, 1720), False, 'import os\n'), ((3852, 3867), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (3856, 3867), False, 'from pathlib import Path\n'), ((4809, 4855), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text', 'metadata': 'metadata'}), '(page_content=text, metadata=metadata)\n', (4817, 4855), False, 'from langchain.docstore.document import Document\n'), ((7841, 7881), 'importlib.import_module', 'importlib.import_module', (['"""text_splitter"""'], {}), "('text_splitter')\n", (7864, 7881), False, 'import importlib\n'), ((14757, 14844), 'configs.logger.error', 'logger.error', (['f"""{e.__class__.__name__}: {msg}"""'], {'exc_info': '(e if log_verbose else None)'}), "(f'{e.__class__.__name__}: {msg}', exc_info=e if log_verbose else\n None)\n", (14769, 14844), False, 'from configs import EMBEDDING_MODEL, KB_ROOT_PATH, CHUNK_SIZE, OVERLAP_SIZE, ZH_TITLE_ENHANCE, logger, log_verbose, text_splitter_dict, LLM_MODEL, TEXT_SPLITTER_NAME\n'), ((8049, 8099), 'importlib.import_module', 'importlib.import_module', (['"""langchain.text_splitter"""'], {}), "('langchain.text_splitter')\n", (8072, 8099), False, 'import importlib\n'), ((8324, 8521), 'langchain.text_splitter.TextSplitter.from_tiktoken_encoder', 'TextSplitter.from_tiktoken_encoder', ([], {'encoding_name': "text_splitter_dict[splitter_name]['tokenizer_name_or_path']", 'pipeline': '"""zh_core_web_sm"""', 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), "(encoding_name=text_splitter_dict[\n splitter_name]['tokenizer_name_or_path'], pipeline='zh_core_web_sm',\n chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n", (8358, 8521), False, 'from langchain.text_splitter import TextSplitter\n'), ((9895, 10012), 'langchain.text_splitter.TextSplitter.from_huggingface_tokenizer', 'TextSplitter.from_huggingface_tokenizer', ([], {'tokenizer': 'tokenizer', 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(tokenizer=tokenizer, chunk_size=\n chunk_size, chunk_overlap=chunk_overlap)\n', (9934, 10012), False, 'from langchain.text_splitter import TextSplitter\n'), ((11140, 11166), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (11156, 11166), False, 'import os\n'), ((8691, 8861), 'langchain.text_splitter.TextSplitter.from_tiktoken_encoder', 'TextSplitter.from_tiktoken_encoder', ([], {'encoding_name': "text_splitter_dict[splitter_name]['tokenizer_name_or_path']", 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), "(encoding_name=text_splitter_dict[\n splitter_name]['tokenizer_name_or_path'], chunk_size=chunk_size,\n chunk_overlap=chunk_overlap)\n", (8725, 8861), False, 'from langchain.text_splitter import TextSplitter\n'), ((9160, 9194), 'server.utils.get_model_worker_config', 'get_model_worker_config', (['llm_model'], {}), 
'(llm_model)\n', (9183, 9194), False, 'from server.utils import run_in_thread_pool, embedding_device, get_model_worker_config\n'), ((9592, 9633), 'transformers.GPT2TokenizerFast.from_pretrained', 'GPT2TokenizerFast.from_pretrained', (['"""gpt2"""'], {}), "('gpt2')\n", (9625, 9633), False, 'from transformers import GPT2TokenizerFast\n'), ((9699, 9818), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (["text_splitter_dict[splitter_name]['tokenizer_name_or_path']"], {'trust_remote_code': '(True)'}), "(text_splitter_dict[splitter_name][\n 'tokenizer_name_or_path'], trust_remote_code=True)\n", (9728, 9818), False, 'from transformers import AutoTokenizer\n'), ((10161, 10256), 'langchain.text_splitter.TextSplitter', 'TextSplitter', ([], {'pipeline': '"""zh_core_web_sm"""', 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), "(pipeline='zh_core_web_sm', chunk_size=chunk_size,\n chunk_overlap=chunk_overlap)\n", (10173, 10256), False, 'from langchain.text_splitter import TextSplitter\n'), ((12717, 12748), 'os.path.basename', 'os.path.basename', (['self.filepath'], {}), '(self.filepath)\n', (12733, 12748), False, 'import os\n'), ((10407, 10471), 'langchain.text_splitter.TextSplitter', 'TextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (10419, 10471), False, 'from langchain.text_splitter import TextSplitter\n')]
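The row above wires knowledge-base files to langchain document loaders and text splitters; when a named splitter cannot be constructed, make_text_splitter falls back to RecursiveCharacterTextSplitter(chunk_size=250, chunk_overlap=50). A minimal sketch of that fallback path against langchain's text_splitter module (the sample text is illustrative):

import importlib

# Mirror the fallback branch of make_text_splitter: resolve the splitter class by name, then split.
text_splitter_module = importlib.import_module("langchain.text_splitter")
RecursiveCharacterTextSplitter = getattr(text_splitter_module, "RecursiveCharacterTextSplitter")
splitter = RecursiveCharacterTextSplitter(chunk_size=250, chunk_overlap=50)

sample = "LangChain splits long documents into overlapping chunks for embedding. " * 20
chunks = splitter.split_text(sample)
print(f"{len(chunks)} chunks, first 60 chars: {chunks[0][:60]}")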
"""Push and pull to the LangChain Hub.""" from __future__ import annotations from typing import TYPE_CHECKING, Any, Optional from langchain.load.dump import dumps from langchain.load.load import loads if TYPE_CHECKING: from langchainhub import Client def _get_client(api_url: Optional[str] = None, api_key: Optional[str] = None) -> Client: try: from langchainhub import Client except ImportError as e: raise ImportError( "Could not import langchainhub, please install with `pip install " "langchainhub`." ) from e # Client logic will also attempt to load URL/key from environment variables return Client(api_url, api_key=api_key) def push( repo_full_name: str, object: Any, *, api_url: Optional[str] = None, api_key: Optional[str] = None, parent_commit_hash: Optional[str] = "latest", new_repo_is_public: bool = False, new_repo_description: str = "", ) -> str: """ Pushes an object to the hub and returns the URL it can be viewed at in a browser. :param repo_full_name: The full name of the repo to push to in the format of `owner/repo`. :param object: The LangChain to serialize and push to the hub. :param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service if you have an api key set, or a localhost instance if not. :param api_key: The API key to use to authenticate with the LangChain Hub API. :param parent_commit_hash: The commit hash of the parent commit to push to. Defaults to the latest commit automatically. :param new_repo_is_public: Whether the repo should be public. Defaults to False (Private by default). :param new_repo_description: The description of the repo. Defaults to an empty string. """ client = _get_client(api_url=api_url, api_key=api_key) manifest_json = dumps(object) message = client.push( repo_full_name, manifest_json, parent_commit_hash=parent_commit_hash, new_repo_is_public=new_repo_is_public, new_repo_description=new_repo_description, ) return message def pull( owner_repo_commit: str, *, api_url: Optional[str] = None, api_key: Optional[str] = None, ) -> Any: """ Pulls an object from the hub and returns it as a LangChain object. :param owner_repo_commit: The full name of the repo to pull from in the format of `owner/repo:commit_hash`. :param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service if you have an api key set, or a localhost instance if not. :param api_key: The API key to use to authenticate with the LangChain Hub API. """ client = _get_client(api_url=api_url, api_key=api_key) resp: str = client.pull(owner_repo_commit) return loads(resp)
[ "langchain.load.load.loads", "langchainhub.Client", "langchain.load.dump.dumps" ]
[((671, 703), 'langchainhub.Client', 'Client', (['api_url'], {'api_key': 'api_key'}), '(api_url, api_key=api_key)\n', (677, 703), False, 'from langchainhub import Client\n'), ((1907, 1920), 'langchain.load.dump.dumps', 'dumps', (['object'], {}), '(object)\n', (1912, 1920), False, 'from langchain.load.dump import dumps\n'), ((2857, 2868), 'langchain.load.load.loads', 'loads', (['resp'], {}), '(resp)\n', (2862, 2868), False, 'from langchain.load.load import loads\n')]
"""Push and pull to the LangChain Hub.""" from __future__ import annotations from typing import TYPE_CHECKING, Any, Optional from langchain.load.dump import dumps from langchain.load.load import loads if TYPE_CHECKING: from langchainhub import Client def _get_client(api_url: Optional[str] = None, api_key: Optional[str] = None) -> Client: try: from langchainhub import Client except ImportError as e: raise ImportError( "Could not import langchainhub, please install with `pip install " "langchainhub`." ) from e # Client logic will also attempt to load URL/key from environment variables return Client(api_url, api_key=api_key) def push( repo_full_name: str, object: Any, *, api_url: Optional[str] = None, api_key: Optional[str] = None, parent_commit_hash: Optional[str] = "latest", new_repo_is_public: bool = False, new_repo_description: str = "", ) -> str: """ Pushes an object to the hub and returns the URL it can be viewed at in a browser. :param repo_full_name: The full name of the repo to push to in the format of `owner/repo`. :param object: The LangChain to serialize and push to the hub. :param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service if you have an api key set, or a localhost instance if not. :param api_key: The API key to use to authenticate with the LangChain Hub API. :param parent_commit_hash: The commit hash of the parent commit to push to. Defaults to the latest commit automatically. :param new_repo_is_public: Whether the repo should be public. Defaults to False (Private by default). :param new_repo_description: The description of the repo. Defaults to an empty string. """ client = _get_client(api_url=api_url, api_key=api_key) manifest_json = dumps(object) message = client.push( repo_full_name, manifest_json, parent_commit_hash=parent_commit_hash, new_repo_is_public=new_repo_is_public, new_repo_description=new_repo_description, ) return message def pull( owner_repo_commit: str, *, api_url: Optional[str] = None, api_key: Optional[str] = None, ) -> Any: """ Pulls an object from the hub and returns it as a LangChain object. :param owner_repo_commit: The full name of the repo to pull from in the format of `owner/repo:commit_hash`. :param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service if you have an api key set, or a localhost instance if not. :param api_key: The API key to use to authenticate with the LangChain Hub API. """ client = _get_client(api_url=api_url, api_key=api_key) resp: str = client.pull(owner_repo_commit) return loads(resp)
[ "langchain.load.load.loads", "langchainhub.Client", "langchain.load.dump.dumps" ]
[((671, 703), 'langchainhub.Client', 'Client', (['api_url'], {'api_key': 'api_key'}), '(api_url, api_key=api_key)\n', (677, 703), False, 'from langchainhub import Client\n'), ((1907, 1920), 'langchain.load.dump.dumps', 'dumps', (['object'], {}), '(object)\n', (1912, 1920), False, 'from langchain.load.dump import dumps\n'), ((2857, 2868), 'langchain.load.load.loads', 'loads', (['resp'], {}), '(resp)\n', (2862, 2868), False, 'from langchain.load.load import loads\n')]
"""Push and pull to the LangChain Hub.""" from __future__ import annotations from typing import TYPE_CHECKING, Any, Optional from langchain.load.dump import dumps from langchain.load.load import loads if TYPE_CHECKING: from langchainhub import Client def _get_client(api_url: Optional[str] = None, api_key: Optional[str] = None) -> Client: try: from langchainhub import Client except ImportError as e: raise ImportError( "Could not import langchainhub, please install with `pip install " "langchainhub`." ) from e # Client logic will also attempt to load URL/key from environment variables return Client(api_url, api_key=api_key) def push( repo_full_name: str, object: Any, *, api_url: Optional[str] = None, api_key: Optional[str] = None, parent_commit_hash: Optional[str] = "latest", new_repo_is_public: bool = False, new_repo_description: str = "", ) -> str: """ Pushes an object to the hub and returns the URL it can be viewed at in a browser. :param repo_full_name: The full name of the repo to push to in the format of `owner/repo`. :param object: The LangChain to serialize and push to the hub. :param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service if you have an api key set, or a localhost instance if not. :param api_key: The API key to use to authenticate with the LangChain Hub API. :param parent_commit_hash: The commit hash of the parent commit to push to. Defaults to the latest commit automatically. :param new_repo_is_public: Whether the repo should be public. Defaults to False (Private by default). :param new_repo_description: The description of the repo. Defaults to an empty string. """ client = _get_client(api_url=api_url, api_key=api_key) manifest_json = dumps(object) message = client.push( repo_full_name, manifest_json, parent_commit_hash=parent_commit_hash, new_repo_is_public=new_repo_is_public, new_repo_description=new_repo_description, ) return message def pull( owner_repo_commit: str, *, api_url: Optional[str] = None, api_key: Optional[str] = None, ) -> Any: """ Pulls an object from the hub and returns it as a LangChain object. :param owner_repo_commit: The full name of the repo to pull from in the format of `owner/repo:commit_hash`. :param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service if you have an api key set, or a localhost instance if not. :param api_key: The API key to use to authenticate with the LangChain Hub API. """ client = _get_client(api_url=api_url, api_key=api_key) resp: str = client.pull(owner_repo_commit) return loads(resp)
[ "langchain.load.load.loads", "langchainhub.Client", "langchain.load.dump.dumps" ]
[((671, 703), 'langchainhub.Client', 'Client', (['api_url'], {'api_key': 'api_key'}), '(api_url, api_key=api_key)\n', (677, 703), False, 'from langchainhub import Client\n'), ((1907, 1920), 'langchain.load.dump.dumps', 'dumps', (['object'], {}), '(object)\n', (1912, 1920), False, 'from langchain.load.dump import dumps\n'), ((2857, 2868), 'langchain.load.load.loads', 'loads', (['resp'], {}), '(resp)\n', (2862, 2868), False, 'from langchain.load.load import loads\n')]
"""Push and pull to the LangChain Hub.""" from __future__ import annotations from typing import TYPE_CHECKING, Any, Optional from langchain.load.dump import dumps from langchain.load.load import loads if TYPE_CHECKING: from langchainhub import Client def _get_client(api_url: Optional[str] = None, api_key: Optional[str] = None) -> Client: try: from langchainhub import Client except ImportError as e: raise ImportError( "Could not import langchainhub, please install with `pip install " "langchainhub`." ) from e # Client logic will also attempt to load URL/key from environment variables return Client(api_url, api_key=api_key) def push( repo_full_name: str, object: Any, *, api_url: Optional[str] = None, api_key: Optional[str] = None, parent_commit_hash: Optional[str] = "latest", new_repo_is_public: bool = False, new_repo_description: str = "", ) -> str: """ Pushes an object to the hub and returns the URL it can be viewed at in a browser. :param repo_full_name: The full name of the repo to push to in the format of `owner/repo`. :param object: The LangChain to serialize and push to the hub. :param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service if you have an api key set, or a localhost instance if not. :param api_key: The API key to use to authenticate with the LangChain Hub API. :param parent_commit_hash: The commit hash of the parent commit to push to. Defaults to the latest commit automatically. :param new_repo_is_public: Whether the repo should be public. Defaults to False (Private by default). :param new_repo_description: The description of the repo. Defaults to an empty string. """ client = _get_client(api_url=api_url, api_key=api_key) manifest_json = dumps(object) message = client.push( repo_full_name, manifest_json, parent_commit_hash=parent_commit_hash, new_repo_is_public=new_repo_is_public, new_repo_description=new_repo_description, ) return message def pull( owner_repo_commit: str, *, api_url: Optional[str] = None, api_key: Optional[str] = None, ) -> Any: """ Pulls an object from the hub and returns it as a LangChain object. :param owner_repo_commit: The full name of the repo to pull from in the format of `owner/repo:commit_hash`. :param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service if you have an api key set, or a localhost instance if not. :param api_key: The API key to use to authenticate with the LangChain Hub API. """ client = _get_client(api_url=api_url, api_key=api_key) resp: str = client.pull(owner_repo_commit) return loads(resp)
[ "langchain.load.load.loads", "langchainhub.Client", "langchain.load.dump.dumps" ]
[((671, 703), 'langchainhub.Client', 'Client', (['api_url'], {'api_key': 'api_key'}), '(api_url, api_key=api_key)\n', (677, 703), False, 'from langchainhub import Client\n'), ((1907, 1920), 'langchain.load.dump.dumps', 'dumps', (['object'], {}), '(object)\n', (1912, 1920), False, 'from langchain.load.dump import dumps\n'), ((2857, 2868), 'langchain.load.load.loads', 'loads', (['resp'], {}), '(resp)\n', (2862, 2868), False, 'from langchain.load.load import loads\n')]
import langchain_visualizer  # isort:skip  # noqa: F401

import asyncio

import vcr_langchain as vcr
from langchain import PromptTemplate
from langchain.chains import LLMChain
from langchain.llms import OpenAI

# ========================== Start of langchain example code ==========================
# https://langchain.readthedocs.io/en/latest/modules/chains/getting_started.html

llm = OpenAI(temperature=0)
prompt = PromptTemplate(
    input_variables=["product"],
    template="What is a good name for a company that makes {product}?",
)
chain = LLMChain(llm=llm, prompt=prompt)

# ================================== Execute example ==================================


@vcr.use_cassette()
async def llm_chain_demo():
    return chain.run("colorful socks")


def test_llm_usage_succeeds():
    """Check that the chain can run normally"""
    result = asyncio.get_event_loop().run_until_complete(llm_chain_demo())
    assert result.strip() == "Socktastic!"


if __name__ == "__main__":
    from langchain_visualizer import visualize

    visualize(llm_chain_demo)
[ "langchain.chains.LLMChain", "langchain_visualizer.visualize", "langchain.llms.OpenAI", "langchain.PromptTemplate" ]
[((387, 408), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (393, 408), False, 'from langchain.llms import OpenAI\n'), ((418, 534), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['product']", 'template': '"""What is a good name for a company that makes {product}?"""'}), "(input_variables=['product'], template=\n 'What is a good name for a company that makes {product}?')\n", (432, 534), False, 'from langchain import PromptTemplate\n'), ((550, 582), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (558, 582), False, 'from langchain.chains import LLMChain\n'), ((676, 694), 'vcr_langchain.use_cassette', 'vcr.use_cassette', ([], {}), '()\n', (692, 694), True, 'import vcr_langchain as vcr\n'), ((1042, 1067), 'langchain_visualizer.visualize', 'visualize', (['llm_chain_demo'], {}), '(llm_chain_demo)\n', (1051, 1067), False, 'from langchain_visualizer import visualize\n'), ((856, 880), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (878, 880), False, 'import asyncio\n')]
"""Test logic on base chain class.""" from typing import Any, Dict, List, Optional import pytest from langchain.callbacks.base import CallbackManager from langchain.chains.base import Chain from langchain.schema import BaseMemory from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler class FakeMemory(BaseMemory): """Fake memory class for testing purposes.""" @property def memory_variables(self) -> List[str]: """Return baz variable.""" return ["baz"] def load_memory_variables( self, inputs: Optional[Dict[str, Any]] = None ) -> Dict[str, str]: """Return baz variable.""" return {"baz": "foo"} def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Pass.""" pass def clear(self) -> None: """Pass.""" pass class FakeChain(Chain): """Fake chain class for testing purposes.""" be_correct: bool = True the_input_keys: List[str] = ["foo"] the_output_keys: List[str] = ["bar"] @property def input_keys(self) -> List[str]: """Input keys.""" return self.the_input_keys @property def output_keys(self) -> List[str]: """Output key of bar.""" return self.the_output_keys def _call(self, inputs: Dict[str, str]) -> Dict[str, str]: if self.be_correct: return {"bar": "baz"} else: return {"baz": "bar"} def test_bad_inputs() -> None: """Test errors are raised if input keys are not found.""" chain = FakeChain() with pytest.raises(ValueError): chain({"foobar": "baz"}) def test_bad_outputs() -> None: """Test errors are raised if outputs keys are not found.""" chain = FakeChain(be_correct=False) with pytest.raises(ValueError): chain({"foo": "baz"}) def test_correct_call() -> None: """Test correct call of fake chain.""" chain = FakeChain() output = chain({"foo": "bar"}) assert output == {"foo": "bar", "bar": "baz"} def test_single_input_correct() -> None: """Test passing single input works.""" chain = FakeChain() output = chain("bar") assert output == {"foo": "bar", "bar": "baz"} def test_single_input_error() -> None: """Test passing single input errors as expected.""" chain = FakeChain(the_input_keys=["foo", "bar"]) with pytest.raises(ValueError): chain("bar") def test_run_single_arg() -> None: """Test run method with single arg.""" chain = FakeChain() output = chain.run("bar") assert output == "baz" def test_run_multiple_args_error() -> None: """Test run method with multiple args errors as expected.""" chain = FakeChain() with pytest.raises(ValueError): chain.run("bar", "foo") def test_run_kwargs() -> None: """Test run method with kwargs.""" chain = FakeChain(the_input_keys=["foo", "bar"]) output = chain.run(foo="bar", bar="foo") assert output == "baz" def test_run_kwargs_error() -> None: """Test run method with kwargs errors as expected.""" chain = FakeChain(the_input_keys=["foo", "bar"]) with pytest.raises(ValueError): chain.run(foo="bar", baz="foo") def test_run_args_and_kwargs_error() -> None: """Test run method with args and kwargs.""" chain = FakeChain(the_input_keys=["foo", "bar"]) with pytest.raises(ValueError): chain.run("bar", foo="bar") def test_multiple_output_keys_error() -> None: """Test run with multiple output keys errors as expected.""" chain = FakeChain(the_output_keys=["foo", "bar"]) with pytest.raises(ValueError): chain.run("bar") def test_run_arg_with_memory() -> None: """Test run method works when arg is passed.""" chain = FakeChain(the_input_keys=["foo", "baz"], memory=FakeMemory()) chain.run("bar") def test_run_with_callback() -> None: """Test run method works when callback manager is passed.""" handler = FakeCallbackHandler() chain = FakeChain( 
callback_manager=CallbackManager(handlers=[handler]), verbose=True ) output = chain.run("bar") assert output == "baz" assert handler.starts == 1 assert handler.ends == 1 assert handler.errors == 0 def test_run_with_callback_not_verbose() -> None: """Test run method works when callback manager is passed and not verbose.""" import langchain langchain.verbose = False handler = FakeCallbackHandler() chain = FakeChain(callback_manager=CallbackManager(handlers=[handler])) output = chain.run("bar") assert output == "baz" assert handler.starts == 0 assert handler.ends == 0 assert handler.errors == 0
[ "langchain.callbacks.base.CallbackManager" ]
[((3986, 4007), 'tests.unit_tests.callbacks.fake_callback_handler.FakeCallbackHandler', 'FakeCallbackHandler', ([], {}), '()\n', (4005, 4007), False, 'from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler\n'), ((4460, 4481), 'tests.unit_tests.callbacks.fake_callback_handler.FakeCallbackHandler', 'FakeCallbackHandler', ([], {}), '()\n', (4479, 4481), False, 'from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler\n'), ((1597, 1622), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1610, 1622), False, 'import pytest\n'), ((1804, 1829), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1817, 1829), False, 'import pytest\n'), ((2393, 2418), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2406, 2418), False, 'import pytest\n'), ((2746, 2771), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2759, 2771), False, 'import pytest\n'), ((3161, 3186), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3174, 3186), False, 'import pytest\n'), ((3386, 3411), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3399, 3411), False, 'import pytest\n'), ((3626, 3651), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3639, 3651), False, 'import pytest\n'), ((4056, 4091), 'langchain.callbacks.base.CallbackManager', 'CallbackManager', ([], {'handlers': '[handler]'}), '(handlers=[handler])\n', (4071, 4091), False, 'from langchain.callbacks.base import CallbackManager\n'), ((4521, 4556), 'langchain.callbacks.base.CallbackManager', 'CallbackManager', ([], {'handlers': '[handler]'}), '(handlers=[handler])\n', (4536, 4556), False, 'from langchain.callbacks.base import CallbackManager\n')]
"""Test logic on base chain class.""" from typing import Any, Dict, List, Optional import pytest from langchain.callbacks.base import CallbackManager from langchain.chains.base import Chain from langchain.schema import BaseMemory from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler class FakeMemory(BaseMemory): """Fake memory class for testing purposes.""" @property def memory_variables(self) -> List[str]: """Return baz variable.""" return ["baz"] def load_memory_variables( self, inputs: Optional[Dict[str, Any]] = None ) -> Dict[str, str]: """Return baz variable.""" return {"baz": "foo"} def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Pass.""" pass def clear(self) -> None: """Pass.""" pass class FakeChain(Chain): """Fake chain class for testing purposes.""" be_correct: bool = True the_input_keys: List[str] = ["foo"] the_output_keys: List[str] = ["bar"] @property def input_keys(self) -> List[str]: """Input keys.""" return self.the_input_keys @property def output_keys(self) -> List[str]: """Output key of bar.""" return self.the_output_keys def _call(self, inputs: Dict[str, str]) -> Dict[str, str]: if self.be_correct: return {"bar": "baz"} else: return {"baz": "bar"} def test_bad_inputs() -> None: """Test errors are raised if input keys are not found.""" chain = FakeChain() with pytest.raises(ValueError): chain({"foobar": "baz"}) def test_bad_outputs() -> None: """Test errors are raised if outputs keys are not found.""" chain = FakeChain(be_correct=False) with pytest.raises(ValueError): chain({"foo": "baz"}) def test_correct_call() -> None: """Test correct call of fake chain.""" chain = FakeChain() output = chain({"foo": "bar"}) assert output == {"foo": "bar", "bar": "baz"} def test_single_input_correct() -> None: """Test passing single input works.""" chain = FakeChain() output = chain("bar") assert output == {"foo": "bar", "bar": "baz"} def test_single_input_error() -> None: """Test passing single input errors as expected.""" chain = FakeChain(the_input_keys=["foo", "bar"]) with pytest.raises(ValueError): chain("bar") def test_run_single_arg() -> None: """Test run method with single arg.""" chain = FakeChain() output = chain.run("bar") assert output == "baz" def test_run_multiple_args_error() -> None: """Test run method with multiple args errors as expected.""" chain = FakeChain() with pytest.raises(ValueError): chain.run("bar", "foo") def test_run_kwargs() -> None: """Test run method with kwargs.""" chain = FakeChain(the_input_keys=["foo", "bar"]) output = chain.run(foo="bar", bar="foo") assert output == "baz" def test_run_kwargs_error() -> None: """Test run method with kwargs errors as expected.""" chain = FakeChain(the_input_keys=["foo", "bar"]) with pytest.raises(ValueError): chain.run(foo="bar", baz="foo") def test_run_args_and_kwargs_error() -> None: """Test run method with args and kwargs.""" chain = FakeChain(the_input_keys=["foo", "bar"]) with pytest.raises(ValueError): chain.run("bar", foo="bar") def test_multiple_output_keys_error() -> None: """Test run with multiple output keys errors as expected.""" chain = FakeChain(the_output_keys=["foo", "bar"]) with pytest.raises(ValueError): chain.run("bar") def test_run_arg_with_memory() -> None: """Test run method works when arg is passed.""" chain = FakeChain(the_input_keys=["foo", "baz"], memory=FakeMemory()) chain.run("bar") def test_run_with_callback() -> None: """Test run method works when callback manager is passed.""" handler = FakeCallbackHandler() chain = FakeChain( 
callback_manager=CallbackManager(handlers=[handler]), verbose=True ) output = chain.run("bar") assert output == "baz" assert handler.starts == 1 assert handler.ends == 1 assert handler.errors == 0 def test_run_with_callback_not_verbose() -> None: """Test run method works when callback manager is passed and not verbose.""" import langchain langchain.verbose = False handler = FakeCallbackHandler() chain = FakeChain(callback_manager=CallbackManager(handlers=[handler])) output = chain.run("bar") assert output == "baz" assert handler.starts == 0 assert handler.ends == 0 assert handler.errors == 0
[ "langchain.callbacks.base.CallbackManager" ]
[((3986, 4007), 'tests.unit_tests.callbacks.fake_callback_handler.FakeCallbackHandler', 'FakeCallbackHandler', ([], {}), '()\n', (4005, 4007), False, 'from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler\n'), ((4460, 4481), 'tests.unit_tests.callbacks.fake_callback_handler.FakeCallbackHandler', 'FakeCallbackHandler', ([], {}), '()\n', (4479, 4481), False, 'from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler\n'), ((1597, 1622), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1610, 1622), False, 'import pytest\n'), ((1804, 1829), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1817, 1829), False, 'import pytest\n'), ((2393, 2418), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2406, 2418), False, 'import pytest\n'), ((2746, 2771), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2759, 2771), False, 'import pytest\n'), ((3161, 3186), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3174, 3186), False, 'import pytest\n'), ((3386, 3411), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3399, 3411), False, 'import pytest\n'), ((3626, 3651), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3639, 3651), False, 'import pytest\n'), ((4056, 4091), 'langchain.callbacks.base.CallbackManager', 'CallbackManager', ([], {'handlers': '[handler]'}), '(handlers=[handler])\n', (4071, 4091), False, 'from langchain.callbacks.base import CallbackManager\n'), ((4521, 4556), 'langchain.callbacks.base.CallbackManager', 'CallbackManager', ([], {'handlers': '[handler]'}), '(handlers=[handler])\n', (4536, 4556), False, 'from langchain.callbacks.base import CallbackManager\n')]
"""Test logic on base chain class.""" from typing import Any, Dict, List, Optional import pytest from langchain.callbacks.base import CallbackManager from langchain.chains.base import Chain from langchain.schema import BaseMemory from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler class FakeMemory(BaseMemory): """Fake memory class for testing purposes.""" @property def memory_variables(self) -> List[str]: """Return baz variable.""" return ["baz"] def load_memory_variables( self, inputs: Optional[Dict[str, Any]] = None ) -> Dict[str, str]: """Return baz variable.""" return {"baz": "foo"} def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Pass.""" pass def clear(self) -> None: """Pass.""" pass class FakeChain(Chain): """Fake chain class for testing purposes.""" be_correct: bool = True the_input_keys: List[str] = ["foo"] the_output_keys: List[str] = ["bar"] @property def input_keys(self) -> List[str]: """Input keys.""" return self.the_input_keys @property def output_keys(self) -> List[str]: """Output key of bar.""" return self.the_output_keys def _call(self, inputs: Dict[str, str]) -> Dict[str, str]: if self.be_correct: return {"bar": "baz"} else: return {"baz": "bar"} def test_bad_inputs() -> None: """Test errors are raised if input keys are not found.""" chain = FakeChain() with pytest.raises(ValueError): chain({"foobar": "baz"}) def test_bad_outputs() -> None: """Test errors are raised if outputs keys are not found.""" chain = FakeChain(be_correct=False) with pytest.raises(ValueError): chain({"foo": "baz"}) def test_correct_call() -> None: """Test correct call of fake chain.""" chain = FakeChain() output = chain({"foo": "bar"}) assert output == {"foo": "bar", "bar": "baz"} def test_single_input_correct() -> None: """Test passing single input works.""" chain = FakeChain() output = chain("bar") assert output == {"foo": "bar", "bar": "baz"} def test_single_input_error() -> None: """Test passing single input errors as expected.""" chain = FakeChain(the_input_keys=["foo", "bar"]) with pytest.raises(ValueError): chain("bar") def test_run_single_arg() -> None: """Test run method with single arg.""" chain = FakeChain() output = chain.run("bar") assert output == "baz" def test_run_multiple_args_error() -> None: """Test run method with multiple args errors as expected.""" chain = FakeChain() with pytest.raises(ValueError): chain.run("bar", "foo") def test_run_kwargs() -> None: """Test run method with kwargs.""" chain = FakeChain(the_input_keys=["foo", "bar"]) output = chain.run(foo="bar", bar="foo") assert output == "baz" def test_run_kwargs_error() -> None: """Test run method with kwargs errors as expected.""" chain = FakeChain(the_input_keys=["foo", "bar"]) with pytest.raises(ValueError): chain.run(foo="bar", baz="foo") def test_run_args_and_kwargs_error() -> None: """Test run method with args and kwargs.""" chain = FakeChain(the_input_keys=["foo", "bar"]) with pytest.raises(ValueError): chain.run("bar", foo="bar") def test_multiple_output_keys_error() -> None: """Test run with multiple output keys errors as expected.""" chain = FakeChain(the_output_keys=["foo", "bar"]) with pytest.raises(ValueError): chain.run("bar") def test_run_arg_with_memory() -> None: """Test run method works when arg is passed.""" chain = FakeChain(the_input_keys=["foo", "baz"], memory=FakeMemory()) chain.run("bar") def test_run_with_callback() -> None: """Test run method works when callback manager is passed.""" handler = FakeCallbackHandler() chain = FakeChain( 
callback_manager=CallbackManager(handlers=[handler]), verbose=True ) output = chain.run("bar") assert output == "baz" assert handler.starts == 1 assert handler.ends == 1 assert handler.errors == 0 def test_run_with_callback_not_verbose() -> None: """Test run method works when callback manager is passed and not verbose.""" import langchain langchain.verbose = False handler = FakeCallbackHandler() chain = FakeChain(callback_manager=CallbackManager(handlers=[handler])) output = chain.run("bar") assert output == "baz" assert handler.starts == 0 assert handler.ends == 0 assert handler.errors == 0
[ "langchain.callbacks.base.CallbackManager" ]
[((3986, 4007), 'tests.unit_tests.callbacks.fake_callback_handler.FakeCallbackHandler', 'FakeCallbackHandler', ([], {}), '()\n', (4005, 4007), False, 'from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler\n'), ((4460, 4481), 'tests.unit_tests.callbacks.fake_callback_handler.FakeCallbackHandler', 'FakeCallbackHandler', ([], {}), '()\n', (4479, 4481), False, 'from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler\n'), ((1597, 1622), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1610, 1622), False, 'import pytest\n'), ((1804, 1829), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1817, 1829), False, 'import pytest\n'), ((2393, 2418), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2406, 2418), False, 'import pytest\n'), ((2746, 2771), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2759, 2771), False, 'import pytest\n'), ((3161, 3186), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3174, 3186), False, 'import pytest\n'), ((3386, 3411), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3399, 3411), False, 'import pytest\n'), ((3626, 3651), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3639, 3651), False, 'import pytest\n'), ((4056, 4091), 'langchain.callbacks.base.CallbackManager', 'CallbackManager', ([], {'handlers': '[handler]'}), '(handlers=[handler])\n', (4071, 4091), False, 'from langchain.callbacks.base import CallbackManager\n'), ((4521, 4556), 'langchain.callbacks.base.CallbackManager', 'CallbackManager', ([], {'handlers': '[handler]'}), '(handlers=[handler])\n', (4536, 4556), False, 'from langchain.callbacks.base import CallbackManager\n')]
"""Test logic on base chain class.""" from typing import Any, Dict, List, Optional import pytest from langchain.callbacks.base import CallbackManager from langchain.chains.base import Chain from langchain.schema import BaseMemory from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler class FakeMemory(BaseMemory): """Fake memory class for testing purposes.""" @property def memory_variables(self) -> List[str]: """Return baz variable.""" return ["baz"] def load_memory_variables( self, inputs: Optional[Dict[str, Any]] = None ) -> Dict[str, str]: """Return baz variable.""" return {"baz": "foo"} def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Pass.""" pass def clear(self) -> None: """Pass.""" pass class FakeChain(Chain): """Fake chain class for testing purposes.""" be_correct: bool = True the_input_keys: List[str] = ["foo"] the_output_keys: List[str] = ["bar"] @property def input_keys(self) -> List[str]: """Input keys.""" return self.the_input_keys @property def output_keys(self) -> List[str]: """Output key of bar.""" return self.the_output_keys def _call(self, inputs: Dict[str, str]) -> Dict[str, str]: if self.be_correct: return {"bar": "baz"} else: return {"baz": "bar"} def test_bad_inputs() -> None: """Test errors are raised if input keys are not found.""" chain = FakeChain() with pytest.raises(ValueError): chain({"foobar": "baz"}) def test_bad_outputs() -> None: """Test errors are raised if outputs keys are not found.""" chain = FakeChain(be_correct=False) with pytest.raises(ValueError): chain({"foo": "baz"}) def test_correct_call() -> None: """Test correct call of fake chain.""" chain = FakeChain() output = chain({"foo": "bar"}) assert output == {"foo": "bar", "bar": "baz"} def test_single_input_correct() -> None: """Test passing single input works.""" chain = FakeChain() output = chain("bar") assert output == {"foo": "bar", "bar": "baz"} def test_single_input_error() -> None: """Test passing single input errors as expected.""" chain = FakeChain(the_input_keys=["foo", "bar"]) with pytest.raises(ValueError): chain("bar") def test_run_single_arg() -> None: """Test run method with single arg.""" chain = FakeChain() output = chain.run("bar") assert output == "baz" def test_run_multiple_args_error() -> None: """Test run method with multiple args errors as expected.""" chain = FakeChain() with pytest.raises(ValueError): chain.run("bar", "foo") def test_run_kwargs() -> None: """Test run method with kwargs.""" chain = FakeChain(the_input_keys=["foo", "bar"]) output = chain.run(foo="bar", bar="foo") assert output == "baz" def test_run_kwargs_error() -> None: """Test run method with kwargs errors as expected.""" chain = FakeChain(the_input_keys=["foo", "bar"]) with pytest.raises(ValueError): chain.run(foo="bar", baz="foo") def test_run_args_and_kwargs_error() -> None: """Test run method with args and kwargs.""" chain = FakeChain(the_input_keys=["foo", "bar"]) with pytest.raises(ValueError): chain.run("bar", foo="bar") def test_multiple_output_keys_error() -> None: """Test run with multiple output keys errors as expected.""" chain = FakeChain(the_output_keys=["foo", "bar"]) with pytest.raises(ValueError): chain.run("bar") def test_run_arg_with_memory() -> None: """Test run method works when arg is passed.""" chain = FakeChain(the_input_keys=["foo", "baz"], memory=FakeMemory()) chain.run("bar") def test_run_with_callback() -> None: """Test run method works when callback manager is passed.""" handler = FakeCallbackHandler() chain = FakeChain( 
callback_manager=CallbackManager(handlers=[handler]), verbose=True ) output = chain.run("bar") assert output == "baz" assert handler.starts == 1 assert handler.ends == 1 assert handler.errors == 0 def test_run_with_callback_not_verbose() -> None: """Test run method works when callback manager is passed and not verbose.""" import langchain langchain.verbose = False handler = FakeCallbackHandler() chain = FakeChain(callback_manager=CallbackManager(handlers=[handler])) output = chain.run("bar") assert output == "baz" assert handler.starts == 0 assert handler.ends == 0 assert handler.errors == 0
[ "langchain.callbacks.base.CallbackManager" ]
[((3986, 4007), 'tests.unit_tests.callbacks.fake_callback_handler.FakeCallbackHandler', 'FakeCallbackHandler', ([], {}), '()\n', (4005, 4007), False, 'from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler\n'), ((4460, 4481), 'tests.unit_tests.callbacks.fake_callback_handler.FakeCallbackHandler', 'FakeCallbackHandler', ([], {}), '()\n', (4479, 4481), False, 'from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler\n'), ((1597, 1622), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1610, 1622), False, 'import pytest\n'), ((1804, 1829), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1817, 1829), False, 'import pytest\n'), ((2393, 2418), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2406, 2418), False, 'import pytest\n'), ((2746, 2771), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2759, 2771), False, 'import pytest\n'), ((3161, 3186), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3174, 3186), False, 'import pytest\n'), ((3386, 3411), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3399, 3411), False, 'import pytest\n'), ((3626, 3651), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3639, 3651), False, 'import pytest\n'), ((4056, 4091), 'langchain.callbacks.base.CallbackManager', 'CallbackManager', ([], {'handlers': '[handler]'}), '(handlers=[handler])\n', (4071, 4091), False, 'from langchain.callbacks.base import CallbackManager\n'), ((4521, 4556), 'langchain.callbacks.base.CallbackManager', 'CallbackManager', ([], {'handlers': '[handler]'}), '(handlers=[handler])\n', (4536, 4556), False, 'from langchain.callbacks.base import CallbackManager\n')]
"""Test Momento cache functionality. To run tests, set the environment variable MOMENTO_AUTH_TOKEN to a valid Momento auth token. This can be obtained by signing up for a free Momento account at https://gomomento.com/. """ from __future__ import annotations import uuid from datetime import timedelta from typing import Iterator import pytest import langchain from langchain.cache import MomentoCache from langchain.schema import Generation, LLMResult from tests.unit_tests.llms.fake_llm import FakeLLM def random_string() -> str: return str(uuid.uuid4()) @pytest.fixture(scope="module") def momento_cache() -> Iterator[MomentoCache]: from momento import CacheClient, Configurations, CredentialProvider cache_name = f"langchain-test-cache-{random_string()}" client = CacheClient( Configurations.Laptop.v1(), CredentialProvider.from_environment_variable("MOMENTO_AUTH_TOKEN"), default_ttl=timedelta(seconds=30), ) try: llm_cache = MomentoCache(client, cache_name) langchain.llm_cache = llm_cache yield llm_cache finally: client.delete_cache(cache_name) def test_invalid_ttl() -> None: from momento import CacheClient, Configurations, CredentialProvider client = CacheClient( Configurations.Laptop.v1(), CredentialProvider.from_environment_variable("MOMENTO_AUTH_TOKEN"), default_ttl=timedelta(seconds=30), ) with pytest.raises(ValueError): MomentoCache(client, cache_name=random_string(), ttl=timedelta(seconds=-1)) def test_momento_cache_miss(momento_cache: MomentoCache) -> None: llm = FakeLLM() stub_llm_output = LLMResult(generations=[[Generation(text="foo")]]) assert llm.generate([random_string()]) == stub_llm_output @pytest.mark.parametrize( "prompts, generations", [ # Single prompt, single generation ([random_string()], [[random_string()]]), # Single prompt, multiple generations ([random_string()], [[random_string(), random_string()]]), # Single prompt, multiple generations ([random_string()], [[random_string(), random_string(), random_string()]]), # Multiple prompts, multiple generations ( [random_string(), random_string()], [[random_string()], [random_string(), random_string()]], ), ], ) def test_momento_cache_hit( momento_cache: MomentoCache, prompts: list[str], generations: list[list[str]] ) -> None: llm = FakeLLM() params = llm.dict() params["stop"] = None llm_string = str(sorted([(k, v) for k, v in params.items()])) llm_generations = [ [ Generation(text=generation, generation_info=params) for generation in prompt_i_generations ] for prompt_i_generations in generations ] for prompt_i, llm_generations_i in zip(prompts, llm_generations): momento_cache.update(prompt_i, llm_string, llm_generations_i) assert llm.generate(prompts) == LLMResult( generations=llm_generations, llm_output={} )
[ "langchain.cache.MomentoCache", "langchain.schema.LLMResult", "langchain.schema.Generation" ]
[((569, 599), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (583, 599), False, 'import pytest\n'), ((1637, 1646), 'tests.unit_tests.llms.fake_llm.FakeLLM', 'FakeLLM', ([], {}), '()\n', (1644, 1646), False, 'from tests.unit_tests.llms.fake_llm import FakeLLM\n'), ((2507, 2516), 'tests.unit_tests.llms.fake_llm.FakeLLM', 'FakeLLM', ([], {}), '()\n', (2514, 2516), False, 'from tests.unit_tests.llms.fake_llm import FakeLLM\n'), ((552, 564), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (562, 564), False, 'import uuid\n'), ((813, 839), 'momento.Configurations.Laptop.v1', 'Configurations.Laptop.v1', ([], {}), '()\n', (837, 839), False, 'from momento import CacheClient, Configurations, CredentialProvider\n'), ((849, 915), 'momento.CredentialProvider.from_environment_variable', 'CredentialProvider.from_environment_variable', (['"""MOMENTO_AUTH_TOKEN"""'], {}), "('MOMENTO_AUTH_TOKEN')\n", (893, 915), False, 'from momento import CacheClient, Configurations, CredentialProvider\n'), ((995, 1027), 'langchain.cache.MomentoCache', 'MomentoCache', (['client', 'cache_name'], {}), '(client, cache_name)\n', (1007, 1027), False, 'from langchain.cache import MomentoCache\n'), ((1286, 1312), 'momento.Configurations.Laptop.v1', 'Configurations.Laptop.v1', ([], {}), '()\n', (1310, 1312), False, 'from momento import CacheClient, Configurations, CredentialProvider\n'), ((1322, 1388), 'momento.CredentialProvider.from_environment_variable', 'CredentialProvider.from_environment_variable', (['"""MOMENTO_AUTH_TOKEN"""'], {}), "('MOMENTO_AUTH_TOKEN')\n", (1366, 1388), False, 'from momento import CacheClient, Configurations, CredentialProvider\n'), ((1448, 1473), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1461, 1473), False, 'import pytest\n'), ((3024, 3077), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'llm_generations', 'llm_output': '{}'}), '(generations=llm_generations, llm_output={})\n', (3033, 3077), False, 'from langchain.schema import Generation, LLMResult\n'), ((937, 958), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(30)'}), '(seconds=30)\n', (946, 958), False, 'from datetime import timedelta\n'), ((1410, 1431), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(30)'}), '(seconds=30)\n', (1419, 1431), False, 'from datetime import timedelta\n'), ((2680, 2731), 'langchain.schema.Generation', 'Generation', ([], {'text': 'generation', 'generation_info': 'params'}), '(text=generation, generation_info=params)\n', (2690, 2731), False, 'from langchain.schema import Generation, LLMResult\n'), ((1536, 1557), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(-1)'}), '(seconds=-1)\n', (1545, 1557), False, 'from datetime import timedelta\n'), ((1693, 1715), 'langchain.schema.Generation', 'Generation', ([], {'text': '"""foo"""'}), "(text='foo')\n", (1703, 1715), False, 'from langchain.schema import Generation, LLMResult\n')]
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.

import langchain
from langchain.llms import Replicate

from flask import Flask
from flask import request
import os
import requests
import json

os.environ["REPLICATE_API_TOKEN"] = "<your replicate api token>"
llama2_13b_chat = "meta/llama-2-13b-chat:f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d"

llm = Replicate(
    model=llama2_13b_chat,
    model_kwargs={"temperature": 0.01, "top_p": 1, "max_new_tokens":500}
)

app = Flask(__name__)

@app.route('/msgrcvd_pager', methods=['POST', 'GET'])
def msgrcvd_pager():
    message = request.args.get('message')
    sender = request.args.get('sender')
    recipient = request.args.get('recipient')

    answer = llm(message)
    print(message)
    print(answer)

    url = f"https://graph.facebook.com/v18.0/{recipient}/messages"
    params = {
        'recipient': '{"id": ' + sender + '}',
        'message': json.dumps({'text': answer}),
        'messaging_type': 'RESPONSE',
        'access_token': "<your page access token>"
    }
    headers = {
        'Content-Type': 'application/json'
    }
    response = requests.post(url, params=params, headers=headers)
    print(response.status_code)
    print(response.text)

    return message + "<p/>" + answer
[ "langchain.llms.Replicate" ]
[((488, 595), 'langchain.llms.Replicate', 'Replicate', ([], {'model': 'llama2_13b_chat', 'model_kwargs': "{'temperature': 0.01, 'top_p': 1, 'max_new_tokens': 500}"}), "(model=llama2_13b_chat, model_kwargs={'temperature': 0.01, 'top_p':\n 1, 'max_new_tokens': 500})\n", (497, 595), False, 'from langchain.llms import Replicate\n'), ((608, 623), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (613, 623), False, 'from flask import Flask\n'), ((718, 745), 'flask.request.args.get', 'request.args.get', (['"""message"""'], {}), "('message')\n", (734, 745), False, 'from flask import request\n'), ((759, 785), 'flask.request.args.get', 'request.args.get', (['"""sender"""'], {}), "('sender')\n", (775, 785), False, 'from flask import request\n'), ((802, 831), 'flask.request.args.get', 'request.args.get', (['"""recipient"""'], {}), "('recipient')\n", (818, 831), False, 'from flask import request\n'), ((1250, 1300), 'requests.post', 'requests.post', (['url'], {'params': 'params', 'headers': 'headers'}), '(url, params=params, headers=headers)\n', (1263, 1300), False, 'import requests\n'), ((1045, 1073), 'json.dumps', 'json.dumps', (["{'text': answer}"], {}), "({'text': answer})\n", (1055, 1073), False, 'import json\n')]
"""**Document Transformers** are classes to transform Documents. **Document Transformers** usually used to transform a lot of Documents in a single run. **Class hierarchy:** .. code-block:: BaseDocumentTransformer --> <name> # Examples: DoctranQATransformer, DoctranTextTranslator **Main helpers:** .. code-block:: Document """ # noqa: E501 import warnings from typing import Any from langchain_core._api import LangChainDeprecationWarning from langchain.utils.interactive_env import is_interactive_env def __getattr__(name: str) -> Any: from langchain_community import document_transformers # If not in interactive env, raise warning. if not is_interactive_env(): warnings.warn( "Importing document transformers from langchain is deprecated. Importing " "from langchain will no longer be supported as of langchain==0.2.0. " "Please import from langchain-community instead:\n\n" f"`from langchain_community.document_transformers import {name}`.\n\n" "To install langchain-community run `pip install -U langchain-community`.", category=LangChainDeprecationWarning, ) return getattr(document_transformers, name) __all__ = [ "BeautifulSoupTransformer", "DoctranQATransformer", "DoctranTextTranslator", "DoctranPropertyExtractor", "EmbeddingsClusteringFilter", "EmbeddingsRedundantFilter", "GoogleTranslateTransformer", "get_stateful_documents", "LongContextReorder", "NucliaTextTransformer", "OpenAIMetadataTagger", "Html2TextTransformer", ]
[ "langchain.utils.interactive_env.is_interactive_env" ]
[((677, 697), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (695, 697), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((707, 1102), 'warnings.warn', 'warnings.warn', (['f"""Importing document transformers from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.document_transformers import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n f"""Importing document transformers from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.document_transformers import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n , category=LangChainDeprecationWarning)\n', (720, 1102), False, 'import warnings\n')]
# Make sure to install the following packages: dlt, langchain, duckdb, python-dotenv, openai, weaviate-client import logging from langchain.text_splitter import RecursiveCharacterTextSplitter from marshmallow import Schema, fields from loaders.loaders import _document_loader # Add the parent directory to sys.path logging.basicConfig(level=logging.INFO) from langchain.retrievers import WeaviateHybridSearchRetriever, ParentDocumentRetriever from weaviate.gql.get import HybridFusion import tracemalloc tracemalloc.start() import os from langchain.embeddings.openai import OpenAIEmbeddings from dotenv import load_dotenv from langchain.schema import Document import weaviate load_dotenv() LTM_MEMORY_ID_DEFAULT = "00000" ST_MEMORY_ID_DEFAULT = "0000" BUFFER_ID_DEFAULT = "0000" class VectorDB: OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "") def __init__( self, user_id: str, index_name: str, memory_id: str, namespace: str = None, embeddings = None, ): self.user_id = user_id self.index_name = index_name self.namespace = namespace self.memory_id = memory_id self.embeddings = embeddings class PineconeVectorDB(VectorDB): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.init_pinecone(self.index_name) def init_pinecone(self, index_name): # Pinecone initialization logic pass import langchain.embeddings class WeaviateVectorDB(VectorDB): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.init_weaviate(embeddings= self.embeddings, namespace = self.namespace) def init_weaviate(self, embeddings=OpenAIEmbeddings(), namespace=None,retriever_type="",): # Weaviate initialization logic auth_config = weaviate.auth.AuthApiKey( api_key=os.environ.get("WEAVIATE_API_KEY") ) client = weaviate.Client( url=os.environ.get("WEAVIATE_URL"), auth_client_secret=auth_config, additional_headers={"X-OpenAI-Api-Key": os.environ.get("OPENAI_API_KEY")}, ) if retriever_type == "single_document_context": retriever = WeaviateHybridSearchRetriever( client=client, index_name=namespace, text_key="text", attributes=[], embedding=embeddings, create_schema_if_missing=True, ) return retriever elif retriever_type == "multi_document_context": retriever = WeaviateHybridSearchRetriever( client=client, index_name=namespace, text_key="text", attributes=[], embedding=embeddings, create_schema_if_missing=True, ) return retriever else : return client # child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) # store = InMemoryStore() # retriever = ParentDocumentRetriever( # vectorstore=vectorstore, # docstore=store, # child_splitter=child_splitter, # ) from marshmallow import Schema, fields def create_document_structure(observation, params, metadata_schema_class=None): """ Create and validate a document structure with optional custom fields. :param observation: Content of the document. :param params: Metadata information. :param metadata_schema_class: Custom metadata schema class (optional). :return: A list containing the validated document data. 
""" document_data = { "metadata": params, "page_content": observation } def get_document_schema(): class DynamicDocumentSchema(Schema): metadata = fields.Nested(metadata_schema_class, required=True) page_content = fields.Str(required=True) return DynamicDocumentSchema # Validate and deserialize, defaulting to "1.0" if not provided CurrentDocumentSchema = get_document_schema() loaded_document = CurrentDocumentSchema().load(document_data) return [loaded_document] def _stuct(self, observation, params, metadata_schema_class =None): """Utility function to create the document structure with optional custom fields.""" # Construct document data document_data = { "metadata": params, "page_content": observation } def get_document_schema(): class DynamicDocumentSchema(Schema): metadata = fields.Nested(metadata_schema_class, required=True) page_content = fields.Str(required=True) return DynamicDocumentSchema # Validate and deserialize # Default to "1.0" if not provided CurrentDocumentSchema = get_document_schema() loaded_document = CurrentDocumentSchema().load(document_data) return [loaded_document] async def add_memories(self, observation, loader_settings=None, params=None, namespace=None, metadata_schema_class=None, embeddings = 'hybrid'): # Update Weaviate memories here if namespace is None: namespace = self.namespace retriever = self.init_weaviate(embeddings=embeddings,namespace = namespace, retriever_type="single_document_context") if loader_settings: # Assuming _document_loader returns a list of documents documents = await _document_loader(observation, loader_settings) logging.info("here are the docs %s", str(documents)) for doc in documents[0]: document_to_load = self._stuct(doc.page_content, params, metadata_schema_class) logging.info("Loading document with provided loader settings %s", str(document_to_load)) retriever.add_documents([ Document(metadata=document_to_load[0]['metadata'], page_content=document_to_load[0]['page_content'])]) else: document_to_load = self._stuct(observation, params, metadata_schema_class) logging.info("Loading document with defautl loader settings %s", str(document_to_load)) retriever.add_documents([ Document(metadata=document_to_load[0]['metadata'], page_content=document_to_load[0]['page_content'])]) async def fetch_memories(self, observation: str, namespace: str = None, search_type: str = 'hybrid', **kwargs): """ Fetch documents from weaviate. Parameters: - observation (str): User query. - namespace (str, optional): Type of memory accessed. - search_type (str, optional): Type of search ('text', 'hybrid', 'bm25', 'generate', 'generate_grouped'). Defaults to 'hybrid'. - **kwargs: Additional parameters for flexibility. Returns: List of documents matching the query or an empty list in case of error. 
Example: fetch_memories(query="some query", search_type='text', additional_param='value') """ client = self.init_weaviate(namespace =self.namespace) if search_type is None: search_type = 'hybrid' logging.info("The search type is %s", str(search_type)) if not namespace: namespace = self.namespace logging.info("Query on namespace %s", namespace) params_user_id = { "path": ["user_id"], "operator": "Like", "valueText": self.user_id, } def list_objects_of_class(class_name, schema): return [ prop["name"] for class_obj in schema["classes"] if class_obj["class"] == class_name for prop in class_obj["properties"] ] base_query = client.query.get( namespace, list(list_objects_of_class(namespace, client.schema.get())) ).with_additional( ["id", "creationTimeUnix", "lastUpdateTimeUnix", "score", 'distance'] ).with_where(params_user_id).with_limit(10) n_of_observations = kwargs.get('n_of_observations', 2) # try: if search_type == 'text': query_output = ( base_query .with_near_text({"concepts": [observation]}) .with_autocut(n_of_observations) .do() ) elif search_type == 'hybrid': query_output = ( base_query .with_hybrid(query=observation, fusion_type=HybridFusion.RELATIVE_SCORE) .with_autocut(n_of_observations) .do() ) elif search_type == 'bm25': query_output = ( base_query .with_bm25(query=observation) .with_autocut(n_of_observations) .do() ) elif search_type == 'generate': generate_prompt = kwargs.get('generate_prompt', "") query_output = ( base_query .with_generate(single_prompt=observation) .with_near_text({"concepts": [observation]}) .with_autocut(n_of_observations) .do() ) elif search_type == 'generate_grouped': generate_prompt = kwargs.get('generate_prompt', "") query_output = ( base_query .with_generate(grouped_task=observation) .with_near_text({"concepts": [observation]}) .with_autocut(n_of_observations) .do() ) else: logging.error(f"Invalid search_type: {search_type}") return [] # except Exception as e: # logging.error(f"Error executing query: {str(e)}") # return [] return query_output async def delete_memories(self, namespace:str, params: dict = None): if namespace is None: namespace = self.namespace client = self.init_weaviate(namespace = self.namespace) if params: where_filter = { "path": ["id"], "operator": "Equal", "valueText": params.get("id", None), } return client.batch.delete_objects( class_name=self.namespace, # Same `where` filter as in the GraphQL API where=where_filter, ) else: # Delete all objects print("HERE IS THE USER ID", self.user_id) return client.batch.delete_objects( class_name=namespace, where={ "path": ["version"], "operator": "Equal", "valueText": "1.0", }, ) async def count_memories(self, namespace: str = None, params: dict = None) -> int: """ Count memories in a Weaviate database. Args: namespace (str, optional): The Weaviate namespace to count memories in. If not provided, uses the default namespace. Returns: int: The number of memories in the specified namespace. 
""" if namespace is None: namespace = self.namespace client = self.init_weaviate(namespace =namespace) try: object_count = client.query.aggregate(namespace).with_meta_count().do() return object_count except Exception as e: logging.info(f"Error counting memories: {str(e)}") # Handle the error or log it return 0 def update_memories(self, observation, namespace: str, params: dict = None): client = self.init_weaviate(namespace = self.namespace) client.data_object.update( data_object={ # "text": observation, "user_id": str(self.user_id), "version": params.get("version", None) or "", "agreement_id": params.get("agreement_id", None) or "", "privacy_policy": params.get("privacy_policy", None) or "", "terms_of_service": params.get("terms_of_service", None) or "", "format": params.get("format", None) or "", "schema_version": params.get("schema_version", None) or "", "checksum": params.get("checksum", None) or "", "owner": params.get("owner", None) or "", "license": params.get("license", None) or "", "validity_start": params.get("validity_start", None) or "", "validity_end": params.get("validity_end", None) or "" # **source_metadata, }, class_name="Test", uuid=params.get("id", None), consistency_level=weaviate.data.replication.ConsistencyLevel.ALL, # default QUORUM ) return
[ "langchain.schema.Document", "langchain.retrievers.WeaviateHybridSearchRetriever", "langchain.embeddings.openai.OpenAIEmbeddings" ]
[((319, 358), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (338, 358), False, 'import logging\n'), ((508, 527), 'tracemalloc.start', 'tracemalloc.start', ([], {}), '()\n', (525, 527), False, 'import tracemalloc\n'), ((681, 694), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (692, 694), False, 'from dotenv import load_dotenv\n'), ((823, 854), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""', '""""""'], {}), "('OPENAI_API_KEY', '')\n", (832, 854), False, 'import os\n'), ((1727, 1745), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1743, 1745), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((7506, 7554), 'logging.info', 'logging.info', (['"""Query on namespace %s"""', 'namespace'], {}), "('Query on namespace %s', namespace)\n", (7518, 7554), False, 'import logging\n'), ((2240, 2401), 'langchain.retrievers.WeaviateHybridSearchRetriever', 'WeaviateHybridSearchRetriever', ([], {'client': 'client', 'index_name': 'namespace', 'text_key': '"""text"""', 'attributes': '[]', 'embedding': 'embeddings', 'create_schema_if_missing': '(True)'}), "(client=client, index_name=namespace, text_key\n ='text', attributes=[], embedding=embeddings, create_schema_if_missing=True\n )\n", (2269, 2401), False, 'from langchain.retrievers import WeaviateHybridSearchRetriever, ParentDocumentRetriever\n'), ((1891, 1925), 'os.environ.get', 'os.environ.get', (['"""WEAVIATE_API_KEY"""'], {}), "('WEAVIATE_API_KEY')\n", (1905, 1925), False, 'import os\n'), ((1986, 2016), 'os.environ.get', 'os.environ.get', (['"""WEAVIATE_URL"""'], {}), "('WEAVIATE_URL')\n", (2000, 2016), False, 'import os\n'), ((2613, 2774), 'langchain.retrievers.WeaviateHybridSearchRetriever', 'WeaviateHybridSearchRetriever', ([], {'client': 'client', 'index_name': 'namespace', 'text_key': '"""text"""', 'attributes': '[]', 'embedding': 'embeddings', 'create_schema_if_missing': '(True)'}), "(client=client, index_name=namespace, text_key\n ='text', attributes=[], embedding=embeddings, create_schema_if_missing=True\n )\n", (2642, 2774), False, 'from langchain.retrievers import WeaviateHybridSearchRetriever, ParentDocumentRetriever\n'), ((3975, 4026), 'marshmallow.fields.Nested', 'fields.Nested', (['metadata_schema_class'], {'required': '(True)'}), '(metadata_schema_class, required=True)\n', (3988, 4026), False, 'from marshmallow import Schema, fields\n'), ((4058, 4083), 'marshmallow.fields.Str', 'fields.Str', ([], {'required': '(True)'}), '(required=True)\n', (4068, 4083), False, 'from marshmallow import Schema, fields\n'), ((4777, 4828), 'marshmallow.fields.Nested', 'fields.Nested', (['metadata_schema_class'], {'required': '(True)'}), '(metadata_schema_class, required=True)\n', (4790, 4828), False, 'from marshmallow import Schema, fields\n'), ((4860, 4885), 'marshmallow.fields.Str', 'fields.Str', ([], {'required': '(True)'}), '(required=True)\n', (4870, 4885), False, 'from marshmallow import Schema, fields\n'), ((5666, 5712), 'loaders.loaders._document_loader', '_document_loader', (['observation', 'loader_settings'], {}), '(observation, loader_settings)\n', (5682, 5712), False, 'from loaders.loaders import _document_loader\n'), ((2114, 2146), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (2128, 2146), False, 'import os\n'), ((6426, 6531), 'langchain.schema.Document', 'Document', ([], {'metadata': "document_to_load[0]['metadata']", 'page_content': 
"document_to_load[0]['page_content']"}), "(metadata=document_to_load[0]['metadata'], page_content=\n document_to_load[0]['page_content'])\n", (6434, 6531), False, 'from langchain.schema import Document\n'), ((6071, 6176), 'langchain.schema.Document', 'Document', ([], {'metadata': "document_to_load[0]['metadata']", 'page_content': "document_to_load[0]['page_content']"}), "(metadata=document_to_load[0]['metadata'], page_content=\n document_to_load[0]['page_content'])\n", (6079, 6176), False, 'from langchain.schema import Document\n'), ((9824, 9876), 'logging.error', 'logging.error', (['f"""Invalid search_type: {search_type}"""'], {}), "(f'Invalid search_type: {search_type}')\n", (9837, 9876), False, 'import logging\n')]
# based on: https://python.langchain.com/en/latest/modules/indexes/vectorstores/examples/pgvector.html from typing import List, Tuple from langchain.embeddings.openai import OpenAIEmbeddings import langchain.vectorstores.pgvector class RepoSearcher: store: langchain.vectorstores.pgvector.PGVector def __init__(self, collection_name: str, connection_string: str): self.store = langchain.vectorstores.pgvector.PGVector( embedding_function=OpenAIEmbeddings(), # type: ignore collection_name=collection_name, connection_string=connection_string, distance_strategy=langchain.vectorstores.pgvector.DistanceStrategy.COSINE, ) def find_repos(self, query: str, limit=4) -> List[Tuple[str, str]]: results = self.store.similarity_search_with_score(query, limit) # sort by relevance, returning most relevant repository first results.sort(key=lambda a: a[1], reverse=True) return [ (r[0].metadata["namespace"], r[0].metadata["repository"]) for r in results ]
[ "langchain.embeddings.openai.OpenAIEmbeddings" ]
[((469, 487), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (485, 487), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n')]
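A short driver for the class above, assuming a Postgres database with the pgvector extension and an OPENAI_API_KEY in the environment; the module name, collection name and connection string are placeholders.

from repo_searcher import RepoSearcher  # hypothetical module path for the class above

searcher = RepoSearcher(
    collection_name="repo-embeddings",  # placeholder
    connection_string="postgresql+psycopg2://user:password@localhost:5432/vectors",  # placeholder
)

# Most relevant repositories first, as (namespace, repository) tuples.
for namespace, repository in searcher.find_repos("terraform modules for AWS networking", limit=3):
    print(f"{namespace}/{repository}")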
import os import chardet import importlib from pathlib import Path from WebUI.text_splitter import zh_title_enhance as func_zh_title_enhance from WebUI.Server.document_loaders import RapidOCRPDFLoader, RapidOCRLoader import langchain.document_loaders from langchain.docstore.document import Document from langchain.text_splitter import TextSplitter from WebUI.configs.basicconfig import (GetKbConfig, GetKbRootPath, GetTextSplitterDict) from WebUI.Server.utils import run_in_thread_pool, get_model_worker_config from WebUI.Server.document_loaders import * from typing import List, Union,Dict, Tuple, Generator TEXT_SPLITTER_NAME = "ChineseRecursiveTextSplitter" CHUNK_SIZE = 500 OVERLAP_SIZE = 100 ZH_TITLE_ENHANCE = False VECTOR_SEARCH_TOP_K = 5 SCORE_THRESHOLD = 1.5 LOADER_DICT = {"UnstructuredHTMLLoader": ['.html'], "MHTMLLoader": ['.mhtml'], "UnstructuredMarkdownLoader": ['.md'], "JSONLoader": [".json"], "JSONLinesLoader": [".jsonl"], "CSVLoader": [".csv"], # "FilteredCSVLoader": [".csv"], # 需要自己指定,目前还没有支持 "RapidOCRPDFLoader": [".pdf"], "RapidOCRLoader": ['.png', '.jpg', '.jpeg', '.bmp'], "UnstructuredEmailLoader": ['.eml', '.msg'], "UnstructuredEPubLoader": ['.epub'], "UnstructuredExcelLoader": ['.xlsx', '.xls', '.xlsd'], "NotebookLoader": ['.ipynb'], "UnstructuredODTLoader": ['.odt'], "PythonLoader": ['.py'], "UnstructuredRSTLoader": ['.rst'], "UnstructuredRTFLoader": ['.rtf'], "SRTLoader": ['.srt'], "TomlLoader": ['.toml'], "UnstructuredTSVLoader": ['.tsv'], "UnstructuredWordDocumentLoader": ['.docx', '.doc'], "UnstructuredXMLLoader": ['.xml'], "UnstructuredPowerPointLoader": ['.ppt', '.pptx'], "EverNoteLoader": ['.enex'], "UnstructuredFileLoader": ['.txt'], } SUPPORTED_EXTS = [ext for sublist in LOADER_DICT.values() for ext in sublist] def validate_kb_name(knowledge_base_id: str) -> bool: if "../" in knowledge_base_id: return False return True def get_kb_path(knowledge_base_name: str): kb_config = GetKbConfig() kb_root_path = GetKbRootPath(kb_config) return os.path.join(kb_root_path, knowledge_base_name) def get_doc_path(knowledge_base_name: str): return os.path.join(get_kb_path(knowledge_base_name), "content") def get_vs_path(knowledge_base_name: str, vector_name: str): return os.path.join(get_kb_path(knowledge_base_name), "vector_store", vector_name) def get_file_path(knowledge_base_name: str, doc_name: str): return os.path.join(get_doc_path(knowledge_base_name), doc_name) def list_files_from_folder(kb_name: str): doc_path = get_doc_path(kb_name) result = [] def is_skiped_path(path: str): tail = os.path.basename(path).lower() for x in ["temp", "tmp", ".", "~$"]: if tail.startswith(x): return True return False def process_entry(entry): if is_skiped_path(entry.path): return if entry.is_symlink(): target_path = os.path.realpath(entry.path) with os.scandir(target_path) as target_it: for target_entry in target_it: process_entry(target_entry) elif entry.is_file(): file_path = (Path(os.path.relpath(entry.path, doc_path)).as_posix()) result.append(file_path) elif entry.is_dir(): with os.scandir(entry.path) as it: for sub_entry in it: process_entry(sub_entry) with os.scandir(doc_path) as it: for entry in it: process_entry(entry) return result def get_LoaderClass(file_extension): for LoaderClass, extensions in LOADER_DICT.items(): if file_extension in extensions: return LoaderClass def get_loader(loader_name: str, file_path: str, loader_kwargs: Dict = None): loader_kwargs = loader_kwargs or {} try: if loader_name == "RapidOCRPDFLoader": DocumentLoader = RapidOCRPDFLoader elif loader_name == 
"RapidOCRLoader": DocumentLoader = RapidOCRLoader else: document_loaders_module = importlib.import_module('langchain.document_loaders') DocumentLoader = getattr(document_loaders_module, loader_name) except Exception as e: msg = f"for file {file_path} search loader {loader_name} failed: {e}" print(f'{e.__class__.__name__}: {msg}') document_loaders_module = importlib.import_module('langchain.document_loaders') DocumentLoader = getattr(document_loaders_module, "UnstructuredFileLoader") if loader_name == "UnstructuredFileLoader": loader_kwargs.setdefault("autodetect_encoding", True) elif loader_name == "CSVLoader": if not loader_kwargs.get("encoding"): with open(file_path, 'rb') as struct_file: encode_detect = chardet.detect(struct_file.read()) if encode_detect is None: encode_detect = {"encoding": "utf-8"} loader_kwargs["encoding"] = encode_detect["encoding"] elif loader_name == "JSONLoader": loader_kwargs.setdefault("jq_schema", ".") loader_kwargs.setdefault("text_content", False) elif loader_name == "JSONLinesLoader": loader_kwargs.setdefault("jq_schema", ".") loader_kwargs.setdefault("text_content", False) loader = DocumentLoader(file_path, **loader_kwargs) return loader def make_text_splitter( splitter_name: str = TEXT_SPLITTER_NAME, chunk_size: int = CHUNK_SIZE, chunk_overlap: int = OVERLAP_SIZE, llm_model: str = "", ): splitter_name = splitter_name or "SpacyTextSplitter" try: text_splitter_dict = GetTextSplitterDict() if splitter_name == "MarkdownHeaderTextSplitter": headers_to_split_on = text_splitter_dict[splitter_name]['headers_to_split_on'] text_splitter = langchain.text_splitter.MarkdownHeaderTextSplitter( headers_to_split_on=headers_to_split_on) elif splitter_name == "ChineseRecursiveTextSplitter": text_splitter_module = importlib.import_module('text_splitter') TextSplitter = getattr(text_splitter_module, splitter_name) text_splitter = TextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap) else: try: text_splitter_module = importlib.import_module('text_splitter') TextSplitter = getattr(text_splitter_module, splitter_name) except: text_splitter_module = importlib.import_module('langchain.text_splitter') TextSplitter = getattr(text_splitter_module, splitter_name) if text_splitter_dict[splitter_name]["source"] == "tiktoken": try: text_splitter = TextSplitter.from_tiktoken_encoder( encoding_name=text_splitter_dict[splitter_name]["tokenizer_name_or_path"], pipeline="zh_core_web_sm", chunk_size=chunk_size, chunk_overlap=chunk_overlap ) except: text_splitter = TextSplitter.from_tiktoken_encoder( encoding_name=text_splitter_dict[splitter_name]["tokenizer_name_or_path"], chunk_size=chunk_size, chunk_overlap=chunk_overlap ) elif text_splitter_dict[splitter_name]["source"] == "huggingface": if text_splitter_dict[splitter_name]["tokenizer_name_or_path"] == "": config = get_model_worker_config(llm_model) text_splitter_dict[splitter_name]["tokenizer_name_or_path"] = \ config.get("model_path") if text_splitter_dict[splitter_name]["tokenizer_name_or_path"] == "gpt2": from transformers import GPT2TokenizerFast from langchain.text_splitter import CharacterTextSplitter tokenizer = GPT2TokenizerFast.from_pretrained("gpt2") else: from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained( text_splitter_dict[splitter_name]["tokenizer_name_or_path"], trust_remote_code=True) text_splitter = TextSplitter.from_huggingface_tokenizer( tokenizer=tokenizer, chunk_size=chunk_size, chunk_overlap=chunk_overlap ) else: try: text_splitter = TextSplitter( pipeline="zh_core_web_sm", 
chunk_size=chunk_size, chunk_overlap=chunk_overlap ) except: text_splitter = TextSplitter( chunk_size=chunk_size, chunk_overlap=chunk_overlap ) except Exception as e: print(e) text_splitter_module = importlib.import_module('langchain.text_splitter') TextSplitter = getattr(text_splitter_module, "RecursiveCharacterTextSplitter") text_splitter = TextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap) splitter_name = "RecursiveCharacterTextSplitter" return text_splitter, splitter_name def list_kbs_from_folder(): kb_config = GetKbConfig() kb_root_path = GetKbRootPath(kb_config) kb_list = [] try: dirs = os.listdir(kb_root_path) for f in dirs: if os.path.isdir(os.path.join(kb_root_path, f)): kb_list.append(f) except Exception as e: pass return kb_list class KnowledgeFile: def __init__( self, filename: str, knowledge_base_name: str, loader_kwargs: Dict = {}, ): self.kb_name = knowledge_base_name self.filename = str(Path(filename).as_posix()) self.ext = os.path.splitext(filename)[-1].lower() if self.ext not in SUPPORTED_EXTS: raise ValueError(f"Not support file format: {self.filename}") self.loader_kwargs = loader_kwargs self.filepath = get_file_path(knowledge_base_name, filename) self.docs = None self.splited_docs = None self.document_loader_name = get_LoaderClass(self.ext) self.text_splitter_name = TEXT_SPLITTER_NAME def file2docs(self, refresh: bool = False): if self.docs is None or refresh: print(f"{self.document_loader_name} used for {self.filepath}") loader = get_loader(loader_name=self.document_loader_name, file_path=self.filepath, loader_kwargs=self.loader_kwargs) self.docs = loader.load() return self.docs def docs2texts( self, docs: List[Document] = None, zh_title_enhance: bool = ZH_TITLE_ENHANCE, refresh: bool = False, chunk_size: int = CHUNK_SIZE, chunk_overlap: int = OVERLAP_SIZE, text_splitter: TextSplitter = None, ): docs = docs or self.file2docs(refresh=refresh) if not docs: return [] if self.ext not in [".csv"]: if text_splitter is None: text_splitter, new_text_splitter_name = make_text_splitter(splitter_name=self.text_splitter_name, chunk_size=chunk_size, chunk_overlap=chunk_overlap) if new_text_splitter_name != self.text_splitter_name: self.text_splitter_name = new_text_splitter_name if self.text_splitter_name == "MarkdownHeaderTextSplitter": docs = text_splitter.split_text(docs[0].page_content) else: docs = text_splitter.split_documents(docs) if not docs: return [] print(f"Document split samples: {docs[0]}") if zh_title_enhance: docs = func_zh_title_enhance(docs) self.splited_docs = docs return self.splited_docs def file2text( self, zh_title_enhance: bool = ZH_TITLE_ENHANCE, refresh: bool = False, chunk_size: int = CHUNK_SIZE, chunk_overlap: int = OVERLAP_SIZE, text_splitter: TextSplitter = None, ): if self.splited_docs is None or refresh: docs = self.file2docs() self.splited_docs = self.docs2texts(docs=docs, zh_title_enhance=zh_title_enhance, refresh=refresh, chunk_size=chunk_size, chunk_overlap=chunk_overlap, text_splitter=text_splitter) return self.splited_docs def file_exist(self): return os.path.isfile(self.filepath) def get_mtime(self): return os.path.getmtime(self.filepath) def get_size(self): return os.path.getsize(self.filepath) def files2docs_in_thread( files: List[Union[KnowledgeFile, Tuple[str, str], Dict]], chunk_size: int = CHUNK_SIZE, chunk_overlap: int = OVERLAP_SIZE, zh_title_enhance: bool = ZH_TITLE_ENHANCE, ) -> Generator: def file2docs(*, file: KnowledgeFile, **kwargs) -> Tuple[bool, Tuple[str, str, List[Document]]]: try: return True, (file.kb_name, 
file.filename, file.file2text(**kwargs)) except Exception as e: msg = f"from {file.kb_name}/{file.filename} load failed: {e}" return False, (file.kb_name, file.filename, msg) kwargs_list = [] for i, file in enumerate(files): kwargs = {} try: if isinstance(file, tuple) and len(file) >= 2: filename = file[0] kb_name = file[1] file = KnowledgeFile(filename=filename, knowledge_base_name=kb_name) elif isinstance(file, dict): filename = file.pop("filename") kb_name = file.pop("kb_name") kwargs.update(file) file = KnowledgeFile(filename=filename, knowledge_base_name=kb_name) kwargs["file"] = file kwargs["chunk_size"] = chunk_size kwargs["chunk_overlap"] = chunk_overlap kwargs["zh_title_enhance"] = zh_title_enhance kwargs_list.append(kwargs) except Exception as e: yield False, (kb_name, filename, str(e)) for result in run_in_thread_pool(func=file2docs, params=kwargs_list): yield result
[ "langchain.text_splitter.TextSplitter.from_huggingface_tokenizer", "langchain.text_splitter.TextSplitter.from_tiktoken_encoder", "langchain.text_splitter.TextSplitter" ]
[((2330, 2343), 'WebUI.configs.basicconfig.GetKbConfig', 'GetKbConfig', ([], {}), '()\n', (2341, 2343), False, 'from WebUI.configs.basicconfig import GetKbConfig, GetKbRootPath, GetTextSplitterDict\n'), ((2363, 2387), 'WebUI.configs.basicconfig.GetKbRootPath', 'GetKbRootPath', (['kb_config'], {}), '(kb_config)\n', (2376, 2387), False, 'from WebUI.configs.basicconfig import GetKbConfig, GetKbRootPath, GetTextSplitterDict\n'), ((2399, 2446), 'os.path.join', 'os.path.join', (['kb_root_path', 'knowledge_base_name'], {}), '(kb_root_path, knowledge_base_name)\n', (2411, 2446), False, 'import os\n'), ((9822, 9835), 'WebUI.configs.basicconfig.GetKbConfig', 'GetKbConfig', ([], {}), '()\n', (9833, 9835), False, 'from WebUI.configs.basicconfig import GetKbConfig, GetKbRootPath, GetTextSplitterDict\n'), ((9855, 9879), 'WebUI.configs.basicconfig.GetKbRootPath', 'GetKbRootPath', (['kb_config'], {}), '(kb_config)\n', (9868, 9879), False, 'from WebUI.configs.basicconfig import GetKbConfig, GetKbRootPath, GetTextSplitterDict\n'), ((15110, 15164), 'WebUI.Server.utils.run_in_thread_pool', 'run_in_thread_pool', ([], {'func': 'file2docs', 'params': 'kwargs_list'}), '(func=file2docs, params=kwargs_list)\n', (15128, 15164), False, 'from WebUI.Server.utils import run_in_thread_pool, get_model_worker_config\n'), ((3792, 3812), 'os.scandir', 'os.scandir', (['doc_path'], {}), '(doc_path)\n', (3802, 3812), False, 'import os\n'), ((6015, 6036), 'WebUI.configs.basicconfig.GetTextSplitterDict', 'GetTextSplitterDict', ([], {}), '()\n', (6034, 6036), False, 'from WebUI.configs.basicconfig import GetKbConfig, GetKbRootPath, GetTextSplitterDict\n'), ((9921, 9945), 'os.listdir', 'os.listdir', (['kb_root_path'], {}), '(kb_root_path)\n', (9931, 9945), False, 'import os\n'), ((13436, 13465), 'os.path.isfile', 'os.path.isfile', (['self.filepath'], {}), '(self.filepath)\n', (13450, 13465), False, 'import os\n'), ((13507, 13538), 'os.path.getmtime', 'os.path.getmtime', (['self.filepath'], {}), '(self.filepath)\n', (13523, 13538), False, 'import os\n'), ((13579, 13609), 'os.path.getsize', 'os.path.getsize', (['self.filepath'], {}), '(self.filepath)\n', (13594, 13609), False, 'import os\n'), ((3297, 3325), 'os.path.realpath', 'os.path.realpath', (['entry.path'], {}), '(entry.path)\n', (3313, 3325), False, 'import os\n'), ((4750, 4803), 'importlib.import_module', 'importlib.import_module', (['"""langchain.document_loaders"""'], {}), "('langchain.document_loaders')\n", (4773, 4803), False, 'import importlib\n'), ((9453, 9503), 'importlib.import_module', 'importlib.import_module', (['"""langchain.text_splitter"""'], {}), "('langchain.text_splitter')\n", (9476, 9503), False, 'import importlib\n'), ((9615, 9679), 'langchain.text_splitter.TextSplitter', 'TextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (9627, 9679), False, 'from langchain.text_splitter import TextSplitter\n'), ((12478, 12505), 'WebUI.text_splitter.zh_title_enhance', 'func_zh_title_enhance', (['docs'], {}), '(docs)\n', (12499, 12505), True, 'from WebUI.text_splitter import zh_title_enhance as func_zh_title_enhance\n'), ((2990, 3012), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (3006, 3012), False, 'import os\n'), ((3343, 3366), 'os.scandir', 'os.scandir', (['target_path'], {}), '(target_path)\n', (3353, 3366), False, 'import os\n'), ((4434, 4487), 'importlib.import_module', 'importlib.import_module', (['"""langchain.document_loaders"""'], {}), 
"('langchain.document_loaders')\n", (4457, 4487), False, 'import importlib\n'), ((6420, 6460), 'importlib.import_module', 'importlib.import_module', (['"""text_splitter"""'], {}), "('text_splitter')\n", (6443, 6460), False, 'import importlib\n'), ((6561, 6625), 'langchain.text_splitter.TextSplitter', 'TextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (6573, 6625), False, 'from langchain.text_splitter import TextSplitter\n'), ((9998, 10027), 'os.path.join', 'os.path.join', (['kb_root_path', 'f'], {}), '(kb_root_path, f)\n', (10010, 10027), False, 'import os\n'), ((6696, 6736), 'importlib.import_module', 'importlib.import_module', (['"""text_splitter"""'], {}), "('text_splitter')\n", (6719, 6736), False, 'import importlib\n'), ((10362, 10376), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (10366, 10376), False, 'from pathlib import Path\n'), ((10408, 10434), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (10424, 10434), False, 'import os\n'), ((3670, 3692), 'os.scandir', 'os.scandir', (['entry.path'], {}), '(entry.path)\n', (3680, 3692), False, 'import os\n'), ((6872, 6922), 'importlib.import_module', 'importlib.import_module', (['"""langchain.text_splitter"""'], {}), "('langchain.text_splitter')\n", (6895, 6922), False, 'import importlib\n'), ((7131, 7328), 'langchain.text_splitter.TextSplitter.from_tiktoken_encoder', 'TextSplitter.from_tiktoken_encoder', ([], {'encoding_name': "text_splitter_dict[splitter_name]['tokenizer_name_or_path']", 'pipeline': '"""zh_core_web_sm"""', 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), "(encoding_name=text_splitter_dict[\n splitter_name]['tokenizer_name_or_path'], pipeline='zh_core_web_sm',\n chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n", (7165, 7328), False, 'from langchain.text_splitter import TextSplitter\n'), ((8731, 8848), 'langchain.text_splitter.TextSplitter.from_huggingface_tokenizer', 'TextSplitter.from_huggingface_tokenizer', ([], {'tokenizer': 'tokenizer', 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(tokenizer=tokenizer, chunk_size=\n chunk_size, chunk_overlap=chunk_overlap)\n', (8770, 8848), False, 'from langchain.text_splitter import TextSplitter\n'), ((3536, 3573), 'os.path.relpath', 'os.path.relpath', (['entry.path', 'doc_path'], {}), '(entry.path, doc_path)\n', (3551, 3573), False, 'import os\n'), ((7498, 7668), 'langchain.text_splitter.TextSplitter.from_tiktoken_encoder', 'TextSplitter.from_tiktoken_encoder', ([], {'encoding_name': "text_splitter_dict[splitter_name]['tokenizer_name_or_path']", 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), "(encoding_name=text_splitter_dict[\n splitter_name]['tokenizer_name_or_path'], chunk_size=chunk_size,\n chunk_overlap=chunk_overlap)\n", (7532, 7668), False, 'from langchain.text_splitter import TextSplitter\n'), ((7948, 7982), 'WebUI.Server.utils.get_model_worker_config', 'get_model_worker_config', (['llm_model'], {}), '(llm_model)\n', (7971, 7982), False, 'from WebUI.Server.utils import run_in_thread_pool, get_model_worker_config\n'), ((8380, 8421), 'transformers.GPT2TokenizerFast.from_pretrained', 'GPT2TokenizerFast.from_pretrained', (['"""gpt2"""'], {}), "('gpt2')\n", (8413, 8421), False, 'from transformers import GPT2TokenizerFast\n'), ((8535, 8654), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (["text_splitter_dict[splitter_name]['tokenizer_name_or_path']"], 
{'trust_remote_code': '(True)'}), "(text_splitter_dict[splitter_name][\n 'tokenizer_name_or_path'], trust_remote_code=True)\n", (8564, 8654), False, 'from transformers import AutoTokenizer\n'), ((8997, 9092), 'langchain.text_splitter.TextSplitter', 'TextSplitter', ([], {'pipeline': '"""zh_core_web_sm"""', 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), "(pipeline='zh_core_web_sm', chunk_size=chunk_size,\n chunk_overlap=chunk_overlap)\n", (9009, 9092), False, 'from langchain.text_splitter import TextSplitter\n'), ((9243, 9307), 'langchain.text_splitter.TextSplitter', 'TextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (9255, 9307), False, 'from langchain.text_splitter import TextSplitter\n')]
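A small usage sketch for the helpers above; the import path, knowledge-base name and file name are placeholders, and it assumes the file already sits under that knowledge base's content folder.

# Hypothetical import path for the module shown above.
from WebUI.Server.knowledge_base.utils import KnowledgeFile, make_text_splitter

kf = KnowledgeFile(filename="manual.pdf", knowledge_base_name="demo_kb")  # placeholders
chunks = kf.file2text(chunk_size=500, chunk_overlap=100)
print(len(chunks), "chunks produced")

# Splitters can also be built directly; on any failure the helper falls back
# to RecursiveCharacterTextSplitter.
splitter, used_name = make_text_splitter(chunk_size=500, chunk_overlap=100)
print("splitter in use:", used_name)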
# with this Python script we are loading all Knowledge Asset vector embeddings into Milvus VectorDB import os import pandas as pd import langchain from dotenv import load_dotenv from langchain.document_loaders import DataFrameLoader from langchain.embeddings import HuggingFaceEmbeddings from langchain.vectorstores import Milvus load_dotenv() df = pd.read_csv("../utils/output.tsv", sep="\t") loader = DataFrameLoader(df, "body") docs = loader.load() vector_db = Milvus( collection_name="OfficeHoursDemoCollection", embedding_function=HuggingFaceEmbeddings(model_name="multi-qa-MiniLM-L6-cos-v1"), connection_args={ "uri": os.getenv("MILVUS_URI"), "user": os.getenv("MILVUS_USER"), "password": os.getenv("MILVUS_PASSWORD"), "secure": True, }, ) vector_db.add_documents(docs)
[ "langchain.document_loaders.DataFrameLoader", "langchain.embeddings.HuggingFaceEmbeddings" ]
[((332, 345), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (343, 345), False, 'from dotenv import load_dotenv\n'), ((352, 396), 'pandas.read_csv', 'pd.read_csv', (['"""../utils/output.tsv"""'], {'sep': '"""\t"""'}), "('../utils/output.tsv', sep='\\t')\n", (363, 396), True, 'import pandas as pd\n'), ((406, 433), 'langchain.document_loaders.DataFrameLoader', 'DataFrameLoader', (['df', '"""body"""'], {}), "(df, 'body')\n", (421, 433), False, 'from langchain.document_loaders import DataFrameLoader\n'), ((548, 609), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""multi-qa-MiniLM-L6-cos-v1"""'}), "(model_name='multi-qa-MiniLM-L6-cos-v1')\n", (569, 609), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((648, 671), 'os.getenv', 'os.getenv', (['"""MILVUS_URI"""'], {}), "('MILVUS_URI')\n", (657, 671), False, 'import os\n'), ((689, 713), 'os.getenv', 'os.getenv', (['"""MILVUS_USER"""'], {}), "('MILVUS_USER')\n", (698, 713), False, 'import os\n'), ((735, 763), 'os.getenv', 'os.getenv', (['"""MILVUS_PASSWORD"""'], {}), "('MILVUS_PASSWORD')\n", (744, 763), False, 'import os\n')]
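A follow-up query sketch against the collection populated above, reusing the same connection settings from the environment; the query string is arbitrary.

import os
from dotenv import load_dotenv
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Milvus

load_dotenv()

vector_db = Milvus(
    collection_name="OfficeHoursDemoCollection",
    embedding_function=HuggingFaceEmbeddings(model_name="multi-qa-MiniLM-L6-cos-v1"),
    connection_args={
        "uri": os.getenv("MILVUS_URI"),
        "user": os.getenv("MILVUS_USER"),
        "password": os.getenv("MILVUS_PASSWORD"),
        "secure": True,
    },
)

# Return the three closest Knowledge Assets for a natural-language query.
for doc in vector_db.similarity_search("How are knowledge assets published?", k=3):
    print(doc.page_content[:120])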
import os import langchain from langchain.embeddings.openai import OpenAIEmbeddings from langchain.vectorstores import Chroma from langchain.text_splitter import CharacterTextSplitter from langchain import OpenAI, VectorDBQA from langchain.document_loaders import TextLoader from langchain.document_loaders import WebBaseLoader from langchain.agents.agent_toolkits import ( create_vectorstore_agent, VectorStoreToolkit, VectorStoreInfo, ) os.environ['serpapi_api_key']="YOUR_serpapi_api_key" os.environ['OPENAI_API_KEY']="YOUR_OPENAI_API_KEY" llm = OpenAI(temperature=0) loader = TextLoader('the_needed_text.txt') documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) texts = text_splitter.split_documents(documents) embeddings = OpenAIEmbeddings() text_store = Chroma.from_documents(texts, embeddings, collection_name="the_needed_text") loader = WebBaseLoader("https://beta.ruff.rs/docs/faq/") docs = loader.load() ruff_texts = text_splitter.split_documents(docs) ruff_store = Chroma.from_documents(ruff_texts, embeddings, collection_name="ruff") vectorstore_info = VectorStoreInfo( name="the_needed_text_in_detail", description="the most recent data of bill gates", vectorstore=text_store ) toolkit = VectorStoreToolkit(vectorstore_info=vectorstore_info) agent_executor = create_vectorstore_agent( llm=llm, toolkit=toolkit, verbose=True ) agent_executor.run("who is bill gates?, what is his age now? and how many assets he has now? ")
[ "langchain.text_splitter.CharacterTextSplitter", "langchain.agents.agent_toolkits.VectorStoreToolkit", "langchain.agents.agent_toolkits.VectorStoreInfo", "langchain.document_loaders.TextLoader", "langchain.agents.agent_toolkits.create_vectorstore_agent", "langchain.document_loaders.WebBaseLoader", "langchain.vectorstores.Chroma.from_documents", "langchain.OpenAI", "langchain.embeddings.openai.OpenAIEmbeddings" ]
[((586, 607), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (592, 607), False, 'from langchain import OpenAI, VectorDBQA\n'), ((622, 655), 'langchain.document_loaders.TextLoader', 'TextLoader', (['"""the_needed_text.txt"""'], {}), "('the_needed_text.txt')\n", (632, 655), False, 'from langchain.document_loaders import TextLoader\n'), ((700, 755), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(0)'}), '(chunk_size=1000, chunk_overlap=0)\n', (721, 755), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((822, 840), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (838, 840), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((855, 930), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', (['texts', 'embeddings'], {'collection_name': '"""the_needed_text"""'}), "(texts, embeddings, collection_name='the_needed_text')\n", (876, 930), False, 'from langchain.vectorstores import Chroma\n'), ((943, 990), 'langchain.document_loaders.WebBaseLoader', 'WebBaseLoader', (['"""https://beta.ruff.rs/docs/faq/"""'], {}), "('https://beta.ruff.rs/docs/faq/')\n", (956, 990), False, 'from langchain.document_loaders import WebBaseLoader\n'), ((1077, 1146), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', (['ruff_texts', 'embeddings'], {'collection_name': '"""ruff"""'}), "(ruff_texts, embeddings, collection_name='ruff')\n", (1098, 1146), False, 'from langchain.vectorstores import Chroma\n'), ((1169, 1297), 'langchain.agents.agent_toolkits.VectorStoreInfo', 'VectorStoreInfo', ([], {'name': '"""the_needed_text_in_detail"""', 'description': '"""the most recent data of bill gates"""', 'vectorstore': 'text_store'}), "(name='the_needed_text_in_detail', description=\n 'the most recent data of bill gates', vectorstore=text_store)\n", (1184, 1297), False, 'from langchain.agents.agent_toolkits import create_vectorstore_agent, VectorStoreToolkit, VectorStoreInfo\n'), ((1324, 1377), 'langchain.agents.agent_toolkits.VectorStoreToolkit', 'VectorStoreToolkit', ([], {'vectorstore_info': 'vectorstore_info'}), '(vectorstore_info=vectorstore_info)\n', (1342, 1377), False, 'from langchain.agents.agent_toolkits import create_vectorstore_agent, VectorStoreToolkit, VectorStoreInfo\n'), ((1400, 1464), 'langchain.agents.agent_toolkits.create_vectorstore_agent', 'create_vectorstore_agent', ([], {'llm': 'llm', 'toolkit': 'toolkit', 'verbose': '(True)'}), '(llm=llm, toolkit=toolkit, verbose=True)\n', (1424, 1464), False, 'from langchain.agents.agent_toolkits import create_vectorstore_agent, VectorStoreToolkit, VectorStoreInfo\n')]
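The ruff_store built above is populated but never queried; continuing from that snippet, a second agent over it can be assembled with the same toolkit calls. This sketch reuses llm and ruff_store from the code above; the question is arbitrary.

ruff_info = VectorStoreInfo(
    name="ruff_faq",
    description="frequently asked questions about the ruff Python linter",
    vectorstore=ruff_store,
)
ruff_agent = create_vectorstore_agent(
    llm=llm,
    toolkit=VectorStoreToolkit(vectorstore_info=ruff_info),
    verbose=True,
)
ruff_agent.run("How do I ignore a specific rule in ruff?")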
"""Beta Feature: base interface for cache.""" from __future__ import annotations import hashlib import inspect import json import logging from abc import ABC, abstractmethod from datetime import timedelta from typing import ( TYPE_CHECKING, Any, Callable, Dict, Optional, Sequence, Tuple, Type, Union, cast, ) from sqlalchemy import Column, Integer, String, create_engine, select from sqlalchemy.engine.base import Engine from sqlalchemy.orm import Session from langchain.utils import get_from_env try: from sqlalchemy.orm import declarative_base except ImportError: from sqlalchemy.ext.declarative import declarative_base from langchain.embeddings.base import Embeddings from langchain.load.dump import dumps from langchain.load.load import loads from langchain.schema import Generation from langchain.vectorstores.redis import Redis as RedisVectorstore logger = logging.getLogger(__file__) if TYPE_CHECKING: import momento RETURN_VAL_TYPE = Sequence[Generation] def _hash(_input: str) -> str: """Use a deterministic hashing approach.""" return hashlib.md5(_input.encode()).hexdigest() def _dump_generations_to_json(generations: RETURN_VAL_TYPE) -> str: """Dump generations to json. Args: generations (RETURN_VAL_TYPE): A list of language model generations. Returns: str: Json representing a list of generations. """ return json.dumps([generation.dict() for generation in generations]) def _load_generations_from_json(generations_json: str) -> RETURN_VAL_TYPE: """Load generations from json. Args: generations_json (str): A string of json representing a list of generations. Raises: ValueError: Could not decode json string to list of generations. Returns: RETURN_VAL_TYPE: A list of generations. """ try: results = json.loads(generations_json) return [Generation(**generation_dict) for generation_dict in results] except json.JSONDecodeError: raise ValueError( f"Could not decode json to list of generations: {generations_json}" ) class BaseCache(ABC): """Base interface for cache.""" @abstractmethod def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" @abstractmethod def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache based on prompt and llm_string.""" @abstractmethod def clear(self, **kwargs: Any) -> None: """Clear cache that can take additional keyword arguments.""" class InMemoryCache(BaseCache): """Cache that stores things in memory.""" def __init__(self) -> None: """Initialize with empty cache.""" self._cache: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {} def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" return self._cache.get((prompt, llm_string), None) def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache based on prompt and llm_string.""" self._cache[(prompt, llm_string)] = return_val def clear(self, **kwargs: Any) -> None: """Clear cache.""" self._cache = {} Base = declarative_base() class FullLLMCache(Base): # type: ignore """SQLite table for full LLM Cache (all generations).""" __tablename__ = "full_llm_cache" prompt = Column(String, primary_key=True) llm = Column(String, primary_key=True) idx = Column(Integer, primary_key=True) response = Column(String) class SQLAlchemyCache(BaseCache): """Cache that uses SQAlchemy as a backend.""" def __init__(self, engine: Engine, cache_schema: Type[FullLLMCache] = FullLLMCache): """Initialize by creating all tables.""" self.engine = engine 
self.cache_schema = cache_schema self.cache_schema.metadata.create_all(self.engine) def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" stmt = ( select(self.cache_schema.response) .where(self.cache_schema.prompt == prompt) # type: ignore .where(self.cache_schema.llm == llm_string) .order_by(self.cache_schema.idx) ) with Session(self.engine) as session: rows = session.execute(stmt).fetchall() if rows: try: return [loads(row[0]) for row in rows] except Exception: logger.warning( "Retrieving a cache value that could not be deserialized " "properly. This is likely due to the cache being in an " "older format. Please recreate your cache to avoid this " "error." ) # In a previous life we stored the raw text directly # in the table, so assume it's in that format. return [Generation(text=row[0]) for row in rows] return None def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update based on prompt and llm_string.""" items = [ self.cache_schema(prompt=prompt, llm=llm_string, response=dumps(gen), idx=i) for i, gen in enumerate(return_val) ] with Session(self.engine) as session, session.begin(): for item in items: session.merge(item) def clear(self, **kwargs: Any) -> None: """Clear cache.""" with Session(self.engine) as session: session.query(self.cache_schema).delete() class SQLiteCache(SQLAlchemyCache): """Cache that uses SQLite as a backend.""" def __init__(self, database_path: str = ".langchain.db"): """Initialize by creating the engine and all tables.""" engine = create_engine(f"sqlite:///{database_path}") super().__init__(engine) class RedisCache(BaseCache): """Cache that uses Redis as a backend.""" # TODO - implement a TTL policy in Redis def __init__(self, redis_: Any): """Initialize by passing in Redis instance.""" try: from redis import Redis except ImportError: raise ValueError( "Could not import redis python package. " "Please install it with `pip install redis`." ) if not isinstance(redis_, Redis): raise ValueError("Please pass in Redis object.") self.redis = redis_ def _key(self, prompt: str, llm_string: str) -> str: """Compute key from prompt and llm_string""" return _hash(prompt + llm_string) def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" generations = [] # Read from a Redis HASH results = self.redis.hgetall(self._key(prompt, llm_string)) if results: for _, text in results.items(): generations.append(Generation(text=text)) return generations if generations else None def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache based on prompt and llm_string.""" for gen in return_val: if not isinstance(gen, Generation): raise ValueError( "RedisCache only supports caching of normal LLM generations, " f"got {type(gen)}" ) # Write to a Redis HASH key = self._key(prompt, llm_string) self.redis.hset( key, mapping={ str(idx): generation.text for idx, generation in enumerate(return_val) }, ) def clear(self, **kwargs: Any) -> None: """Clear cache. 
If `asynchronous` is True, flush asynchronously.""" asynchronous = kwargs.get("asynchronous", False) self.redis.flushdb(asynchronous=asynchronous, **kwargs) class RedisSemanticCache(BaseCache): """Cache that uses Redis as a vector-store backend.""" # TODO - implement a TTL policy in Redis def __init__( self, redis_url: str, embedding: Embeddings, score_threshold: float = 0.2 ): """Initialize by passing in the `init` GPTCache func Args: redis_url (str): URL to connect to Redis. embedding (Embedding): Embedding provider for semantic encoding and search. score_threshold (float, 0.2): Example: .. code-block:: python import langchain from langchain.cache import RedisSemanticCache from langchain.embeddings import OpenAIEmbeddings langchain.llm_cache = RedisSemanticCache( redis_url="redis://localhost:6379", embedding=OpenAIEmbeddings() ) """ self._cache_dict: Dict[str, RedisVectorstore] = {} self.redis_url = redis_url self.embedding = embedding self.score_threshold = score_threshold def _index_name(self, llm_string: str) -> str: hashed_index = _hash(llm_string) return f"cache:{hashed_index}" def _get_llm_cache(self, llm_string: str) -> RedisVectorstore: index_name = self._index_name(llm_string) # return vectorstore client for the specific llm string if index_name in self._cache_dict: return self._cache_dict[index_name] # create new vectorstore client for the specific llm string try: self._cache_dict[index_name] = RedisVectorstore.from_existing_index( embedding=self.embedding, index_name=index_name, redis_url=self.redis_url, ) except ValueError: redis = RedisVectorstore( embedding_function=self.embedding.embed_query, index_name=index_name, redis_url=self.redis_url, ) _embedding = self.embedding.embed_query(text="test") redis._create_index(dim=len(_embedding)) self._cache_dict[index_name] = redis return self._cache_dict[index_name] def clear(self, **kwargs: Any) -> None: """Clear semantic cache for a given llm_string.""" index_name = self._index_name(kwargs["llm_string"]) if index_name in self._cache_dict: self._cache_dict[index_name].drop_index( index_name=index_name, delete_documents=True, redis_url=self.redis_url ) del self._cache_dict[index_name] def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" llm_cache = self._get_llm_cache(llm_string) generations = [] # Read from a Hash results = llm_cache.similarity_search_limit_score( query=prompt, k=1, score_threshold=self.score_threshold, ) if results: for document in results: for text in document.metadata["return_val"]: generations.append(Generation(text=text)) return generations if generations else None def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache based on prompt and llm_string.""" for gen in return_val: if not isinstance(gen, Generation): raise ValueError( "RedisSemanticCache only supports caching of " f"normal LLM generations, got {type(gen)}" ) llm_cache = self._get_llm_cache(llm_string) # Write to vectorstore metadata = { "llm_string": llm_string, "prompt": prompt, "return_val": [generation.text for generation in return_val], } llm_cache.add_texts(texts=[prompt], metadatas=[metadata]) class GPTCache(BaseCache): """Cache that uses GPTCache as a backend.""" def __init__( self, init_func: Union[ Callable[[Any, str], None], Callable[[Any], None], None ] = None, ): """Initialize by passing in init function (default: `None`). 
Args: init_func (Optional[Callable[[Any], None]]): init `GPTCache` function (default: `None`) Example: .. code-block:: python # Initialize GPTCache with a custom init function import gptcache from gptcache.processor.pre import get_prompt from gptcache.manager.factory import get_data_manager # Avoid multiple caches using the same file, causing different llm model caches to affect each other def init_gptcache(cache_obj: gptcache.Cache, llm str): cache_obj.init( pre_embedding_func=get_prompt, data_manager=manager_factory( manager="map", data_dir=f"map_cache_{llm}" ), ) langchain.llm_cache = GPTCache(init_gptcache) """ try: import gptcache # noqa: F401 except ImportError: raise ImportError( "Could not import gptcache python package. " "Please install it with `pip install gptcache`." ) self.init_gptcache_func: Union[ Callable[[Any, str], None], Callable[[Any], None], None ] = init_func self.gptcache_dict: Dict[str, Any] = {} def _new_gptcache(self, llm_string: str) -> Any: """New gptcache object""" from gptcache import Cache from gptcache.manager.factory import get_data_manager from gptcache.processor.pre import get_prompt _gptcache = Cache() if self.init_gptcache_func is not None: sig = inspect.signature(self.init_gptcache_func) if len(sig.parameters) == 2: self.init_gptcache_func(_gptcache, llm_string) # type: ignore[call-arg] else: self.init_gptcache_func(_gptcache) # type: ignore[call-arg] else: _gptcache.init( pre_embedding_func=get_prompt, data_manager=get_data_manager(data_path=llm_string), ) self.gptcache_dict[llm_string] = _gptcache return _gptcache def _get_gptcache(self, llm_string: str) -> Any: """Get a cache object. When the corresponding llm model cache does not exist, it will be created.""" return self.gptcache_dict.get(llm_string, self._new_gptcache(llm_string)) def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up the cache data. First, retrieve the corresponding cache object using the `llm_string` parameter, and then retrieve the data from the cache based on the `prompt`. """ from gptcache.adapter.api import get _gptcache = self.gptcache_dict.get(llm_string, None) if _gptcache is None: return None res = get(prompt, cache_obj=_gptcache) if res: return [ Generation(**generation_dict) for generation_dict in json.loads(res) ] return None def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache. First, retrieve the corresponding cache object using the `llm_string` parameter, and then store the `prompt` and `return_val` in the cache object. """ for gen in return_val: if not isinstance(gen, Generation): raise ValueError( "GPTCache only supports caching of normal LLM generations, " f"got {type(gen)}" ) from gptcache.adapter.api import put _gptcache = self._get_gptcache(llm_string) handled_data = json.dumps([generation.dict() for generation in return_val]) put(prompt, handled_data, cache_obj=_gptcache) return None def clear(self, **kwargs: Any) -> None: """Clear cache.""" from gptcache import Cache for gptcache_instance in self.gptcache_dict.values(): gptcache_instance = cast(Cache, gptcache_instance) gptcache_instance.flush() self.gptcache_dict.clear() def _ensure_cache_exists(cache_client: momento.CacheClient, cache_name: str) -> None: """Create cache if it doesn't exist. 
Raises: SdkException: Momento service or network error Exception: Unexpected response """ from momento.responses import CreateCache create_cache_response = cache_client.create_cache(cache_name) if isinstance(create_cache_response, CreateCache.Success) or isinstance( create_cache_response, CreateCache.CacheAlreadyExists ): return None elif isinstance(create_cache_response, CreateCache.Error): raise create_cache_response.inner_exception else: raise Exception(f"Unexpected response cache creation: {create_cache_response}") def _validate_ttl(ttl: Optional[timedelta]) -> None: if ttl is not None and ttl <= timedelta(seconds=0): raise ValueError(f"ttl must be positive but was {ttl}.") class MomentoCache(BaseCache): """Cache that uses Momento as a backend. See https://gomomento.com/""" def __init__( self, cache_client: momento.CacheClient, cache_name: str, *, ttl: Optional[timedelta] = None, ensure_cache_exists: bool = True, ): """Instantiate a prompt cache using Momento as a backend. Note: to instantiate the cache client passed to MomentoCache, you must have a Momento account. See https://gomomento.com/. Args: cache_client (CacheClient): The Momento cache client. cache_name (str): The name of the cache to use to store the data. ttl (Optional[timedelta], optional): The time to live for the cache items. Defaults to None, ie use the client default TTL. ensure_cache_exists (bool, optional): Create the cache if it doesn't exist. Defaults to True. Raises: ImportError: Momento python package is not installed. TypeError: cache_client is not of type momento.CacheClientObject ValueError: ttl is non-null and non-negative """ try: from momento import CacheClient except ImportError: raise ImportError( "Could not import momento python package. " "Please install it with `pip install momento`." ) if not isinstance(cache_client, CacheClient): raise TypeError("cache_client must be a momento.CacheClient object.") _validate_ttl(ttl) if ensure_cache_exists: _ensure_cache_exists(cache_client, cache_name) self.cache_client = cache_client self.cache_name = cache_name self.ttl = ttl @classmethod def from_client_params( cls, cache_name: str, ttl: timedelta, *, configuration: Optional[momento.config.Configuration] = None, auth_token: Optional[str] = None, **kwargs: Any, ) -> MomentoCache: """Construct cache from CacheClient parameters.""" try: from momento import CacheClient, Configurations, CredentialProvider except ImportError: raise ImportError( "Could not import momento python package. " "Please install it with `pip install momento`." ) if configuration is None: configuration = Configurations.Laptop.v1() auth_token = auth_token or get_from_env("auth_token", "MOMENTO_AUTH_TOKEN") credentials = CredentialProvider.from_string(auth_token) cache_client = CacheClient(configuration, credentials, default_ttl=ttl) return cls(cache_client, cache_name, ttl=ttl, **kwargs) def __key(self, prompt: str, llm_string: str) -> str: """Compute cache key from prompt and associated model and settings. Args: prompt (str): The prompt run through the language model. llm_string (str): The language model version and settings. Returns: str: The cache key. """ return _hash(prompt + llm_string) def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Lookup llm generations in cache by prompt and associated model and settings. Args: prompt (str): The prompt run through the language model. llm_string (str): The language model version and settings. 
Raises: SdkException: Momento service or network error Returns: Optional[RETURN_VAL_TYPE]: A list of language model generations. """ from momento.responses import CacheGet generations: RETURN_VAL_TYPE = [] get_response = self.cache_client.get( self.cache_name, self.__key(prompt, llm_string) ) if isinstance(get_response, CacheGet.Hit): value = get_response.value_string generations = _load_generations_from_json(value) elif isinstance(get_response, CacheGet.Miss): pass elif isinstance(get_response, CacheGet.Error): raise get_response.inner_exception return generations if generations else None def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Store llm generations in cache. Args: prompt (str): The prompt run through the language model. llm_string (str): The language model string. return_val (RETURN_VAL_TYPE): A list of language model generations. Raises: SdkException: Momento service or network error Exception: Unexpected response """ for gen in return_val: if not isinstance(gen, Generation): raise ValueError( "Momento only supports caching of normal LLM generations, " f"got {type(gen)}" ) key = self.__key(prompt, llm_string) value = _dump_generations_to_json(return_val) set_response = self.cache_client.set(self.cache_name, key, value, self.ttl) from momento.responses import CacheSet if isinstance(set_response, CacheSet.Success): pass elif isinstance(set_response, CacheSet.Error): raise set_response.inner_exception else: raise Exception(f"Unexpected response: {set_response}") def clear(self, **kwargs: Any) -> None: """Clear the cache. Raises: SdkException: Momento service or network error """ from momento.responses import CacheFlush flush_response = self.cache_client.flush_cache(self.cache_name) if isinstance(flush_response, CacheFlush.Success): pass elif isinstance(flush_response, CacheFlush.Error): raise flush_response.inner_exception
[ "langchain.utils.get_from_env", "langchain.schema.Generation", "langchain.load.dump.dumps", "langchain.vectorstores.redis.Redis.from_existing_index", "langchain.vectorstores.redis.Redis", "langchain.load.load.loads" ]
[((918, 945), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (935, 945), False, 'import logging\n'), ((3390, 3408), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (3406, 3408), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((3565, 3597), 'sqlalchemy.Column', 'Column', (['String'], {'primary_key': '(True)'}), '(String, primary_key=True)\n', (3571, 3597), False, 'from sqlalchemy import Column, Integer, String, create_engine, select\n'), ((3608, 3640), 'sqlalchemy.Column', 'Column', (['String'], {'primary_key': '(True)'}), '(String, primary_key=True)\n', (3614, 3640), False, 'from sqlalchemy import Column, Integer, String, create_engine, select\n'), ((3651, 3684), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (3657, 3684), False, 'from sqlalchemy import Column, Integer, String, create_engine, select\n'), ((3700, 3714), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (3706, 3714), False, 'from sqlalchemy import Column, Integer, String, create_engine, select\n'), ((1888, 1916), 'json.loads', 'json.loads', (['generations_json'], {}), '(generations_json)\n', (1898, 1916), False, 'import json\n'), ((6089, 6132), 'sqlalchemy.create_engine', 'create_engine', (['f"""sqlite:///{database_path}"""'], {}), "(f'sqlite:///{database_path}')\n", (6102, 6132), False, 'from sqlalchemy import Column, Integer, String, create_engine, select\n'), ((14135, 14142), 'gptcache.Cache', 'Cache', ([], {}), '()\n', (14140, 14142), False, 'from gptcache import Cache\n'), ((15447, 15479), 'gptcache.adapter.api.get', 'get', (['prompt'], {'cache_obj': '_gptcache'}), '(prompt, cache_obj=_gptcache)\n', (15450, 15479), False, 'from gptcache.adapter.api import get\n'), ((16366, 16412), 'gptcache.adapter.api.put', 'put', (['prompt', 'handled_data'], {'cache_obj': '_gptcache'}), '(prompt, handled_data, cache_obj=_gptcache)\n', (16369, 16412), False, 'from gptcache.adapter.api import put\n'), ((20261, 20303), 'momento.CredentialProvider.from_string', 'CredentialProvider.from_string', (['auth_token'], {}), '(auth_token)\n', (20291, 20303), False, 'from momento import CacheClient, Configurations, CredentialProvider\n'), ((20327, 20383), 'momento.CacheClient', 'CacheClient', (['configuration', 'credentials'], {'default_ttl': 'ttl'}), '(configuration, credentials, default_ttl=ttl)\n', (20338, 20383), False, 'from momento import CacheClient, Configurations, CredentialProvider\n'), ((1933, 1962), 'langchain.schema.Generation', 'Generation', ([], {}), '(**generation_dict)\n', (1943, 1962), False, 'from langchain.schema import Generation\n'), ((4464, 4484), 'sqlalchemy.orm.Session', 'Session', (['self.engine'], {}), '(self.engine)\n', (4471, 4484), False, 'from sqlalchemy.orm import Session\n'), ((5571, 5591), 'sqlalchemy.orm.Session', 'Session', (['self.engine'], {}), '(self.engine)\n', (5578, 5591), False, 'from sqlalchemy.orm import Session\n'), ((5773, 5793), 'sqlalchemy.orm.Session', 'Session', (['self.engine'], {}), '(self.engine)\n', (5780, 5793), False, 'from sqlalchemy.orm import Session\n'), ((9839, 9955), 'langchain.vectorstores.redis.Redis.from_existing_index', 'RedisVectorstore.from_existing_index', ([], {'embedding': 'self.embedding', 'index_name': 'index_name', 'redis_url': 'self.redis_url'}), '(embedding=self.embedding, index_name=\n index_name, redis_url=self.redis_url)\n', (9875, 9955), True, 'from langchain.vectorstores.redis import Redis as 
RedisVectorstore\n'), ((14209, 14251), 'inspect.signature', 'inspect.signature', (['self.init_gptcache_func'], {}), '(self.init_gptcache_func)\n', (14226, 14251), False, 'import inspect\n'), ((16635, 16665), 'typing.cast', 'cast', (['Cache', 'gptcache_instance'], {}), '(Cache, gptcache_instance)\n', (16639, 16665), False, 'from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Sequence, Tuple, Type, Union, cast\n'), ((17565, 17585), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (17574, 17585), False, 'from datetime import timedelta\n'), ((20128, 20154), 'momento.Configurations.Laptop.v1', 'Configurations.Laptop.v1', ([], {}), '()\n', (20152, 20154), False, 'from momento import CacheClient, Configurations, CredentialProvider\n'), ((20190, 20238), 'langchain.utils.get_from_env', 'get_from_env', (['"""auth_token"""', '"""MOMENTO_AUTH_TOKEN"""'], {}), "('auth_token', 'MOMENTO_AUTH_TOKEN')\n", (20202, 20238), False, 'from langchain.utils import get_from_env\n'), ((10061, 10178), 'langchain.vectorstores.redis.Redis', 'RedisVectorstore', ([], {'embedding_function': 'self.embedding.embed_query', 'index_name': 'index_name', 'redis_url': 'self.redis_url'}), '(embedding_function=self.embedding.embed_query, index_name=\n index_name, redis_url=self.redis_url)\n', (10077, 10178), True, 'from langchain.vectorstores.redis import Redis as RedisVectorstore\n'), ((15533, 15562), 'langchain.schema.Generation', 'Generation', ([], {}), '(**generation_dict)\n', (15543, 15562), False, 'from langchain.schema import Generation\n'), ((5481, 5491), 'langchain.load.dump.dumps', 'dumps', (['gen'], {}), '(gen)\n', (5486, 5491), False, 'from langchain.load.dump import dumps\n'), ((7268, 7289), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (7278, 7289), False, 'from langchain.schema import Generation\n'), ((14595, 14633), 'gptcache.manager.factory.get_data_manager', 'get_data_manager', ([], {'data_path': 'llm_string'}), '(data_path=llm_string)\n', (14611, 14633), False, 'from gptcache.manager.factory import get_data_manager\n'), ((15586, 15601), 'json.loads', 'json.loads', (['res'], {}), '(res)\n', (15596, 15601), False, 'import json\n'), ((4619, 4632), 'langchain.load.load.loads', 'loads', (['row[0]'], {}), '(row[0])\n', (4624, 4632), False, 'from langchain.load.load import loads\n'), ((11414, 11435), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (11424, 11435), False, 'from langchain.schema import Generation\n'), ((5189, 5212), 'langchain.schema.Generation', 'Generation', ([], {'text': 'row[0]'}), '(text=row[0])\n', (5199, 5212), False, 'from langchain.schema import Generation\n'), ((4234, 4268), 'sqlalchemy.select', 'select', (['self.cache_schema.response'], {}), '(self.cache_schema.response)\n', (4240, 4268), False, 'from sqlalchemy import Column, Integer, String, create_engine, select\n')]
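A minimal sketch of how these cache backends are switched on from application code, assuming an OPENAI_API_KEY in the environment; the prompt is arbitrary.

import langchain
from langchain.cache import SQLiteCache
from langchain.llms import OpenAI

# Every LLM generation made through langchain is now memoized in a local SQLite file.
langchain.llm_cache = SQLiteCache(database_path=".langchain.db")

llm = OpenAI(temperature=0)
print(llm("Tell me a one-line joke"))  # first call reaches the API
print(llm("Tell me a one-line joke"))  # the identical call is answered from the cache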
# INITIALIZATION # LangChain imports import langchain from langchain.llms import OpenAI from langchain.prompts import PromptTemplate from langchain.chains import LLMChain from langchain.chains import SequentialChain # General imports import os from dotenv import load_dotenv # Load API key from .env load_dotenv() os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY") # LangChain debugging settings langchain.debug = False langchain.verbose = False """ NOTE LangChain applications can be debugged easily either 1. by enabling the debug mode or 2. by enabling verbose outputs (recommended for agents). Turn on (switch to True) one of the above and see what happens when the chain executes. Reference: https://python.langchain.com/docs/guides/debugging """ # MODEL llm = OpenAI(temperature=0.6) # PROMPT MANAGEMENT def generate_recipe_names(selected_items): """ Generate a list of recipe names using a list of selected ingredients by executing an LLMChain. Args: selected_items (list): A list of ingredients selected by the user Returns: dict: A dictionary of recipe names """ # Set up prompt template prompt_template_recipe_name = PromptTemplate( input_variables=["ingredients"], template="Generate a list of meal names that can be prepared using the provided ingredients. " "Ingredients are {ingredients}. " "It's not necessary to use all of the ingredients, " "and the list can include both simple and complex meal names. " "Please consider the ingredients provided and suggest meal names accordingly.", ) # Set up chain recipe_name_chain = LLMChain( llm=llm, prompt=prompt_template_recipe_name, output_key="recipe_name" ) # Set up multichain workflow with inputs chain = SequentialChain( chains=[recipe_name_chain], input_variables=["ingredients"], output_variables=["recipe_name"], ) # Execute workflow and get response response = chain({"ingredients": selected_items}) return response def generate_recipe(recipe_name): """ Generate a comprehensive recipe using a name of a recipe as input by executing an LLMChain Args: recipe_name (str): The name of the recipe to be generated Returns: dict: A recipe (as a dictionary object) """ # Set up prompt template prompt_template_recipe = PromptTemplate( input_variables=["recipe_name"], template="Generate a recipe for {recipe_name}. Please include a list of ingredients and " "step-by-step instructions for preparing {recipe_name}. " "Please include the cooking time and any special instructions.", ) # Set up chain recipe_chain = LLMChain(llm=llm, prompt=prompt_template_recipe, output_key="recipe") # Set up multichain workflow with inputs chain = SequentialChain( chains=[recipe_chain], input_variables=["recipe_name"], output_variables=["recipe"], ) # Execute workflow and get response response = chain({"recipe_name": recipe_name}) return response
[ "langchain.chains.SequentialChain", "langchain.chains.LLMChain", "langchain.prompts.PromptTemplate", "langchain.llms.OpenAI" ]
[((303, 316), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (314, 316), False, 'from dotenv import load_dotenv\n'), ((348, 375), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (357, 375), False, 'import os\n'), ((785, 808), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.6)'}), '(temperature=0.6)\n', (791, 808), False, 'from langchain.llms import OpenAI\n'), ((1201, 1572), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['ingredients']", 'template': '"""Generate a list of meal names that can be prepared using the provided ingredients. Ingredients are {ingredients}. It\'s not necessary to use all of the ingredients, and the list can include both simple and complex meal names. Please consider the ingredients provided and suggest meal names accordingly."""'}), '(input_variables=[\'ingredients\'], template=\n "Generate a list of meal names that can be prepared using the provided ingredients. Ingredients are {ingredients}. It\'s not necessary to use all of the ingredients, and the list can include both simple and complex meal names. Please consider the ingredients provided and suggest meal names accordingly."\n )\n', (1215, 1572), False, 'from langchain.prompts import PromptTemplate\n'), ((1674, 1753), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt_template_recipe_name', 'output_key': '"""recipe_name"""'}), "(llm=llm, prompt=prompt_template_recipe_name, output_key='recipe_name')\n", (1682, 1753), False, 'from langchain.chains import LLMChain\n'), ((1826, 1940), 'langchain.chains.SequentialChain', 'SequentialChain', ([], {'chains': '[recipe_name_chain]', 'input_variables': "['ingredients']", 'output_variables': "['recipe_name']"}), "(chains=[recipe_name_chain], input_variables=['ingredients'],\n output_variables=['recipe_name'])\n", (1841, 1940), False, 'from langchain.chains import SequentialChain\n'), ((2433, 2697), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['recipe_name']", 'template': '"""Generate a recipe for {recipe_name}. Please include a list of ingredients and step-by-step instructions for preparing {recipe_name}. Please include the cooking time and any special instructions."""'}), "(input_variables=['recipe_name'], template=\n 'Generate a recipe for {recipe_name}. Please include a list of ingredients and step-by-step instructions for preparing {recipe_name}. Please include the cooking time and any special instructions.'\n )\n", (2447, 2697), False, 'from langchain.prompts import PromptTemplate\n'), ((2772, 2841), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt_template_recipe', 'output_key': '"""recipe"""'}), "(llm=llm, prompt=prompt_template_recipe, output_key='recipe')\n", (2780, 2841), False, 'from langchain.chains import LLMChain\n'), ((2900, 3004), 'langchain.chains.SequentialChain', 'SequentialChain', ([], {'chains': '[recipe_chain]', 'input_variables': "['recipe_name']", 'output_variables': "['recipe']"}), "(chains=[recipe_chain], input_variables=['recipe_name'],\n output_variables=['recipe'])\n", (2915, 3004), False, 'from langchain.chains import SequentialChain\n')]
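A minimal usage sketch for the two recipe chains in the script above; this is not part of the original file, and it assumes the functions are in scope (or importable from the module) and that OPENAI_API_KEY is set via the .env file.

names = generate_recipe_names(["eggs", "spinach", "feta"])
print(names["recipe_name"])   # newline-separated meal-name suggestions from the LLM

recipe = generate_recipe("Spinach and feta omelette")
print(recipe["recipe"])       # ingredients, steps and cooking time as plain text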
from langchain.vectorstores import FAISS from langchain.embeddings import OpenAIEmbeddings from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.document_loaders import DirectoryLoader, TextLoader import bibtexparser import langchain import os import glob from dotenv import load_dotenv import openai import constants import time # Set OpenAI API Key load_dotenv() os.environ["OPENAI_API_KEY"] = constants.APIKEY openai.api_key = constants.APIKEY # Set paths source_path = './data/src/' destination_file = './data/ingested.txt' store_path = './vectorstore/' bibtex_file_path = '/home/wouter/Tools/Zotero/bibtex/library.bib' # Load documents print("===Loading documents===") text_loader_kwargs={'autodetect_encoding': True} loader = DirectoryLoader(source_path, show_progress=True, use_multithreading=True, loader_cls=TextLoader, loader_kwargs=text_loader_kwargs) documents = loader.load() if len(documents) == 0: print("No new documents found") quit() # Add metadata based in bibliographic information print("===Adding metadata===") # Read the BibTeX file with open(bibtex_file_path) as bibtex_file: bib_database = bibtexparser.load(bibtex_file) # Get a list of all text file names in the directory text_file_names = os.listdir(source_path) metadata_store = [] # Go through each entry in the BibTeX file for entry in bib_database.entries: # Check if the 'file' key exists in the entry if 'file' in entry: # Extract the file name from the 'file' field and remove the extension pdf_file_name = os.path.basename(entry['file']).replace('.pdf', '') # Check if there is a text file with the same name if f'{pdf_file_name}.txt' in text_file_names: # If a match is found, append the metadata to the list metadata_store.append(entry) for document in documents: for entry in metadata_store: doc_name = os.path.basename(document.metadata['source']).replace('.txt', '') ent_name = os.path.basename(entry['file']).replace('.pdf', '') if doc_name == ent_name: document.metadata.update(entry) # Initialize text splitter print("===Splitting documents into chunks===") text_splitter = RecursiveCharacterTextSplitter( chunk_size = 1500, chunk_overlap = 150, length_function = len, add_start_index = True, ) split_documents = text_splitter.split_documents(documents) embeddings = OpenAIEmbeddings( show_progress_bar=True, request_timeout=60, ) print("===Embedding text and creating database===") new_db = FAISS.from_documents(split_documents, embeddings) print("===Merging new and old database===") old_db = FAISS.load_local(store_path, embeddings) old_db.merge_from(new_db) old_db.save_local(store_path, "index") # Record the files that we have added print("===Recording ingested files===") with open(destination_file, 'a') as f: for document in documents: f.write(os.path.basename(document.metadata['source'])) f.write('\n')
[ "langchain.document_loaders.DirectoryLoader", "langchain.vectorstores.FAISS.load_local", "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.vectorstores.FAISS.from_documents", "langchain.embeddings.OpenAIEmbeddings" ]
[((380, 393), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (391, 393), False, 'from dotenv import load_dotenv\n'), ((764, 898), 'langchain.document_loaders.DirectoryLoader', 'DirectoryLoader', (['source_path'], {'show_progress': '(True)', 'use_multithreading': '(True)', 'loader_cls': 'TextLoader', 'loader_kwargs': 'text_loader_kwargs'}), '(source_path, show_progress=True, use_multithreading=True,\n loader_cls=TextLoader, loader_kwargs=text_loader_kwargs)\n', (779, 898), False, 'from langchain.document_loaders import DirectoryLoader, TextLoader\n'), ((1364, 1387), 'os.listdir', 'os.listdir', (['source_path'], {}), '(source_path)\n', (1374, 1387), False, 'import os\n'), ((2324, 2437), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1500)', 'chunk_overlap': '(150)', 'length_function': 'len', 'add_start_index': '(True)'}), '(chunk_size=1500, chunk_overlap=150,\n length_function=len, add_start_index=True)\n', (2354, 2437), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2536, 2596), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'show_progress_bar': '(True)', 'request_timeout': '(60)'}), '(show_progress_bar=True, request_timeout=60)\n', (2552, 2596), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((2670, 2719), 'langchain.vectorstores.FAISS.from_documents', 'FAISS.from_documents', (['split_documents', 'embeddings'], {}), '(split_documents, embeddings)\n', (2690, 2719), False, 'from langchain.vectorstores import FAISS\n'), ((2774, 2814), 'langchain.vectorstores.FAISS.load_local', 'FAISS.load_local', (['store_path', 'embeddings'], {}), '(store_path, embeddings)\n', (2790, 2814), False, 'from langchain.vectorstores import FAISS\n'), ((1261, 1291), 'bibtexparser.load', 'bibtexparser.load', (['bibtex_file'], {}), '(bibtex_file)\n', (1278, 1291), False, 'import bibtexparser\n'), ((3045, 3090), 'os.path.basename', 'os.path.basename', (["document.metadata['source']"], {}), "(document.metadata['source'])\n", (3061, 3090), False, 'import os\n'), ((1664, 1695), 'os.path.basename', 'os.path.basename', (["entry['file']"], {}), "(entry['file'])\n", (1680, 1695), False, 'import os\n'), ((2019, 2064), 'os.path.basename', 'os.path.basename', (["document.metadata['source']"], {}), "(document.metadata['source'])\n", (2035, 2064), False, 'import os\n'), ((2104, 2135), 'os.path.basename', 'os.path.basename', (["entry['file']"], {}), "(entry['file'])\n", (2120, 2135), False, 'import os\n')]
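A small follow-up sketch (not in the original ingestion script above) showing how the merged FAISS store can be loaded back and queried; the paths and embedding class mirror the script, and the question text is only an example.

from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings

embeddings = OpenAIEmbeddings()
db = FAISS.load_local("./vectorstore/", embeddings)
for doc in db.similarity_search("What method does the paper propose?", k=4):
    # each hit carries the bibtex metadata attached during ingestion
    print(doc.metadata.get("title", doc.metadata.get("source")), "->", doc.page_content[:120])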
from llama_index import ( ServiceContext, SimpleDirectoryReader, StorageContext, VectorStoreIndex, ) from llama_index.vector_stores.qdrant import QdrantVectorStore from tqdm import tqdm import arxiv import os import argparse import yaml import qdrant_client from langchain.embeddings.huggingface import HuggingFaceEmbeddings from llama_index.embeddings import LangchainEmbedding from llama_index import ServiceContext from llama_index.llms import Ollama class Data: def __init__(self, config): self.config = config def _create_data_folder(self, download_path): data_path = download_path if not os.path.exists(data_path): os.makedirs(self.config["data_path"]) print("Output folder created") else: print("Output folder already exists.") def download_papers(self, search_query, download_path, max_results): self._create_data_folder(download_path) client = arxiv.Client() search = arxiv.Search( query=search_query, max_results=max_results, sort_by=arxiv.SortCriterion.SubmittedDate, ) results = list(client.results(search)) for paper in tqdm(results): if os.path.exists(download_path): paper_title = (paper.title).replace(" ", "_") paper.download_pdf(dirpath=download_path, filename=f"{paper_title}.pdf") print(f"{paper.title} Downloaded.") def ingest(self, embedder, llm): print("Indexing data...") documents = SimpleDirectoryReader(self.config["data_path"]).load_data() client = qdrant_client.QdrantClient(url=self.config["qdrant_url"]) qdrant_vector_store = QdrantVectorStore( client=client, collection_name=self.config["collection_name"] ) storage_context = StorageContext.from_defaults(vector_store=qdrant_vector_store) # service_context = ServiceContext.from_defaults( # llm=llm, embed_model=embedder, chunk_size=self.config["chunk_size"] # ) service_context = ServiceContext.from_defaults( llm=None, embed_model=embedder, chunk_size=self.config["chunk_size"] ) index = VectorStoreIndex.from_documents( documents, storage_context=storage_context, service_context=service_context ) print( f"Data indexed successfully to Qdrant. Collection: {self.config['collection_name']}" ) return index if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "-q", "--query", type=str, default=False, help="Download papers from arxiv with this query.", ) # parser.add_argument( # "-o", "--output", type=str, default=False, help="Download path." # ) parser.add_argument( "-m", "--max", type=int, default=False, help="Max results to download." ) parser.add_argument( "-i", "--ingest", action=argparse.BooleanOptionalAction, default=False, help="Ingest data to Qdrant vector Database.", ) args = parser.parse_args() config_file = "config.yml" with open(config_file, "r") as conf: config = yaml.safe_load(conf) data = Data(config) if args.query: data.download_papers( search_query=args.query, download_path=config["data_path"], max_results=args.max, ) if args.ingest: print("Loading Embedder...") embed_model = LangchainEmbedding( HuggingFaceEmbeddings(model_name=config["embedding_model"]) ) llm = Ollama(model=config["llm_name"], base_url=config["llm_url"]) data.ingest(embedder=embed_model, llm=llm)
[ "langchain.embeddings.huggingface.HuggingFaceEmbeddings" ]
[((2566, 2591), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2589, 2591), False, 'import argparse\n'), ((970, 984), 'arxiv.Client', 'arxiv.Client', ([], {}), '()\n', (982, 984), False, 'import arxiv\n'), ((1003, 1108), 'arxiv.Search', 'arxiv.Search', ([], {'query': 'search_query', 'max_results': 'max_results', 'sort_by': 'arxiv.SortCriterion.SubmittedDate'}), '(query=search_query, max_results=max_results, sort_by=arxiv.\n SortCriterion.SubmittedDate)\n', (1015, 1108), False, 'import arxiv\n'), ((1220, 1233), 'tqdm.tqdm', 'tqdm', (['results'], {}), '(results)\n', (1224, 1233), False, 'from tqdm import tqdm\n'), ((1654, 1711), 'qdrant_client.QdrantClient', 'qdrant_client.QdrantClient', ([], {'url': "self.config['qdrant_url']"}), "(url=self.config['qdrant_url'])\n", (1680, 1711), False, 'import qdrant_client\n'), ((1742, 1827), 'llama_index.vector_stores.qdrant.QdrantVectorStore', 'QdrantVectorStore', ([], {'client': 'client', 'collection_name': "self.config['collection_name']"}), "(client=client, collection_name=self.config['collection_name']\n )\n", (1759, 1827), False, 'from llama_index.vector_stores.qdrant import QdrantVectorStore\n'), ((1871, 1933), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'qdrant_vector_store'}), '(vector_store=qdrant_vector_store)\n', (1899, 1933), False, 'from llama_index import ServiceContext, SimpleDirectoryReader, StorageContext, VectorStoreIndex\n'), ((2112, 2215), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'None', 'embed_model': 'embedder', 'chunk_size': "self.config['chunk_size']"}), "(llm=None, embed_model=embedder, chunk_size=\n self.config['chunk_size'])\n", (2140, 2215), False, 'from llama_index import ServiceContext\n'), ((2250, 2362), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(documents, storage_context=storage_context,\n service_context=service_context)\n', (2281, 2362), False, 'from llama_index import ServiceContext, SimpleDirectoryReader, StorageContext, VectorStoreIndex\n'), ((3283, 3303), 'yaml.safe_load', 'yaml.safe_load', (['conf'], {}), '(conf)\n', (3297, 3303), False, 'import yaml\n'), ((3700, 3760), 'llama_index.llms.Ollama', 'Ollama', ([], {'model': "config['llm_name']", 'base_url': "config['llm_url']"}), "(model=config['llm_name'], base_url=config['llm_url'])\n", (3706, 3760), False, 'from llama_index.llms import Ollama\n'), ((646, 671), 'os.path.exists', 'os.path.exists', (['data_path'], {}), '(data_path)\n', (660, 671), False, 'import os\n'), ((685, 722), 'os.makedirs', 'os.makedirs', (["self.config['data_path']"], {}), "(self.config['data_path'])\n", (696, 722), False, 'import os\n'), ((1250, 1279), 'os.path.exists', 'os.path.exists', (['download_path'], {}), '(download_path)\n', (1264, 1279), False, 'import os\n'), ((3616, 3675), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': "config['embedding_model']"}), "(model_name=config['embedding_model'])\n", (3637, 3675), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((1576, 1623), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (["self.config['data_path']"], {}), "(self.config['data_path'])\n", (1597, 1623), False, 'from llama_index import ServiceContext, SimpleDirectoryReader, StorageContext, VectorStoreIndex\n')]
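A hedged continuation of the script above (not in the original file): once the --ingest path has run, the index returned by Data.ingest() can be queried without an LLM through a plain retriever. The question text and similarity_top_k value are illustrative only.

index = data.ingest(embedder=embed_model, llm=llm)
retriever = index.as_retriever(similarity_top_k=3)
for hit in retriever.retrieve("What datasets were used for evaluation?"):
    # NodeWithScore objects expose the similarity score and the underlying node text
    print(round(hit.score or 0.0, 3), hit.node.get_content()[:120])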
import langchain_helper as lch import streamlit as st st.title('Pets Name Generator') user_animal_type = st.sidebar.selectbox("What is your pet?",("Cat", "Dog","Rabbit","Hamster",)) user_pet_color = st.sidebar.text_area(f"What color is your {user_animal_type}?",max_chars=15) if user_pet_color: response = lch.generate_pet_name(user_animal_type,user_pet_color) st.text(response['pet_name'])
[ "langchain_helper.generate_pet_name" ]
[((56, 87), 'streamlit.title', 'st.title', (['"""Pets Name Generator"""'], {}), "('Pets Name Generator')\n", (64, 87), True, 'import streamlit as st\n'), ((108, 186), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""What is your pet?"""', "('Cat', 'Dog', 'Rabbit', 'Hamster')"], {}), "('What is your pet?', ('Cat', 'Dog', 'Rabbit', 'Hamster'))\n", (128, 186), True, 'import streamlit as st\n'), ((203, 280), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', (['f"""What color is your {user_animal_type}?"""'], {'max_chars': '(15)'}), "(f'What color is your {user_animal_type}?', max_chars=15)\n", (223, 280), True, 'import streamlit as st\n'), ((315, 370), 'langchain_helper.generate_pet_name', 'lch.generate_pet_name', (['user_animal_type', 'user_pet_color'], {}), '(user_animal_type, user_pet_color)\n', (336, 370), True, 'import langchain_helper as lch\n'), ((374, 403), 'streamlit.text', 'st.text', (["response['pet_name']"], {}), "(response['pet_name'])\n", (381, 403), True, 'import streamlit as st\n')]
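The langchain_helper module imported above is not shown; the sketch below is a hypothetical generate_pet_name that would satisfy the response['pet_name'] access in the Streamlit app. The prompt wording and temperature are assumptions, not the original implementation.

from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

def generate_pet_name(animal_type, pet_color):
    # Hypothetical implementation; the real langchain_helper is not part of this file.
    llm = OpenAI(temperature=0.7)
    prompt = PromptTemplate(
        input_variables=["animal_type", "pet_color"],
        template="I have a {animal_type} pet and it is {pet_color}. Suggest five cool names for it.",
    )
    chain = LLMChain(llm=llm, prompt=prompt, output_key="pet_name")
    return chain({"animal_type": animal_type, "pet_color": pet_color})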
import logging import os import pickle import tempfile import streamlit as st from dotenv import load_dotenv from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames as GenParams from ibm_watson_machine_learning.foundation_models.utils.enums import ModelTypes from langchain.callbacks import StdOutCallbackHandler from langchain.chains.question_answering import load_qa_chain from langchain.document_loaders import PyPDFLoader from langchain.embeddings import (HuggingFaceHubEmbeddings, HuggingFaceInstructEmbeddings) from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.vectorstores import FAISS, Chroma from PIL import Image from langChainInterface import LangChainInterface # Most GENAI logs are at Debug level. logging.basicConfig(level=os.environ.get("LOGLEVEL", "DEBUG")) st.set_page_config( page_title="Retrieval Augmented Generation", page_icon="🧊", layout="wide", initial_sidebar_state="expanded" ) st.header("Retrieval Augmented Generation with watsonx.ai 💬") # chunk_size=1500 # chunk_overlap = 200 load_dotenv() handler = StdOutCallbackHandler() api_key = os.getenv("API_KEY", None) ibm_cloud_url = os.getenv("IBM_CLOUD_URL", None) project_id = os.getenv("PROJECT_ID", None) if api_key is None or ibm_cloud_url is None or project_id is None: print("Ensure you copied the .env file that you created earlier into the same directory as this notebook") else: creds = { "url": ibm_cloud_url, "apikey": api_key } GEN_API_KEY = os.getenv("GENAI_KEY", None) # Sidebar contents with st.sidebar: st.title("RAG App") st.markdown(''' ## About This app is an LLM-powered RAG built using: - [IBM Generative AI SDK](https://github.com/IBM/ibm-generative-ai/) - [HuggingFace](https://huggingface.co/) - [IBM watsonx.ai](https://www.ibm.com/products/watsonx-ai) LLM model ''') st.write('Powered by [IBM watsonx.ai](https://www.ibm.com/products/watsonx-ai)') image = Image.open('watsonxai.jpg') st.image(image, caption='Powered by watsonx.ai') max_new_tokens= st.number_input('max_new_tokens',1,1024,value=300) min_new_tokens= st.number_input('min_new_tokens',0,value=15) repetition_penalty = st.number_input('repetition_penalty',1,2,value=2) decoding = st.text_input( "Decoding", "greedy", key="placeholder", ) uploaded_files = st.file_uploader("Choose a PDF file", accept_multiple_files=True) @st.cache_data def read_pdf(uploaded_files,chunk_size =250,chunk_overlap=20): for uploaded_file in uploaded_files: bytes_data = uploaded_file.read() with tempfile.NamedTemporaryFile(mode='wb', delete=False) as temp_file: # Write content to the temporary file temp_file.write(bytes_data) filepath = temp_file.name with st.spinner('Waiting for the file to upload'): loader = PyPDFLoader(filepath) data = loader.load() text_splitter = RecursiveCharacterTextSplitter(chunk_size= chunk_size, chunk_overlap=chunk_overlap) docs = text_splitter.split_documents(data) return docs @st.cache_data def read_push_embeddings(): embeddings = HuggingFaceHubEmbeddings(repo_id="sentence-transformers/all-MiniLM-L6-v2") if os.path.exists("db.pickle"): with open("db.pickle",'rb') as file_name: db = pickle.load(file_name) else: db = FAISS.from_documents(docs, embeddings) with open('db.pickle','wb') as file_name : pickle.dump(db,file_name) st.write("\n") return db # show user input if user_question := st.text_input( "Ask a question about your Policy Document:" ): docs = read_pdf(uploaded_files) db = read_push_embeddings() docs = db.similarity_search(user_question) params = { GenParams.DECODING_METHOD: "greedy", 
GenParams.MIN_NEW_TOKENS: 30, GenParams.MAX_NEW_TOKENS: 300, GenParams.TEMPERATURE: 0.0, # GenParams.TOP_K: 100, # GenParams.TOP_P: 1, GenParams.REPETITION_PENALTY: 1 } model_llm = LangChainInterface(model=ModelTypes.LLAMA_2_70B_CHAT.value, credentials=creds, params=params, project_id=project_id) chain = load_qa_chain(model_llm, chain_type="stuff") response = chain.run(input_documents=docs, question=user_question) st.text_area(label="Model Response", value=response, height=100) st.write()
[ "langchain.chains.question_answering.load_qa_chain", "langchain.embeddings.HuggingFaceHubEmbeddings", "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.callbacks.StdOutCallbackHandler", "langchain.vectorstores.FAISS.from_documents", "langchain.document_loaders.PyPDFLoader" ]
[((861, 993), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Retrieval Augmented Generation"""', 'page_icon': '"""🧊"""', 'layout': '"""wide"""', 'initial_sidebar_state': '"""expanded"""'}), "(page_title='Retrieval Augmented Generation', page_icon=\n '🧊', layout='wide', initial_sidebar_state='expanded')\n", (879, 993), True, 'import streamlit as st\n'), ((1007, 1068), 'streamlit.header', 'st.header', (['"""Retrieval Augmented Generation with watsonx.ai 💬"""'], {}), "('Retrieval Augmented Generation with watsonx.ai 💬')\n", (1016, 1068), True, 'import streamlit as st\n'), ((1110, 1123), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1121, 1123), False, 'from dotenv import load_dotenv\n'), ((1135, 1158), 'langchain.callbacks.StdOutCallbackHandler', 'StdOutCallbackHandler', ([], {}), '()\n', (1156, 1158), False, 'from langchain.callbacks import StdOutCallbackHandler\n'), ((1170, 1196), 'os.getenv', 'os.getenv', (['"""API_KEY"""', 'None'], {}), "('API_KEY', None)\n", (1179, 1196), False, 'import os\n'), ((1213, 1245), 'os.getenv', 'os.getenv', (['"""IBM_CLOUD_URL"""', 'None'], {}), "('IBM_CLOUD_URL', None)\n", (1222, 1245), False, 'import os\n'), ((1259, 1288), 'os.getenv', 'os.getenv', (['"""PROJECT_ID"""', 'None'], {}), "('PROJECT_ID', None)\n", (1268, 1288), False, 'import os\n'), ((1566, 1594), 'os.getenv', 'os.getenv', (['"""GENAI_KEY"""', 'None'], {}), "('GENAI_KEY', None)\n", (1575, 1594), False, 'import os\n'), ((2468, 2533), 'streamlit.file_uploader', 'st.file_uploader', (['"""Choose a PDF file"""'], {'accept_multiple_files': '(True)'}), "('Choose a PDF file', accept_multiple_files=True)\n", (2484, 2533), True, 'import streamlit as st\n'), ((1636, 1655), 'streamlit.title', 'st.title', (['"""RAG App"""'], {}), "('RAG App')\n", (1644, 1655), True, 'import streamlit as st\n'), ((1660, 1949), 'streamlit.markdown', 'st.markdown', (['"""\n ## About\n This app is an LLM-powered RAG built using:\n - [IBM Generative AI SDK](https://github.com/IBM/ibm-generative-ai/)\n - [HuggingFace](https://huggingface.co/)\n - [IBM watsonx.ai](https://www.ibm.com/products/watsonx-ai) LLM model\n \n """'], {}), '(\n """\n ## About\n This app is an LLM-powered RAG built using:\n - [IBM Generative AI SDK](https://github.com/IBM/ibm-generative-ai/)\n - [HuggingFace](https://huggingface.co/)\n - [IBM watsonx.ai](https://www.ibm.com/products/watsonx-ai) LLM model\n \n """\n )\n', (1671, 1949), True, 'import streamlit as st\n'), ((1944, 2029), 'streamlit.write', 'st.write', (['"""Powered by [IBM watsonx.ai](https://www.ibm.com/products/watsonx-ai)"""'], {}), "('Powered by [IBM watsonx.ai](https://www.ibm.com/products/watsonx-ai)'\n )\n", (1952, 2029), True, 'import streamlit as st\n'), ((2037, 2064), 'PIL.Image.open', 'Image.open', (['"""watsonxai.jpg"""'], {}), "('watsonxai.jpg')\n", (2047, 2064), False, 'from PIL import Image\n'), ((2069, 2117), 'streamlit.image', 'st.image', (['image'], {'caption': '"""Powered by watsonx.ai"""'}), "(image, caption='Powered by watsonx.ai')\n", (2077, 2117), True, 'import streamlit as st\n'), ((2138, 2191), 'streamlit.number_input', 'st.number_input', (['"""max_new_tokens"""', '(1)', '(1024)'], {'value': '(300)'}), "('max_new_tokens', 1, 1024, value=300)\n", (2153, 2191), True, 'import streamlit as st\n'), ((2209, 2255), 'streamlit.number_input', 'st.number_input', (['"""min_new_tokens"""', '(0)'], {'value': '(15)'}), "('min_new_tokens', 0, value=15)\n", (2224, 2255), True, 'import streamlit as st\n'), ((2279, 2331), 'streamlit.number_input', 
'st.number_input', (['"""repetition_penalty"""', '(1)', '(2)'], {'value': '(2)'}), "('repetition_penalty', 1, 2, value=2)\n", (2294, 2331), True, 'import streamlit as st\n'), ((2344, 2398), 'streamlit.text_input', 'st.text_input', (['"""Decoding"""', '"""greedy"""'], {'key': '"""placeholder"""'}), "('Decoding', 'greedy', key='placeholder')\n", (2357, 2398), True, 'import streamlit as st\n'), ((3284, 3358), 'langchain.embeddings.HuggingFaceHubEmbeddings', 'HuggingFaceHubEmbeddings', ([], {'repo_id': '"""sentence-transformers/all-MiniLM-L6-v2"""'}), "(repo_id='sentence-transformers/all-MiniLM-L6-v2')\n", (3308, 3358), False, 'from langchain.embeddings import HuggingFaceHubEmbeddings, HuggingFaceInstructEmbeddings\n'), ((3366, 3393), 'os.path.exists', 'os.path.exists', (['"""db.pickle"""'], {}), "('db.pickle')\n", (3380, 3393), False, 'import os\n'), ((3719, 3778), 'streamlit.text_input', 'st.text_input', (['"""Ask a question about your Policy Document:"""'], {}), "('Ask a question about your Policy Document:')\n", (3732, 3778), True, 'import streamlit as st\n'), ((4198, 4319), 'langChainInterface.LangChainInterface', 'LangChainInterface', ([], {'model': 'ModelTypes.LLAMA_2_70B_CHAT.value', 'credentials': 'creds', 'params': 'params', 'project_id': 'project_id'}), '(model=ModelTypes.LLAMA_2_70B_CHAT.value, credentials=\n creds, params=params, project_id=project_id)\n', (4216, 4319), False, 'from langChainInterface import LangChainInterface\n'), ((4327, 4371), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['model_llm'], {'chain_type': '"""stuff"""'}), "(model_llm, chain_type='stuff')\n", (4340, 4371), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((4449, 4513), 'streamlit.text_area', 'st.text_area', ([], {'label': '"""Model Response"""', 'value': 'response', 'height': '(100)'}), "(label='Model Response', value=response, height=100)\n", (4461, 4513), True, 'import streamlit as st\n'), ((4518, 4528), 'streamlit.write', 'st.write', ([], {}), '()\n', (4526, 4528), True, 'import streamlit as st\n'), ((823, 858), 'os.environ.get', 'os.environ.get', (['"""LOGLEVEL"""', '"""DEBUG"""'], {}), "('LOGLEVEL', 'DEBUG')\n", (837, 858), False, 'import os\n'), ((3513, 3551), 'langchain.vectorstores.FAISS.from_documents', 'FAISS.from_documents', (['docs', 'embeddings'], {}), '(docs, embeddings)\n', (3533, 3551), False, 'from langchain.vectorstores import FAISS, Chroma\n'), ((3651, 3665), 'streamlit.write', 'st.write', (['"""\n"""'], {}), "('\\n')\n", (3659, 3665), True, 'import streamlit as st\n'), ((2705, 2757), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""wb"""', 'delete': '(False)'}), "(mode='wb', delete=False)\n", (2732, 2757), False, 'import tempfile\n'), ((3462, 3484), 'pickle.load', 'pickle.load', (['file_name'], {}), '(file_name)\n', (3473, 3484), False, 'import pickle\n'), ((3617, 3643), 'pickle.dump', 'pickle.dump', (['db', 'file_name'], {}), '(db, file_name)\n', (3628, 3643), False, 'import pickle\n'), ((2905, 2949), 'streamlit.spinner', 'st.spinner', (['"""Waiting for the file to upload"""'], {}), "('Waiting for the file to upload')\n", (2915, 2949), True, 'import streamlit as st\n'), ((2973, 2994), 'langchain.document_loaders.PyPDFLoader', 'PyPDFLoader', (['filepath'], {}), '(filepath)\n', (2984, 2994), False, 'from langchain.document_loaders import PyPDFLoader\n'), ((3058, 3145), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': 'chunk_size', 
'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=\n chunk_overlap)\n', (3088, 3145), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n')]
from langchain_google_genai import ChatGoogleGenerativeAI from langchain.prompts.chat import ( ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, ) from langchain.chains import LLMChain, ConversationChain from langchain.memory import ConversationBufferMemory import getpass import os import langchain import readline # NOTE: importing readline fixes the Delete key behaviour in input() if "GOOGLE_API_KEY" not in os.environ: os.environ["GOOGLE_API_KEY"] = getpass.getpass( "Provide your Google API Key") prompt = ChatPromptTemplate(messages=[ SystemMessagePromptTemplate.from_template( "You are a nice chatbot having a conversation with a human."), # The `variable_name` here is what must align with memory MessagesPlaceholder(variable_name="chat_history"), HumanMessagePromptTemplate.from_template("{text}"), ]) chat = ChatGoogleGenerativeAI(model="gemini-pro", convert_system_message_to_human=True) memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True) verbose = "DEBUG" in os.environ # Chaining option 1: # conversation = chat_prompt | chat # use conversation.invoke, result: {content: string} # Chaining option 2: conversation = LLMChain(llm=chat, prompt=prompt, verbose=verbose, memory=memory) # use conversation, result: string class Color: PURPLE = '\033[95m' CYAN = '\033[96m' DARKCYAN = '\033[36m' BLUE = '\033[94m' GREEN = '\033[92m' YELLOW = '\033[93m' RED = '\033[91m' BOLD = '\033[1m' UNDERLINE = '\033[4m' END = '\033[0m' while True: try: message = input(f"{Color.BLUE}✎✎✎ {Color.END}") if message.lower() == "exit": exit() result = conversation({ "text": message, }) print(f"{result['text']}") except KeyboardInterrupt: exit()
[ "langchain_google_genai.ChatGoogleGenerativeAI", "langchain.prompts.chat.SystemMessagePromptTemplate.from_template", "langchain.memory.ConversationBufferMemory", "langchain.prompts.chat.MessagesPlaceholder", "langchain.prompts.chat.HumanMessagePromptTemplate.from_template", "langchain.chains.LLMChain" ]
[((898, 983), 'langchain_google_genai.ChatGoogleGenerativeAI', 'ChatGoogleGenerativeAI', ([], {'model': '"""gemini-pro"""', 'convert_system_message_to_human': '(True)'}), "(model='gemini-pro', convert_system_message_to_human=True\n )\n", (920, 983), False, 'from langchain_google_genai import ChatGoogleGenerativeAI\n'), ((1019, 1092), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(memory_key='chat_history', return_messages=True)\n", (1043, 1092), False, 'from langchain.memory import ConversationBufferMemory\n'), ((1277, 1342), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'chat', 'prompt': 'prompt', 'verbose': 'verbose', 'memory': 'memory'}), '(llm=chat, prompt=prompt, verbose=verbose, memory=memory)\n', (1285, 1342), False, 'from langchain.chains import LLMChain, ConversationChain\n'), ((500, 546), 'getpass.getpass', 'getpass.getpass', (['"""Provide your Google API Key"""'], {}), "('Provide your Google API Key')\n", (515, 546), False, 'import getpass\n'), ((600, 708), 'langchain.prompts.chat.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['"""You are a nice chatbot having a conversation with a human."""'], {}), "(\n 'You are a nice chatbot having a conversation with a human.')\n", (641, 708), False, 'from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate\n'), ((780, 829), 'langchain.prompts.chat.MessagesPlaceholder', 'MessagesPlaceholder', ([], {'variable_name': '"""chat_history"""'}), "(variable_name='chat_history')\n", (799, 829), False, 'from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate\n'), ((835, 885), 'langchain.prompts.chat.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{text}"""'], {}), "('{text}')\n", (875, 885), False, 'from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate\n')]
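The comments in the script above mention a commented-out "chaining option 1" (piping the prompt into the model). Below is a sketch of that LCEL form, assuming a LangChain version with runnable/pipe support; note it does not wire in ConversationBufferMemory, which is why the script keeps the LLMChain route.

from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.prompts.chat import ChatPromptTemplate

prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a nice chatbot having a conversation with a human."),
    ("human", "{text}"),
])
chat = ChatGoogleGenerativeAI(model="gemini-pro", convert_system_message_to_human=True)
chain = prompt | chat                      # option 1: prompt piped into the model
result = chain.invoke({"text": "Hello!"})  # returns an AIMessage
print(result.content)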
import os import re from typing import Optional import langchain import paperqa import paperscraper from langchain import SerpAPIWrapper, OpenAI from langchain.base_language import BaseLanguageModel from langchain.chains import LLMChain from langchain.tools import BaseTool from pydantic import validator from pypdf.errors import PdfReadError class LitSearch(BaseTool): name = "LiteratureSearch" description = ( "Input a specific question, returns an answer from literature search. " "Do not mention any specific molecule names, but use more general features to formulate your questions." ) llm: BaseLanguageModel query_chain: Optional[LLMChain] = None pdir: str = "query" searches: int = 2 verobse: bool = False docs: Optional[paperqa.Docs] = None @validator("query_chain", always=True) def init_query_chain(cls, v, values): if v is None: search_prompt = langchain.prompts.PromptTemplate( input_variables=["question", "count"], template="We want to answer the following question: {question} \n" "Provide {count} keyword searches (one search per line) " "that will find papers to help answer the question. " "Do not use boolean operators. " "Make some searches broad and some narrow. " "Do not use boolean operators or quotes.\n\n" "1. ", ) llm = OpenAI(temperature=0) v = LLMChain(llm=llm, prompt=search_prompt) return v @validator("pdir", always=True) def init_pdir(cls, v): if not os.path.isdir(v): os.mkdir(v) return v def paper_search(self, search): try: return paperscraper.search_papers( search, pdir=self.pdir, batch_size=6, limit=4, verbose=False ) except KeyError: return {} def _run(self, query: str) -> str: if self.verbose: print("\n\nChoosing search terms\n1. ", end="") searches = self.query_chain.run(question=query, count=self.searches) print("") queries = [s for s in searches.split("\n") if len(s) > 3] # remove 2., 3. from queries queries = [re.sub(r"^\d+\.\s*", "", q) for q in queries] # remove quotes queries = [re.sub(r"\"", "", q) for q in queries] papers = {} for q in queries: papers.update(self.paper_search(q)) if self.verbose: print(f"retrieved {len(papers)} papers total") if len(papers) == 0: return "Not enough papers found" if self.docs is None: self.docs = paperqa.Docs( llm=self.llm, summary_llm="gpt-3.5-turbo", memory=True ) not_loaded = 0 for path, data in papers.items(): try: self.docs.add(path, citation=data["citation"], docname=data["key"]) except (ValueError, PdfReadError): not_loaded += 1 if not_loaded: print(f"\nFound {len(papers.items())} papers, couldn't load {not_loaded}") return self.docs.query(query, length_prompt="about 100 words").answer async def _arun(self, query: str) -> str: """Use the tool asynchronously.""" raise NotImplementedError() class WebSearch(BaseTool): name = "WebSearch" description = ( "Input search query, returns snippets from web search. " "Prefer LitSearch tool over this tool, except for simple questions." ) serpapi: SerpAPIWrapper = None def __init__(self, search_engine="google"): super(WebSearch, self).__init__() self.serpapi = SerpAPIWrapper( serpapi_api_key=os.getenv("SERP_API_KEY"), search_engine=search_engine ) def _run(self, query: str) -> str: try: return self.serpapi.run(query) except: return "No results, try another search" async def _arun(self, query: str) -> str: """Use the tool asynchronously.""" raise NotImplementedError()
[ "langchain.chains.LLMChain", "langchain.prompts.PromptTemplate", "langchain.OpenAI" ]
[((810, 847), 'pydantic.validator', 'validator', (['"""query_chain"""'], {'always': '(True)'}), "('query_chain', always=True)\n", (819, 847), False, 'from pydantic import validator\n'), ((1585, 1615), 'pydantic.validator', 'validator', (['"""pdir"""'], {'always': '(True)'}), "('pdir', always=True)\n", (1594, 1615), False, 'from pydantic import validator\n'), ((940, 1318), 'langchain.prompts.PromptTemplate', 'langchain.prompts.PromptTemplate', ([], {'input_variables': "['question', 'count']", 'template': '"""We want to answer the following question: {question} \nProvide {count} keyword searches (one search per line) that will find papers to help answer the question. Do not use boolean operators. Make some searches broad and some narrow. Do not use boolean operators or quotes.\n\n1. """'}), '(input_variables=[\'question\', \'count\'],\n template=\n """We want to answer the following question: {question} \nProvide {count} keyword searches (one search per line) that will find papers to help answer the question. Do not use boolean operators. Make some searches broad and some narrow. Do not use boolean operators or quotes.\n\n1. """\n )\n', (972, 1318), False, 'import langchain\n'), ((1484, 1505), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (1490, 1505), False, 'from langchain import SerpAPIWrapper, OpenAI\n'), ((1522, 1561), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'search_prompt'}), '(llm=llm, prompt=search_prompt)\n', (1530, 1561), False, 'from langchain.chains import LLMChain\n'), ((1658, 1674), 'os.path.isdir', 'os.path.isdir', (['v'], {}), '(v)\n', (1671, 1674), False, 'import os\n'), ((1688, 1699), 'os.mkdir', 'os.mkdir', (['v'], {}), '(v)\n', (1696, 1699), False, 'import os\n'), ((1786, 1878), 'paperscraper.search_papers', 'paperscraper.search_papers', (['search'], {'pdir': 'self.pdir', 'batch_size': '(6)', 'limit': '(4)', 'verbose': '(False)'}), '(search, pdir=self.pdir, batch_size=6, limit=4,\n verbose=False)\n', (1812, 1878), False, 'import paperscraper\n'), ((2294, 2323), 're.sub', 're.sub', (['"""^\\\\d+\\\\.\\\\s*"""', '""""""', 'q'], {}), "('^\\\\d+\\\\.\\\\s*', '', q)\n", (2300, 2323), False, 'import re\n'), ((2383, 2403), 're.sub', 're.sub', (['"""\\\\\\""""', '""""""', 'q'], {}), '(\'\\\\"\', \'\', q)\n', (2389, 2403), False, 'import re\n'), ((2737, 2805), 'paperqa.Docs', 'paperqa.Docs', ([], {'llm': 'self.llm', 'summary_llm': '"""gpt-3.5-turbo"""', 'memory': '(True)'}), "(llm=self.llm, summary_llm='gpt-3.5-turbo', memory=True)\n", (2749, 2805), False, 'import paperqa\n'), ((3810, 3835), 'os.getenv', 'os.getenv', (['"""SERP_API_KEY"""'], {}), "('SERP_API_KEY')\n", (3819, 3835), False, 'import os\n')]
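A hypothetical usage sketch for the LitSearch tool defined above (not in the original file); the model name and question are placeholders, and it assumes OPENAI_API_KEY is set and that paperqa and paperscraper are installed.

from langchain.chat_models import ChatOpenAI

lit = LitSearch(llm=ChatOpenAI(model_name="gpt-4", temperature=0.1), searches=2)
# BaseTool.run() dispatches to LitSearch._run(), which generates keyword searches,
# builds the paperqa.Docs store and returns a ~100-word answer
print(lit.run("How does scaffold rigidity influence binding affinity?"))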
import sys import getpass from dotenv import load_dotenv, dotenv_values import pandas as pd from IPython.display import display, Markdown, Latex, HTML, JSON import langchain from langchain.llms import OpenAI from langchain.prompts import PromptTemplate from langchain.chains import LLMChain from cmd import PROMPT import os from pyexpat.errors import messages import openai import tiktoken sys.path.append(r"/Users/dovcohen/Documents/Projects/AI/NL2SQL") #from .OpenAI_Func import Num_Tokens_From_String, OpenAI_Embeddings_Cost from ChatGPT.src.lib.OpenAI_Func import Num_Tokens_From_String, OpenAI_Embeddings_Cost from ChatGPT.src.lib.DB_Func import run_query from ChatGPT.src.lib.OpenAI_Func import Prompt_Cost, OpenAI_Usage_Cost ## Vector Datastore from ChatGPT.src.lib.lib_OpenAI_Embeddings import VDS, OpenAI_Embeddings class GenAI_NL2SQL(): def __init__(self, OPENAI_API_KEY, Model, Embedding_Model, Encoding_Base, Max_Tokens, Temperature, \ Token_Cost, DB, MYSQL_User, MYSQL_PWD, VDSDB=None, VDSDB_Filename=None): self._LLM_Model = Model self._Embedding_Model = Embedding_Model self._Encoding_Base = Encoding_Base self._Max_Tokens = Max_Tokens self._Temperature = Temperature self._Token_Cost = Token_Cost self._OpenAI_API_Key = OPENAI_API_KEY self._DB = DB self._MYSQL_Credemtals = {'User':MYSQL_User,'PWD':MYSQL_PWD} self.Set_OpenAI_API_Key() if VDSDB is not None: self._VDSDB = VDSDB self._VDS = VDS(VDSDB_Filename, Encoding_Base, Embedding_Model, Token_Cost, Max_Tokens) self._VDS.Load_VDS_DF(Verbose=True) def Set_OpenAI_API_Key(self): openai.api_key = self._OpenAI_API_Key return 0 def Print_Open_AI_Key(self): print(self._OpenAI_API_Key) def Print_MySQL_Keys(self): print(self._MYSQL_Credemtals) ############################################################################## def Prompt_Question(self, _Prompt_Template_, Inputs): """ """ for i,j in Inputs.items(): Prompt = _Prompt_Template_.replace(i,j) return Prompt ############################################################################## def OpenAI_Completion(self, Prompt): try: #Make your OpenAI API request here response = openai.Completion.create( model=self._LLM_Model, prompt=Prompt, max_tokens=self._Max_Tokens, temperature=self._Temperature, top_p=1, frequency_penalty=0, presence_penalty=0 ) except openai.error.APIError as e: #Handle API error here, e.g. 
retry or log print(f"OpenAI API returned an API Error: {e}") return -1 except openai.error.APIConnectionError as e: #Handle connection error here print(f"Failed to connect to OpenAI API: {e}") return -1 except openai.error.RateLimitError as e: #Handle rate limit error (we recommend using exponential backoff) print(f"OpenAI API request exceeded rate limit: {e}") return -1 return(response) ############################################################################# def OpenAI_Text_Extraction(self, Response, Type='SQL'): if Type == 'SQL': ## Call prompt that removes extraneaous characters from the returned query Prompt_Template, status = self.Load_Prompt_Template('../prompt_templates/OpenAI_SQL_Extraction.txt') if status == 0: Prompt = self.Prompt_Question(Prompt_Template, Inputs={'{RESPONSE}':str(Response)}) Rtn = self.OpenAI_Completion(Prompt) Txt = str(Rtn['choices'][0]['text']) elif Type == 'Text': Txt = str(Response['choices'][0]['text']) else: print(f'Type: {Type} is Unsupported ') Txt = '' return(Txt) ############################################################################## def Prompt_Query(self, Prompt_Template, Question = '', Verbose=False, Debug=False): status = 0 df = pd.DataFrame() # Construct prompt Prompt = self.Prompt_Question(Prompt_Template,{'{Question}':Question}) # Estimate input prompt cost Cost, Tokens_Used = Prompt_Cost(Prompt, self._LLM_Model, self._Token_Cost, self._Encoding_Base) if Verbose: print('Input') print(f'Total Cost: {round(Cost,3)} Tokens Used {Tokens_Used}') # Send prompt to LLM Response = self.OpenAI_Completion(Prompt) if Debug: print(f'Prompt: \n',Prompt,'\n') print('Response \n',Response,'\n') Cost, Tokens_Used = OpenAI_Usage_Cost(Response, self._LLM_Model, self._Token_Cost ) if Verbose: print('Output') print(f'Total Cost: {round(Cost,3)} Tokens Used {Tokens_Used}','\n') # extract query from LLM response Query = self.OpenAI_Text_Extraction(Response, Type='SQL') if Verbose: print(Query) return Query ############################################################################## # Given an single input question, run the entire process def GPT_Completion(self, Question, Prompt_Template, Correct_Query=False, Correction_Prompt=None, \ Max_Iterations=0,Verbose=False, QueryDB = False, Update_VDS=True): Correct_Query_Iterations = 0 # calculate Question Embedding vector Question_Emb = self._VDS.OpenAI_Get_Embedding(Text=Question, Verbose=True) # Few Shot Prompt - Search VDS for questions that are similar to the question posed # 11/2/2023: Using Cosine simlarity function N_Shot_Prompt_Examples = self._VDS.Search_VDS(Question_Emb, Similarity_Func = 'Cosine', Top_N=1) print(f'N_Shot_Prompt_Examples {N_Shot_Prompt_Examples}') return 0 # Construct prompt if Verbose: print('Call Prompt_Query') Query = self.Prompt_Query(Prompt_Template, Question, Verbose=True) # Test query the DB - if QueryDB: status, df = run_query(Query = Query, Credentials = self._MYSQL_Credemtals, DB=self._DB) # if query was malformed, llm halucianated for example if Correct_Query and (status == -5): while (status == -5) and (Correct_Query_Iterations < Max_Iterations): Correct_Query_Iterations += 1 print('Attempting to correct query syntax error') Query = self.Prompt_Query(Correction_Prompt, Question, Verbose) # Query the DB status, df = run_query(Query = Query, Credentials = self._MYSQL_Credemtals,\ DB=self._DB) print('\n',df) if Update_VDS: rtn = '' while rtn not in ('Y','N'): print(f'Add results to Vector Datastore DB? 
Y or N') rtn = input('Prompt> Question: ') if rtn == 'Y': self.Insert_VDS(Question=Question, Query=Query, Metadata='', Embedding=Question_Emb) # Return Query return Query ############################################################################## def Load_Prompt_Template(self, File=None): if File: try: with open(File, 'r') as file: Template = file.read().replace('\n', '') Status = 0 except: print(f'Prompt file {File} load failed ') Status = -1 return "", Status return Template, Status ############################################################################# def LangChain_Initiate_LLM(self, Model='OpenAI'): if Model=='OpenAI': self._LLM = OpenAI(temperature=self._Temperature, model_name=self._LLM_Model, \ max_tokens=self._Max_Tokens, openai_api_key=self._OpenAI_API_Key) return 0 else: print('Model Unsupported') return -1 # Langchain Completion def LangChainCompletion(self, Prompt, Input): chain = LLMChain(llm=self._LLM, prompt=Prompt) return chain.run(Input)
[ "langchain.chains.LLMChain", "langchain.llms.OpenAI" ]
[((394, 457), 'sys.path.append', 'sys.path.append', (['"""/Users/dovcohen/Documents/Projects/AI/NL2SQL"""'], {}), "('/Users/dovcohen/Documents/Projects/AI/NL2SQL')\n", (409, 457), False, 'import sys\n'), ((4264, 4278), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4276, 4278), True, 'import pandas as pd\n'), ((4452, 4527), 'ChatGPT.src.lib.OpenAI_Func.Prompt_Cost', 'Prompt_Cost', (['Prompt', 'self._LLM_Model', 'self._Token_Cost', 'self._Encoding_Base'], {}), '(Prompt, self._LLM_Model, self._Token_Cost, self._Encoding_Base)\n', (4463, 4527), False, 'from ChatGPT.src.lib.OpenAI_Func import Prompt_Cost, OpenAI_Usage_Cost\n'), ((4872, 4934), 'ChatGPT.src.lib.OpenAI_Func.OpenAI_Usage_Cost', 'OpenAI_Usage_Cost', (['Response', 'self._LLM_Model', 'self._Token_Cost'], {}), '(Response, self._LLM_Model, self._Token_Cost)\n', (4889, 4934), False, 'from ChatGPT.src.lib.OpenAI_Func import Prompt_Cost, OpenAI_Usage_Cost\n'), ((8369, 8407), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'self._LLM', 'prompt': 'Prompt'}), '(llm=self._LLM, prompt=Prompt)\n', (8377, 8407), False, 'from langchain.chains import LLMChain\n'), ((1546, 1621), 'ChatGPT.src.lib.lib_OpenAI_Embeddings.VDS', 'VDS', (['VDSDB_Filename', 'Encoding_Base', 'Embedding_Model', 'Token_Cost', 'Max_Tokens'], {}), '(VDSDB_Filename, Encoding_Base, Embedding_Model, Token_Cost, Max_Tokens)\n', (1549, 1621), False, 'from ChatGPT.src.lib.lib_OpenAI_Embeddings import VDS, OpenAI_Embeddings\n'), ((2404, 2585), 'openai.Completion.create', 'openai.Completion.create', ([], {'model': 'self._LLM_Model', 'prompt': 'Prompt', 'max_tokens': 'self._Max_Tokens', 'temperature': 'self._Temperature', 'top_p': '(1)', 'frequency_penalty': '(0)', 'presence_penalty': '(0)'}), '(model=self._LLM_Model, prompt=Prompt, max_tokens=\n self._Max_Tokens, temperature=self._Temperature, top_p=1,\n frequency_penalty=0, presence_penalty=0)\n', (2428, 2585), False, 'import openai\n'), ((6308, 6379), 'ChatGPT.src.lib.DB_Func.run_query', 'run_query', ([], {'Query': 'Query', 'Credentials': 'self._MYSQL_Credemtals', 'DB': 'self._DB'}), '(Query=Query, Credentials=self._MYSQL_Credemtals, DB=self._DB)\n', (6317, 6379), False, 'from ChatGPT.src.lib.DB_Func import run_query\n'), ((8017, 8152), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': 'self._Temperature', 'model_name': 'self._LLM_Model', 'max_tokens': 'self._Max_Tokens', 'openai_api_key': 'self._OpenAI_API_Key'}), '(temperature=self._Temperature, model_name=self._LLM_Model,\n max_tokens=self._Max_Tokens, openai_api_key=self._OpenAI_API_Key)\n', (8023, 8152), False, 'from langchain.llms import OpenAI\n'), ((6851, 6922), 'ChatGPT.src.lib.DB_Func.run_query', 'run_query', ([], {'Query': 'Query', 'Credentials': 'self._MYSQL_Credemtals', 'DB': 'self._DB'}), '(Query=Query, Credentials=self._MYSQL_Credemtals, DB=self._DB)\n', (6860, 6922), False, 'from ChatGPT.src.lib.DB_Func import run_query\n')]
# %% import logging import sys # logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) # %% from llama_index import (SimpleDirectoryReader, LLMPredictor, ServiceContext, KnowledgeGraphIndex) # from llama_index.graph_stores import SimpleGraphStore from llama_index.storage.storage_context import StorageContext from llama_index.llms import HuggingFaceInferenceAPI from langchain.embeddings import HuggingFaceInferenceAPIEmbeddings from llama_index.embeddings import LangchainEmbedding from pyvis.network import Network from dotenv import load_dotenv import os # %% load_dotenv() HF_TOKEN = os.getenv("HF_TOKEN") llm = HuggingFaceInferenceAPI( model_name="mistralai/Mistral-7B-Instruct-v0.2", token=HF_TOKEN ) # %% print(llm) # %% embed_model = LangchainEmbedding( HuggingFaceInferenceAPIEmbeddings(api_key=HF_TOKEN, model_name="thenlper/gte-large") ) # %% documents = SimpleDirectoryReader("data").load_data() pdf_documents = [doc for doc in documents if doc.filename.endswith('.pdf')] print(len(pdf_documents)) # %%
[ "langchain.embeddings.HuggingFaceInferenceAPIEmbeddings" ]
[((33, 91), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (52, 91), False, 'import logging\n'), ((717, 730), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (728, 730), False, 'from dotenv import load_dotenv\n'), ((760, 852), 'llama_index.llms.HuggingFaceInferenceAPI', 'HuggingFaceInferenceAPI', ([], {'model_name': '"""mistralai/Mistral-7B-Instruct-v0.2"""', 'token': 'HF_TOKEN'}), "(model_name='mistralai/Mistral-7B-Instruct-v0.2',\n token=HF_TOKEN)\n", (783, 852), False, 'from llama_index.llms import HuggingFaceInferenceAPI\n'), ((123, 163), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (144, 163), False, 'import logging\n'), ((912, 1001), 'langchain.embeddings.HuggingFaceInferenceAPIEmbeddings', 'HuggingFaceInferenceAPIEmbeddings', ([], {'api_key': 'HF_TOKEN', 'model_name': '"""thenlper/gte-large"""'}), "(api_key=HF_TOKEN, model_name=\n 'thenlper/gte-large')\n", (945, 1001), False, 'from langchain.embeddings import HuggingFaceInferenceAPIEmbeddings\n'), ((92, 111), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (109, 111), False, 'import logging\n'), ((1016, 1045), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (1037, 1045), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, KnowledgeGraphIndex\n')]
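The script above stops after loading the documents even though it imports StorageContext and KnowledgeGraphIndex. Below is a hedged sketch of the step those imports point toward, using the same legacy llama_index API; chunk_size and max_triplets_per_chunk are assumed tuning values, not taken from the original.

service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model, chunk_size=512)
storage_context = StorageContext.from_defaults()
kg_index = KnowledgeGraphIndex.from_documents(
    documents,
    storage_context=storage_context,
    service_context=service_context,
    max_triplets_per_chunk=2,
)
# the pyvis import in the script suggests the graph was meant to be visualised afterwards,
# e.g. starting from kg_index.get_networkx_graph()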
#CAMEL Role-Playing Autonomous Cooperative Agents ''' This is a langchain implementation of paper: “CAMEL: Communicative Agents for “Mind” Exploration of Large Scale Language Model Society". ''' #Overview: ''' The rapid advancement of conversational and chat-based language models has led to remarkable progress in complex task-solving. However, their success heavily relies on human input to guide the conversation, which can be challenging and time-consuming. This paper explores the potential of building scalable techniques to facilitate autonomous cooperation among communicative agents and provide insight into their “cognitive” processes. To address the challenges of achieving autonomous cooperation, we propose a novel communicative agent framework named role-playing. Our approach involves using inception prompting to guide chat agents toward task completion while maintaining consistency with human intentions. We showcase how role-playing can be used to generate conversational data for studying the behaviors and capabilities of chat agents, providing a valuable resource for investigating conversational language models. Our contributions include introducing a novel communicative agent framework, offering a scalable approach for studying the cooperative behaviors and capabilities of multi-agent systems, and open-sourcing our library to support research on communicative agentsand beyond. The original implementation: https://github.com/lightaime/camel Project website: https://www.camel-ai.org/ ''' #Import LangChain related modules from typing import List from langchain.chat_models import ChatOpenAI import os os.environ["OPENAI_API_KEY"] ="Your_OPENAI_API_KEY" from langchain.prompts.chat import ( SystemMessagePromptTemplate, HumanMessagePromptTemplate, ) from langchain.schema import ( AIMessage, HumanMessage, SystemMessage, BaseMessage, ) #Define a CAMEL agent helper class def camel(): class CAMELAgent: def __init__( self, system_message: SystemMessage, model: ChatOpenAI, ) -> None: self.system_message = system_message self.model = model self.init_messages() def reset(self) -> None: self.init_messages() return self.stored_messages def init_messages(self) -> None: self.stored_messages = [self.system_message] def update_messages(self, message: BaseMessage) -> List[BaseMessage]: self.stored_messages.append(message) return self.stored_messages def step( self, input_message: HumanMessage, ) -> AIMessage: messages = self.update_messages(input_message) output_message = self.model(messages) self.update_messages(output_message) return output_message #Setup OpenAI API key and roles and task for role-playing assistant_role_name = "Python Programmer" user_role_name = "Stock Trader" task = "Develop a trading bot for the stock market" word_limit = 50 # word limit for task brainstorming #Create a task specify agent for brainstorming and get the specified task task_specifier_sys_msg = SystemMessage(content="You can make a task more specific.") task_specifier_prompt = ( """Here is a task that {assistant_role_name} will help {user_role_name} to complete: {task}. Please make it more specific. Be creative and imaginative. Please reply with the specified task in {word_limit} words or less. 
Do not add anything else.""" ) task_specifier_template = HumanMessagePromptTemplate.from_template(template=task_specifier_prompt) task_specify_agent = CAMELAgent(task_specifier_sys_msg, ChatOpenAI(temperature=1.0)) task_specifier_msg = task_specifier_template.format_messages(assistant_role_name=assistant_role_name, user_role_name=user_role_name, task=task, word_limit=word_limit)[0] specified_task_msg = task_specify_agent.step(task_specifier_msg) print(f"Specified task: {specified_task_msg.content}") specified_task = specified_task_msg.content #Create inception prompts for AI assistant and AI user for role-playing assistant_inception_prompt = ( """Never forget you are a {assistant_role_name} and I am a {user_role_name}. Never flip roles! Never instruct me! We share a common interest in collaborating to successfully complete a task. You must help me to complete the task. Here is the task: {task}. Never forget our task! I must instruct you based on your expertise and my needs to complete the task. I must give you one instruction at a time. You must write a specific solution that appropriately completes the requested instruction. You must decline my instruction honestly if you cannot perform the instruction due to physical, moral, legal reasons or your capability and explain the reasons. Do not add anything else other than your solution to my instruction. You are never supposed to ask me any questions you only answer questions. You are never supposed to reply with a flake solution. Explain your solutions. Your solution must be declarative sentences and simple present tense. Unless I say the task is completed, you should always start with: Solution: <YOUR_SOLUTION> <YOUR_SOLUTION> should be specific and provide preferable implementations and examples for task-solving. Always end <YOUR_SOLUTION> with: Next request.""" ) user_inception_prompt = ( """Never forget you are a {user_role_name} and I am a {assistant_role_name}. Never flip roles! You will always instruct me. We share a common interest in collaborating to successfully complete a task. I must help you to complete the task. Here is the task: {task}. Never forget our task! You must instruct me based on my expertise and your needs to complete the task ONLY in the following two ways: 1. Instruct with a necessary input: Instruction: <YOUR_INSTRUCTION> Input: <YOUR_INPUT> 2. Instruct without any input: Instruction: <YOUR_INSTRUCTION> Input: None The "Instruction" describes a task or question. The paired "Input" provides further context or information for the requested "Instruction". You must give me one instruction at a time. I must write a response that appropriately completes the requested instruction. I must decline your instruction honestly if I cannot perform the instruction due to physical, moral, legal reasons or my capability and explain the reasons. You should instruct me not ask me questions. Now you must start to instruct me using the two ways described above. Do not add anything else other than your instruction and the optional corresponding input! Keep giving me instructions and necessary inputs until you think the task is completed. When the task is completed, you must only reply with a single word <CAMEL_TASK_DONE>. 
Never say <CAMEL_TASK_DONE> unless my responses have solved your task.""" ) #Create a helper helper to get system messages for AI assistant and AI user from role names and the task def get_sys_msgs(assistant_role_name: str, user_role_name: str, task: str): assistant_sys_template = SystemMessagePromptTemplate.from_template(template=assistant_inception_prompt) assistant_sys_msg = assistant_sys_template.format_messages(assistant_role_name=assistant_role_name, user_role_name=user_role_name, task=task)[0] user_sys_template = SystemMessagePromptTemplate.from_template(template=user_inception_prompt) user_sys_msg = user_sys_template.format_messages(assistant_role_name=assistant_role_name, user_role_name=user_role_name, task=task)[0] return assistant_sys_msg, user_sys_msg #Create AI assistant agent and AI user agent from obtained system messages assistant_sys_msg, user_sys_msg = get_sys_msgs(assistant_role_name, user_role_name, specified_task) assistant_agent = CAMELAgent(assistant_sys_msg, ChatOpenAI(temperature=0.2)) user_agent = CAMELAgent(user_sys_msg, ChatOpenAI(temperature=0.2)) # Reset agents assistant_agent.reset() user_agent.reset() # Initialize chats assistant_msg = HumanMessage( content=(f"{user_sys_msg.content}. " "Now start to give me introductions one by one. " "Only reply with Instruction and Input.")) user_msg = HumanMessage(content=f"{assistant_sys_msg.content}") user_msg = assistant_agent.step(user_msg) #Start role-playing session to solve the task! print(f"Original task prompt:\n{task}\n") print(f"Specified task prompt:\n{specified_task}\n") chat_turn_limit, n = 30, 0 while n < chat_turn_limit: n += 1 user_ai_msg = user_agent.step(assistant_msg) user_msg = HumanMessage(content=user_ai_msg.content) print(f"AI User ({user_role_name}):\n\n{user_msg.content}\n\n") assistant_ai_msg = assistant_agent.step(user_msg) assistant_msg = HumanMessage(content=assistant_ai_msg.content) print(f"AI Assistant ({assistant_role_name}):\n\n{assistant_msg.content}\n\n") if "<CAMEL_TASK_DONE>" in user_msg.content: break camel()
[ "langchain.prompts.chat.SystemMessagePromptTemplate.from_template", "langchain.chat_models.ChatOpenAI", "langchain.schema.HumanMessage", "langchain.schema.SystemMessage", "langchain.prompts.chat.HumanMessagePromptTemplate.from_template" ]
[((3268, 3327), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': '"""You can make a task more specific."""'}), "(content='You can make a task more specific.')\n", (3281, 3327), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage, BaseMessage\n'), ((3655, 3727), 'langchain.prompts.chat.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', ([], {'template': 'task_specifier_prompt'}), '(template=task_specifier_prompt)\n', (3695, 3727), False, 'from langchain.prompts.chat import SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((8462, 8606), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'f"""{user_sys_msg.content}. Now start to give me introductions one by one. Only reply with Instruction and Input."""'}), "(content=\n f'{user_sys_msg.content}. Now start to give me introductions one by one. Only reply with Instruction and Input.'\n )\n", (8474, 8606), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage, BaseMessage\n'), ((8670, 8722), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'f"""{assistant_sys_msg.content}"""'}), "(content=f'{assistant_sys_msg.content}')\n", (8682, 8722), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage, BaseMessage\n'), ((3788, 3815), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(1.0)'}), '(temperature=1.0)\n', (3798, 3815), False, 'from langchain.chat_models import ChatOpenAI\n'), ((7467, 7545), 'langchain.prompts.chat.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', ([], {'template': 'assistant_inception_prompt'}), '(template=assistant_inception_prompt)\n', (7508, 7545), False, 'from langchain.prompts.chat import SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((7736, 7809), 'langchain.prompts.chat.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', ([], {'template': 'user_inception_prompt'}), '(template=user_inception_prompt)\n', (7777, 7809), False, 'from langchain.prompts.chat import SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((8246, 8273), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.2)'}), '(temperature=0.2)\n', (8256, 8273), False, 'from langchain.chat_models import ChatOpenAI\n'), ((8317, 8344), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.2)'}), '(temperature=0.2)\n', (8327, 8344), False, 'from langchain.chat_models import ChatOpenAI\n'), ((9074, 9115), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'user_ai_msg.content'}), '(content=user_ai_msg.content)\n', (9086, 9115), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage, BaseMessage\n'), ((9279, 9325), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'assistant_ai_msg.content'}), '(content=assistant_ai_msg.content)\n', (9291, 9325), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage, BaseMessage\n')]
# Copyright 2023-2024 ByteBrain AI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import asyncio

import langchain
from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.schema import Document
from langchain.vectorstores import Weaviate
from weaviate import Client

langchain.verbose = True

texts = [
    "Scala is a functional Programming Language",
    "I love functional programming",
    "fp is too simple an is not hard to understand",
    "women must adore their husbands",
    "ZIO is a good library for writing fp apps",
    "Feminism is the belief that all genders should have equal rights and opportunities.",
    "This movement is about making the world a better place for everyone",
    "The purpose of ZIO Chat Bot is to provide list of ZIO Projects",
    "I've got a cold and I've sore throat",
    "ZIO chat bot is an open source project."
]

docs = [Document(page_content=t, metadata={"source": i}) for i, t in enumerate(texts)]

embeddings: OpenAIEmbeddings = OpenAIEmbeddings()
weaviate_client = Client(url="http://localhost:8080")
vector_store = Weaviate.from_documents(docs, embedding=embeddings, weaviate_url="http://127.0.0.1:8080")
# vector_store = FAISS.from_documents(documents=docs, embedding=embeddings)
retriever = vector_store.as_retriever()

retrievalQA = RetrievalQAWithSourcesChain.from_llm(llm=OpenAI(verbose=True), retriever=retriever)


async def run_qa():
    result = await retrievalQA.acall({'question': 'what is the zio chat?'})
    print(result)
    print("Hello")


if __name__ == "__main__":
    import tracemalloc

    tracemalloc.start()
    asyncio.run(run_qa())
[ "langchain.vectorstores.Weaviate.from_documents", "langchain.schema.Document", "langchain.embeddings.OpenAIEmbeddings", "langchain.llms.OpenAI" ]
[((1605, 1623), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1621, 1623), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1643, 1678), 'weaviate.Client', 'Client', ([], {'url': '"""http://localhost:8080"""'}), "(url='http://localhost:8080')\n", (1649, 1678), False, 'from weaviate import Client\n'), ((1695, 1789), 'langchain.vectorstores.Weaviate.from_documents', 'Weaviate.from_documents', (['docs'], {'embedding': 'embeddings', 'weaviate_url': '"""http://127.0.0.1:8080"""'}), "(docs, embedding=embeddings, weaviate_url=\n 'http://127.0.0.1:8080')\n", (1718, 1789), False, 'from langchain.vectorstores import Weaviate\n'), ((1494, 1542), 'langchain.schema.Document', 'Document', ([], {'page_content': 't', 'metadata': "{'source': i}"}), "(page_content=t, metadata={'source': i})\n", (1502, 1542), False, 'from langchain.schema import Document\n'), ((2196, 2215), 'tracemalloc.start', 'tracemalloc.start', ([], {}), '()\n', (2213, 2215), False, 'import tracemalloc\n'), ((1961, 1981), 'langchain.llms.OpenAI', 'OpenAI', ([], {'verbose': '(True)'}), '(verbose=True)\n', (1967, 1981), False, 'from langchain.llms import OpenAI\n')]
import streamlit as st
import langchain_helper as lc

st.title("Restaurant Name generator")

cusine = st.sidebar.selectbox("Pick a Cusine", ("Indian", "Italian", "Mexican", "Arabic"))

if cusine:
    response = lc.generate_restaurent_name_and_items(cusine)
    st.header(response['restaurant_name'].strip())
    menu_items = response['items'].strip().split(",")
    st.write("**Menu Items**")
    for item in menu_items:
        st.write("-", item)
[ "langchain_helper.generate_restaurent_name_and_items" ]
[((55, 92), 'streamlit.title', 'st.title', (['"""Restaurant Name generator"""'], {}), "('Restaurant Name generator')\n", (63, 92), True, 'import streamlit as st\n'), ((105, 190), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Pick a Cusine"""', "('Indian', 'Italian', 'Mexican', 'Arabic')"], {}), "('Pick a Cusine', ('Indian', 'Italian', 'Mexican',\n 'Arabic'))\n", (125, 190), True, 'import streamlit as st\n'), ((217, 262), 'langchain_helper.generate_restaurent_name_and_items', 'lc.generate_restaurent_name_and_items', (['cusine'], {}), '(cusine)\n', (254, 262), True, 'import langchain_helper as lc\n'), ((375, 401), 'streamlit.write', 'st.write', (['"""**Menu Items**"""'], {}), "('**Menu Items**')\n", (383, 401), True, 'import streamlit as st\n'), ((441, 460), 'streamlit.write', 'st.write', (['"""-"""', 'item'], {}), "('-', item)\n", (449, 460), True, 'import streamlit as st\n')]
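The Streamlit sample above imports generate_restaurent_name_and_items from a langchain_helper module that is not included in this record. The sketch below is only an assumption about how such a helper could be written with two chained LLMChains; the function name and the restaurant_name / items output keys are taken from the sample, while the prompts, temperature and chain wiring are illustrative.

from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain, SequentialChain


def generate_restaurent_name_and_items(cuisine):
    # Hypothetical helper; the real langchain_helper module is not shown above.
    llm = OpenAI(temperature=0.7)

    name_prompt = PromptTemplate(
        input_variables=["cuisine"],
        template="Suggest one fancy name for a {cuisine} restaurant. Return only the name.",
    )
    name_chain = LLMChain(llm=llm, prompt=name_prompt, output_key="restaurant_name")

    items_prompt = PromptTemplate(
        input_variables=["restaurant_name"],
        template="Suggest ten menu items for {restaurant_name}. Return a comma separated list.",
    )
    items_chain = LLMChain(llm=llm, prompt=items_prompt, output_key="items")

    chain = SequentialChain(
        chains=[name_chain, items_chain],
        input_variables=["cuisine"],
        output_variables=["restaurant_name", "items"],
    )
    # Returns a dict with the "restaurant_name" and "items" keys the page reads.
    return chain({"cuisine": cuisine})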
from langchain.document_loaders import DirectoryLoader
from langchain.indexes import VectorstoreIndexCreator
import langchain

langchain.verbose = True

# loader = DirectoryLoader("../langchain/docs/_build/html/", glob="**/*.html")
loader = DirectoryLoader("../demo/", glob="*.html")

index = VectorstoreIndexCreator().from_loaders([loader])
print("index created")

# Query (Japanese): "Explain the overview of Jujutsu Kaisen in one sentence."
result = index.query("呪術廻戦の概要を1文で説明してください。")
print(f"result: {result}")
[ "langchain.document_loaders.DirectoryLoader", "langchain.indexes.VectorstoreIndexCreator" ]
[((242, 284), 'langchain.document_loaders.DirectoryLoader', 'DirectoryLoader', (['"""../demo/"""'], {'glob': '"""*.html"""'}), "('../demo/', glob='*.html')\n", (257, 284), False, 'from langchain.document_loaders import DirectoryLoader\n'), ((293, 318), 'langchain.indexes.VectorstoreIndexCreator', 'VectorstoreIndexCreator', ([], {}), '()\n', (316, 318), False, 'from langchain.indexes import VectorstoreIndexCreator\n')]
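VectorstoreIndexCreator in the sample above compresses loading, splitting, embedding, storage and querying into two calls. As a rough sketch of the equivalent explicit pipeline — the concrete splitter, chunk sizes and vector store below are assumptions, since the defaults vary across langchain versions:

from langchain.chains import RetrievalQA
from langchain.document_loaders import DirectoryLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma

# Load and chunk the HTML pages before embedding them.
docs = DirectoryLoader("../demo/", glob="*.html").load()
chunks = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100).split_documents(docs)

# Embed the chunks into an in-memory Chroma collection.
vectorstore = Chroma.from_documents(chunks, OpenAIEmbeddings())

# index.query(...) behaves roughly like a RetrievalQA chain over that store.
qa = RetrievalQA.from_chain_type(llm=OpenAI(), retriever=vectorstore.as_retriever())
print(qa.run("Summarise the collection in one sentence."))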
import streamlit as st import subprocess from langchain_core.messages import AIMessage, HumanMessage from langchain_local import LangchainLocal from uploadFile import UploadFile from helper.helper import Helper from ingest import GetVectorstore def configure_api_key(api_key_name): # Configure the API key api_key = st.secrets[api_key_name] # Create an instance of the Helper class helper = Helper() # Check if the API key is set if api_key == "": # Prompt the user to enter the API key api_key = st.text_input(f"{api_key_name.capitalize()} API Key", type="password") # If the user enters an API key, write it to the secrets.toml file. if st.button("Set API_KEY") and api_key != "": helper.set_api_key(api_key_name, api_key) st.rerun() else: # If the API key is already set, display a message if not st.session_state.api_state_update: st.write("API key is set✅") # Provide an option to update the API key with st.expander( "Update API Key?", ): api_key = st.text_input("Enter API key", type="password", key="second") # If the user enters a new API key, update it in the secrets.toml file. if st.button("Confirm") and api_key != "": helper.set_api_key(api_key_name, api_key) # st.toast("API_KEY Updated✅") st.session_state.api_state_update = True # st.rerun() # Display a toast message when the API key is updated if st.session_state.api_state_update: st.toast("API_KEY Updated✅") st.session_state.api_state_update = False ############################################################# def on_embedding_model_change(selected_model): if selected_model != st.session_state.embedding_model: st.session_state.embedding_model = selected_model st.session_state.embedding_model_change_state = True def select_embedding_model(): if st.session_state.embedding_model == "Ollama Embeddings": model_name = "Ollama Embeddings" elif st.session_state.embedding_model == "GooglePalm Embeddings": model_name = "GooglePalm Embeddings" else: raise ValueError("Unknown embedding model") return model_name def process_prompt(): # Display chat history langchain_local = LangchainLocal(st.session_state) for message in st.session_state.chat_dialog_history: if isinstance(message, AIMessage): with st.chat_message("AI"): st.write(message.content) elif isinstance(message, HumanMessage): with st.chat_message("Human"): st.write(message.content) if prompt := st.chat_input("Ask a question about your documents"): st.session_state.chat_dialog_history.append(HumanMessage(content=prompt)) with st.chat_message("Human"): st.write(prompt) with st.chat_message("AI"): response = st.write_stream( langchain_local.get_response( user_input=prompt, chat_history=st.session_state.chat_dialog_history, vectorstore=st.session_state.vectorstore, ) ) st.session_state.chat_dialog_history.append(AIMessage(content=response)) def load_models(): LLM_TYPES = ["Groq", "Ollama", "Google"] EMBEDDING_MODELS = ["Ollama Embeddings", "GooglePalm Embeddings"] # Checking the available ollama models try: ollama_list_output = ( subprocess.check_output(["ollama", "list"]).decode().split("\n") ) except Exception: try: ollama_list_output = ( subprocess.check_output( ["docker", "exec", "-it", "ollama", "ollama", "list"] ) .decode() .split("\n") ) except Exception: ollama_list_output = [] OLLAMA_MODELS = [ line.split()[0] for line in ollama_list_output if ":" in line and "ollama:" not in line ] # Define Groq models GROQ_MODELS = ["mixtral-8x7b-32768", "llama2-70b-4096"] model_type = st.selectbox("Select LLM ⬇️", LLM_TYPES) if model_type == "Google": # configure_google_palm() 
configure_api_key("palm_api_key") elif model_type == "Ollama": if not OLLAMA_MODELS: st.error( "Ollama is not configured properly, Make sure:\n\n" "1. You have installed Ollama.\n" "2. Ollama is running.\n" "3. You have downloaded an Ollama model like Mistral 7B." ) st.session_state.error = True else: st.session_state.ollama_model = st.selectbox("Ollama Model", OLLAMA_MODELS) elif model_type == "Groq": st.session_state.groq_model = st.selectbox("Groq Model", GROQ_MODELS) # configure_groq_api() configure_api_key("groq_api_key") st.session_state.llm_type = model_type # handling the embedding models embedding_model = st.radio("Embedding Model ⬇️", EMBEDDING_MODELS) on_embedding_model_change(embedding_model) def initialize_ui(): st.set_page_config( page_title="Pustak GPT", page_icon=":books:", layout="wide", initial_sidebar_state="auto", menu_items=None, ) st.title("Pustak GPT") initialize_session_state() def initialize_session_state(): # checking the session state for the conversation if "conversation" not in st.session_state: st.session_state.conversation = None if "chat_dialog_history" not in st.session_state.keys(): st.session_state.chat_dialog_history = [ AIMessage(content="Hello! How can I help you today?"), ] # using for the check of uploaded documents if "disabled" not in st.session_state: st.session_state.disabled = True # using for api state check if "api_state_update" not in st.session_state: st.session_state.api_state_update = False if "ollama_model" not in st.session_state: st.session_state.ollama_model = "" if "embedding_model" not in st.session_state: st.session_state.embedding_model = " " if "llm_type" not in st.session_state: st.session_state.llm_type = "Google" if "embedding_model_change_state" not in st.session_state: st.session_state.embedding_model_change_state = False if "error" not in st.session_state: st.session_state.error = False if "vectorstore" not in st.session_state: st.session_state.vectorstore = [] if "messages" not in st.session_state: st.session_state.messages = [] if "groq_model" not in st.session_state: st.session_state.groq_model = "" def process_documents(): if st.session_state.disabled: st.write("🔒 Please upload and process your PDFs to unlock the question field.") load_models() upload_and_process_files() else: process_prompt() def upload_and_process_files(): documents = st.file_uploader( "Upload the PDFs here:", accept_multiple_files=True, type=["xlsx", "xls", "csv", "pptx", "docx", "pdf", "txt"], ) if documents and st.button( "Process", type="secondary", use_container_width=True, disabled=st.session_state.error, ): st.toast( """Hang tight! 
the documents are being processed for you, it might take several minutes depending on the size of your documents""", icon="🤖", ) with st.spinner("Processing..."): if ( st.session_state.disabled or st.session_state.embedding_model_change_state ): process_uploaded_documents(documents) st.session_state.disabled = False st.rerun() def process_uploaded_documents(documents): text_chunks = [] for docs in documents: upload = UploadFile(docs) splits = upload.get_document_splits() text_chunks.extend(splits) model_name = select_embedding_model() get_vectorstore_instance = GetVectorstore() st.session_state.vectorstore = get_vectorstore_instance.get_vectorstore( text_chunks, model_name ) # st.session_state.vectorstore = get_vectorstore(text_chunks) st.session_state.embedding_model_change_state = False # retriever_chain = get_context_retriever_chain(st.session_state.vectorstore) # st.session_state.conversation = get_conversational_rag_chain(retriever_chain) def main(): initialize_ui() process_documents() if __name__ == "__main__": main()
[ "langchain_core.messages.AIMessage", "langchain_core.messages.HumanMessage", "langchain_local.LangchainLocal" ]
[((412, 420), 'helper.helper.Helper', 'Helper', ([], {}), '()\n', (418, 420), False, 'from helper.helper import Helper\n'), ((2403, 2435), 'langchain_local.LangchainLocal', 'LangchainLocal', (['st.session_state'], {}), '(st.session_state)\n', (2417, 2435), False, 'from langchain_local import LangchainLocal\n'), ((4269, 4309), 'streamlit.selectbox', 'st.selectbox', (['"""Select LLM ⬇️"""', 'LLM_TYPES'], {}), "('Select LLM ⬇️', LLM_TYPES)\n", (4281, 4309), True, 'import streamlit as st\n'), ((5177, 5225), 'streamlit.radio', 'st.radio', (['"""Embedding Model ⬇️"""', 'EMBEDDING_MODELS'], {}), "('Embedding Model ⬇️', EMBEDDING_MODELS)\n", (5185, 5225), True, 'import streamlit as st\n'), ((5300, 5431), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Pustak GPT"""', 'page_icon': '""":books:"""', 'layout': '"""wide"""', 'initial_sidebar_state': '"""auto"""', 'menu_items': 'None'}), "(page_title='Pustak GPT', page_icon=':books:', layout=\n 'wide', initial_sidebar_state='auto', menu_items=None)\n", (5318, 5431), True, 'import streamlit as st\n'), ((5479, 5501), 'streamlit.title', 'st.title', (['"""Pustak GPT"""'], {}), "('Pustak GPT')\n", (5487, 5501), True, 'import streamlit as st\n'), ((7191, 7324), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload the PDFs here:"""'], {'accept_multiple_files': '(True)', 'type': "['xlsx', 'xls', 'csv', 'pptx', 'docx', 'pdf', 'txt']"}), "('Upload the PDFs here:', accept_multiple_files=True, type=\n ['xlsx', 'xls', 'csv', 'pptx', 'docx', 'pdf', 'txt'])\n", (7207, 7324), True, 'import streamlit as st\n'), ((8313, 8329), 'ingest.GetVectorstore', 'GetVectorstore', ([], {}), '()\n', (8327, 8329), False, 'from ingest import GetVectorstore\n'), ((1626, 1654), 'streamlit.toast', 'st.toast', (['"""API_KEY Updated✅"""'], {}), "('API_KEY Updated✅')\n", (1634, 1654), True, 'import streamlit as st\n'), ((2769, 2821), 'streamlit.chat_input', 'st.chat_input', (['"""Ask a question about your documents"""'], {}), "('Ask a question about your documents')\n", (2782, 2821), True, 'import streamlit as st\n'), ((5749, 5772), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (5770, 5772), True, 'import streamlit as st\n'), ((6969, 7048), 'streamlit.write', 'st.write', (['"""🔒 Please upload and process your PDFs to unlock the question field."""'], {}), "('🔒 Please upload and process your PDFs to unlock the question field.')\n", (6977, 7048), True, 'import streamlit as st\n'), ((7372, 7474), 'streamlit.button', 'st.button', (['"""Process"""'], {'type': '"""secondary"""', 'use_container_width': '(True)', 'disabled': 'st.session_state.error'}), "('Process', type='secondary', use_container_width=True, disabled=\n st.session_state.error)\n", (7381, 7474), True, 'import streamlit as st\n'), ((7518, 7694), 'streamlit.toast', 'st.toast', (['"""Hang tight! the documents are being processed for you,\n it might take several minutes depending on the size of your documents"""'], {'icon': '"""🤖"""'}), '(\n """Hang tight! 
the documents are being processed for you,\n it might take several minutes depending on the size of your documents"""\n , icon=\'🤖\')\n', (7526, 7694), True, 'import streamlit as st\n'), ((8142, 8158), 'uploadFile.UploadFile', 'UploadFile', (['docs'], {}), '(docs)\n', (8152, 8158), False, 'from uploadFile import UploadFile\n'), ((702, 726), 'streamlit.button', 'st.button', (['"""Set API_KEY"""'], {}), "('Set API_KEY')\n", (711, 726), True, 'import streamlit as st\n'), ((812, 822), 'streamlit.rerun', 'st.rerun', ([], {}), '()\n', (820, 822), True, 'import streamlit as st\n'), ((955, 982), 'streamlit.write', 'st.write', (['"""API key is set✅"""'], {}), "('API key is set✅')\n", (963, 982), True, 'import streamlit as st\n'), ((1047, 1077), 'streamlit.expander', 'st.expander', (['"""Update API Key?"""'], {}), "('Update API Key?')\n", (1058, 1077), True, 'import streamlit as st\n'), ((1124, 1185), 'streamlit.text_input', 'st.text_input', (['"""Enter API key"""'], {'type': '"""password"""', 'key': '"""second"""'}), "('Enter API key', type='password', key='second')\n", (1137, 1185), True, 'import streamlit as st\n'), ((2875, 2903), 'langchain_core.messages.HumanMessage', 'HumanMessage', ([], {'content': 'prompt'}), '(content=prompt)\n', (2887, 2903), False, 'from langchain_core.messages import AIMessage, HumanMessage\n'), ((2919, 2943), 'streamlit.chat_message', 'st.chat_message', (['"""Human"""'], {}), "('Human')\n", (2934, 2943), True, 'import streamlit as st\n'), ((2957, 2973), 'streamlit.write', 'st.write', (['prompt'], {}), '(prompt)\n', (2965, 2973), True, 'import streamlit as st\n'), ((2988, 3009), 'streamlit.chat_message', 'st.chat_message', (['"""AI"""'], {}), "('AI')\n", (3003, 3009), True, 'import streamlit as st\n'), ((3354, 3381), 'langchain_core.messages.AIMessage', 'AIMessage', ([], {'content': 'response'}), '(content=response)\n', (3363, 3381), False, 'from langchain_core.messages import AIMessage, HumanMessage\n'), ((5835, 5888), 'langchain_core.messages.AIMessage', 'AIMessage', ([], {'content': '"""Hello! How can I help you today?"""'}), "(content='Hello! How can I help you today?')\n", (5844, 5888), False, 'from langchain_core.messages import AIMessage, HumanMessage\n'), ((7733, 7760), 'streamlit.spinner', 'st.spinner', (['"""Processing..."""'], {}), "('Processing...')\n", (7743, 7760), True, 'import streamlit as st\n'), ((1286, 1306), 'streamlit.button', 'st.button', (['"""Confirm"""'], {}), "('Confirm')\n", (1295, 1306), True, 'import streamlit as st\n'), ((2553, 2574), 'streamlit.chat_message', 'st.chat_message', (['"""AI"""'], {}), "('AI')\n", (2568, 2574), True, 'import streamlit as st\n'), ((2592, 2617), 'streamlit.write', 'st.write', (['message.content'], {}), '(message.content)\n', (2600, 2617), True, 'import streamlit as st\n'), ((4492, 4672), 'streamlit.error', 'st.error', (['"""Ollama is not configured properly, Make sure:\n\n1. You have installed Ollama.\n2. Ollama is running.\n3. You have downloaded an Ollama model like Mistral 7B."""'], {}), '(\n """Ollama is not configured properly, Make sure:\n\n1. You have installed Ollama.\n2. Ollama is running.\n3. 
You have downloaded an Ollama model like Mistral 7B."""\n )\n', (4500, 4672), True, 'import streamlit as st\n'), ((4850, 4893), 'streamlit.selectbox', 'st.selectbox', (['"""Ollama Model"""', 'OLLAMA_MODELS'], {}), "('Ollama Model', OLLAMA_MODELS)\n", (4862, 4893), True, 'import streamlit as st\n'), ((4963, 5002), 'streamlit.selectbox', 'st.selectbox', (['"""Groq Model"""', 'GROQ_MODELS'], {}), "('Groq Model', GROQ_MODELS)\n", (4975, 5002), True, 'import streamlit as st\n'), ((8021, 8031), 'streamlit.rerun', 'st.rerun', ([], {}), '()\n', (8029, 8031), True, 'import streamlit as st\n'), ((2683, 2707), 'streamlit.chat_message', 'st.chat_message', (['"""Human"""'], {}), "('Human')\n", (2698, 2707), True, 'import streamlit as st\n'), ((2725, 2750), 'streamlit.write', 'st.write', (['message.content'], {}), '(message.content)\n', (2733, 2750), True, 'import streamlit as st\n'), ((3615, 3658), 'subprocess.check_output', 'subprocess.check_output', (["['ollama', 'list']"], {}), "(['ollama', 'list'])\n", (3638, 3658), False, 'import subprocess\n'), ((3776, 3854), 'subprocess.check_output', 'subprocess.check_output', (["['docker', 'exec', '-it', 'ollama', 'ollama', 'list']"], {}), "(['docker', 'exec', '-it', 'ollama', 'ollama', 'list'])\n", (3799, 3854), False, 'import subprocess\n')]
import streamlit as st
import langchain
from langchain_community.chat_models import ChatOllama
from langchain.cache import InMemoryCache
from dotenv import load_dotenv
from langchain_community.embeddings import OllamaEmbeddings
import os
from PIL import Image
from chroma_main import answer_no_retriever

# Cache LLM calls in memory; the global langchain exposes for this is llm_cache.
langchain.llm_cache = InMemoryCache()

load_dotenv()

CHROMA_DB = "./chroma_db"
MODEL = os.getenv("MODEL", "llama2")
OLLAMA_BASE_URL = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")

llm = ChatOllama(base_url=OLLAMA_BASE_URL, model=MODEL, temperature=0.0)
ollama_embeddings = OllamaEmbeddings(base_url=OLLAMA_BASE_URL, model="codellama")

# Only clear the chat history when the button is actually clicked.
if st.button("clear history", type="primary"):
    st.session_state.messages = []

if "messages" not in st.session_state:
    st.session_state.messages = []

for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("What is your query?"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    st.chat_message("user").markdown(prompt)
    response = answer_no_retriever(prompt)
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        # message_placeholder.markdown(response + "▌")
        message_placeholder.markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})
[ "langchain_community.embeddings.OllamaEmbeddings", "langchain_community.chat_models.ChatOllama", "langchain.cache.InMemoryCache" ]
[((324, 339), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (337, 339), False, 'from langchain.cache import InMemoryCache\n'), ((341, 354), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (352, 354), False, 'from dotenv import load_dotenv\n'), ((390, 418), 'os.getenv', 'os.getenv', (['"""MODEL"""', '"""llama2"""'], {}), "('MODEL', 'llama2')\n", (399, 418), False, 'import os\n'), ((438, 492), 'os.getenv', 'os.getenv', (['"""OLLAMA_BASE_URL"""', '"""http://localhost:11434"""'], {}), "('OLLAMA_BASE_URL', 'http://localhost:11434')\n", (447, 492), False, 'import os\n'), ((499, 565), 'langchain_community.chat_models.ChatOllama', 'ChatOllama', ([], {'base_url': 'OLLAMA_BASE_URL', 'model': 'MODEL', 'temperature': '(0.0)'}), '(base_url=OLLAMA_BASE_URL, model=MODEL, temperature=0.0)\n', (509, 565), False, 'from langchain_community.chat_models import ChatOllama\n'), ((586, 647), 'langchain_community.embeddings.OllamaEmbeddings', 'OllamaEmbeddings', ([], {'base_url': 'OLLAMA_BASE_URL', 'model': '"""codellama"""'}), "(base_url=OLLAMA_BASE_URL, model='codellama')\n", (602, 647), False, 'from langchain_community.embeddings import OllamaEmbeddings\n'), ((651, 693), 'streamlit.button', 'st.button', (['"""clear history"""'], {'type': '"""primary"""'}), "('clear history', type='primary')\n", (660, 693), True, 'import streamlit as st\n'), ((968, 1004), 'streamlit.chat_input', 'st.chat_input', (['"""What is your query?"""'], {}), "('What is your query?')\n", (981, 1004), True, 'import streamlit as st\n'), ((1010, 1079), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (1042, 1079), True, 'import streamlit as st\n'), ((1149, 1176), 'chroma_main.answer_no_retriever', 'answer_no_retriever', (['prompt'], {}), '(prompt)\n', (1168, 1176), False, 'from chroma_main import answer_no_retriever\n'), ((1364, 1440), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': response}"], {}), "({'role': 'assistant', 'content': response})\n", (1396, 1440), True, 'import streamlit as st\n'), ((879, 911), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (894, 911), True, 'import streamlit as st\n'), ((921, 952), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (932, 952), True, 'import streamlit as st\n'), ((1187, 1215), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (1202, 1215), True, 'import streamlit as st\n'), ((1247, 1257), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (1255, 1257), True, 'import streamlit as st\n'), ((1084, 1107), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (1099, 1107), True, 'import streamlit as st\n')]
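The chat page above delegates to answer_no_retriever from a chroma_main module that is not shown in this record. A hedged sketch of such a function follows, assuming it is a single ChatOllama call with no retrieval over the Chroma store; only the name, the prompt argument and the Ollama settings reuse what the sample defines.

import os

from langchain_community.chat_models import ChatOllama
from langchain_core.messages import HumanMessage, SystemMessage

OLLAMA_BASE_URL = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
MODEL = os.getenv("MODEL", "llama2")


def answer_no_retriever(prompt: str) -> str:
    # Hypothetical stand-in for chroma_main.answer_no_retriever: a plain
    # chat-model call, without any document retrieval step.
    llm = ChatOllama(base_url=OLLAMA_BASE_URL, model=MODEL, temperature=0.0)
    reply = llm.invoke([
        SystemMessage(content="Answer the user's question concisely."),
        HumanMessage(content=prompt),
    ])
    return reply.content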
# Copyright 2023-2024 ByteBrain AI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import langchain
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.schema import Document
from langchain.vectorstores import FAISS

langchain.verbose = True
# langchain.debug=True

import asyncio

from core.utils.upgrade_sqlite import upgrade_sqlite_version

upgrade_sqlite_version()

embeddings: OpenAIEmbeddings = OpenAIEmbeddings()

texts = [
    "Scala is a functional Programming Language",
    "I love functional programming",
    "fp is too simple an is not hard to understand",
    "women must adore their husbands",
    "ZIO is a good library for writing fp apps",
    "Feminism is the belief that all genders should have equal rights and opportunities.",
    "This movement is about making the world a better place for everyone",
    "The purpose of ZIO Chat Bot is to provide list of ZIO Projects",
    "I've got a cold and I've sore throat",
    "ZIO chat bot is an open source project."
]

docs = [Document(page_content=t, metadata={"source": i}) for i, t in enumerate(texts)]
vectorstore = FAISS.from_documents(documents=docs, embedding=OpenAIEmbeddings())
retriever = vectorstore.as_retriever()

from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain

qa = load_qa_with_sources_chain(llm=OpenAI(), verbose=True)
question = 'what is the zio chat bot?'


async def run_qa():
    result = await qa._acall({'question': question, 'input_documents': retriever.get_relevant_documents(question)})
    print(result)
    print("Hello")


if __name__ == "__main__":
    asyncio.run(run_qa(), debug=True)
[ "langchain.schema.Document", "langchain.llms.OpenAI", "langchain.embeddings.openai.OpenAIEmbeddings" ]
[((896, 920), 'core.utils.upgrade_sqlite.upgrade_sqlite_version', 'upgrade_sqlite_version', ([], {}), '()\n', (918, 920), False, 'from core.utils.upgrade_sqlite import upgrade_sqlite_version\n'), ((952, 970), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (968, 970), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1547, 1595), 'langchain.schema.Document', 'Document', ([], {'page_content': 't', 'metadata': "{'source': i}"}), "(page_content=t, metadata={'source': i})\n", (1555, 1595), False, 'from langchain.schema import Document\n'), ((1688, 1706), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1704, 1706), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1865, 1873), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (1871, 1873), False, 'from langchain.llms import OpenAI\n')]
import logging from dotenv import load_dotenv from llama_index import VectorStoreIndex import pandas as pd from ragas.metrics import answer_relevancy from ragas.llama_index import evaluate from ragas.llms import LangchainLLM from langchain.chat_models import AzureChatOpenAI from langchain.embeddings import AzureOpenAIEmbeddings from app.llama_index.vector_store import setup_vector_store from app.llama_index.llm import setup_service_context from app.utils.env import get_env_variable from app.eval.constants import ( DATASET_JSON_PATH, EVAL_METRICS, EVAL_VECTOR_STORE_NAME, SERVICE_CONTEXT_VERSION, ) from app.eval.dataset_generation import generate_ragas_qr_pairs def setup_ragas_llm(): load_dotenv() try: api_key = get_env_variable("OPENAI_API_KEY") api_version = get_env_variable("OPENAI_API_VERSION") deployment_name = get_env_variable("OPENAI_DEPLOYMENT_NAME") except EnvironmentError as e: raise e azure_model = AzureChatOpenAI( deployment_name=deployment_name, model=api_version, openai_api_key=api_key, openai_api_type="azure", ) logging.info("Azure OpenAI model for Ragas successfully set up.") return LangchainLLM(azure_model) def setup_ragas_embeddings(): load_dotenv() try: deployment = get_env_variable("OPENAI_DEPLOYMENT_EMBEDDINGS") api_base = get_env_variable("OPENAI_API_BASE") api_key = get_env_variable("OPENAI_API_KEY") api_version = get_env_variable("OPENAI_API_VERSION") except EnvironmentError as e: raise e azure_embeddings = AzureOpenAIEmbeddings( azure_deployment=deployment, model="text-embedding-ada-002", openai_api_type="azure", openai_api_base=api_base, openai_api_key=api_key, openai_api_version=api_version, ) logging.info("Azure OpenAI Embeddings for Ragas successfully set up.") return azure_embeddings def run_ragas_evaluation(): eval_questions, eval_answers = generate_ragas_qr_pairs(DATASET_JSON_PATH) eval_embeddings = setup_ragas_embeddings() eval_llm = setup_ragas_llm() eval_vector_store = setup_vector_store(EVAL_VECTOR_STORE_NAME) eval_service_context = setup_service_context(SERVICE_CONTEXT_VERSION, azure=True) index = VectorStoreIndex.from_vector_store( vector_store=eval_vector_store, service_context=eval_service_context ) query_engine = index.as_query_engine() logging.info("Ragas evaluation successfully set up.") metrics = EVAL_METRICS answer_relevancy.embeddings = eval_embeddings for m in metrics: m.__setattr__("llm", eval_llm) m.__setattr__("embeddings", eval_embeddings) logging.info("Ragas metrics successfully set up.") result = evaluate(query_engine, metrics, eval_questions, eval_answers) logging.info("Ragas evaluation successfully finished.") df = result.to_pandas() df.to_csv("app/eval/eval_data/ragas_eval.csv", index=False) logging.info("Ragas evaluation successfully saved to csv file.") eval = pd.read_csv("app/eval/eval_data/ragas_eval.csv", sep=",") logging.info("Ragas evaluation successfully finished.") return eval
[ "langchain.embeddings.AzureOpenAIEmbeddings", "langchain.chat_models.AzureChatOpenAI" ]
[((718, 731), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (729, 731), False, 'from dotenv import load_dotenv\n'), ((993, 1113), 'langchain.chat_models.AzureChatOpenAI', 'AzureChatOpenAI', ([], {'deployment_name': 'deployment_name', 'model': 'api_version', 'openai_api_key': 'api_key', 'openai_api_type': '"""azure"""'}), "(deployment_name=deployment_name, model=api_version,\n openai_api_key=api_key, openai_api_type='azure')\n", (1008, 1113), False, 'from langchain.chat_models import AzureChatOpenAI\n'), ((1153, 1218), 'logging.info', 'logging.info', (['"""Azure OpenAI model for Ragas successfully set up."""'], {}), "('Azure OpenAI model for Ragas successfully set up.')\n", (1165, 1218), False, 'import logging\n'), ((1230, 1255), 'ragas.llms.LangchainLLM', 'LangchainLLM', (['azure_model'], {}), '(azure_model)\n', (1242, 1255), False, 'from ragas.llms import LangchainLLM\n'), ((1292, 1305), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1303, 1305), False, 'from dotenv import load_dotenv\n'), ((1628, 1827), 'langchain.embeddings.AzureOpenAIEmbeddings', 'AzureOpenAIEmbeddings', ([], {'azure_deployment': 'deployment', 'model': '"""text-embedding-ada-002"""', 'openai_api_type': '"""azure"""', 'openai_api_base': 'api_base', 'openai_api_key': 'api_key', 'openai_api_version': 'api_version'}), "(azure_deployment=deployment, model=\n 'text-embedding-ada-002', openai_api_type='azure', openai_api_base=\n api_base, openai_api_key=api_key, openai_api_version=api_version)\n", (1649, 1827), False, 'from langchain.embeddings import AzureOpenAIEmbeddings\n'), ((1877, 1947), 'logging.info', 'logging.info', (['"""Azure OpenAI Embeddings for Ragas successfully set up."""'], {}), "('Azure OpenAI Embeddings for Ragas successfully set up.')\n", (1889, 1947), False, 'import logging\n'), ((2041, 2083), 'app.eval.dataset_generation.generate_ragas_qr_pairs', 'generate_ragas_qr_pairs', (['DATASET_JSON_PATH'], {}), '(DATASET_JSON_PATH)\n', (2064, 2083), False, 'from app.eval.dataset_generation import generate_ragas_qr_pairs\n'), ((2188, 2230), 'app.llama_index.vector_store.setup_vector_store', 'setup_vector_store', (['EVAL_VECTOR_STORE_NAME'], {}), '(EVAL_VECTOR_STORE_NAME)\n', (2206, 2230), False, 'from app.llama_index.vector_store import setup_vector_store\n'), ((2258, 2316), 'app.llama_index.llm.setup_service_context', 'setup_service_context', (['SERVICE_CONTEXT_VERSION'], {'azure': '(True)'}), '(SERVICE_CONTEXT_VERSION, azure=True)\n', (2279, 2316), False, 'from app.llama_index.llm import setup_service_context\n'), ((2329, 2437), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'eval_vector_store', 'service_context': 'eval_service_context'}), '(vector_store=eval_vector_store,\n service_context=eval_service_context)\n', (2363, 2437), False, 'from llama_index import VectorStoreIndex\n'), ((2495, 2548), 'logging.info', 'logging.info', (['"""Ragas evaluation successfully set up."""'], {}), "('Ragas evaluation successfully set up.')\n", (2507, 2548), False, 'import logging\n'), ((2745, 2795), 'logging.info', 'logging.info', (['"""Ragas metrics successfully set up."""'], {}), "('Ragas metrics successfully set up.')\n", (2757, 2795), False, 'import logging\n'), ((2809, 2870), 'ragas.llama_index.evaluate', 'evaluate', (['query_engine', 'metrics', 'eval_questions', 'eval_answers'], {}), '(query_engine, metrics, eval_questions, eval_answers)\n', (2817, 2870), False, 'from ragas.llama_index import evaluate\n'), ((2875, 2930), 'logging.info', 
'logging.info', (['"""Ragas evaluation successfully finished."""'], {}), "('Ragas evaluation successfully finished.')\n", (2887, 2930), False, 'import logging\n'), ((3027, 3091), 'logging.info', 'logging.info', (['"""Ragas evaluation successfully saved to csv file."""'], {}), "('Ragas evaluation successfully saved to csv file.')\n", (3039, 3091), False, 'import logging\n'), ((3103, 3160), 'pandas.read_csv', 'pd.read_csv', (['"""app/eval/eval_data/ragas_eval.csv"""'], {'sep': '""","""'}), "('app/eval/eval_data/ragas_eval.csv', sep=',')\n", (3114, 3160), True, 'import pandas as pd\n'), ((3165, 3220), 'logging.info', 'logging.info', (['"""Ragas evaluation successfully finished."""'], {}), "('Ragas evaluation successfully finished.')\n", (3177, 3220), False, 'import logging\n'), ((759, 793), 'app.utils.env.get_env_variable', 'get_env_variable', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (775, 793), False, 'from app.utils.env import get_env_variable\n'), ((816, 854), 'app.utils.env.get_env_variable', 'get_env_variable', (['"""OPENAI_API_VERSION"""'], {}), "('OPENAI_API_VERSION')\n", (832, 854), False, 'from app.utils.env import get_env_variable\n'), ((881, 923), 'app.utils.env.get_env_variable', 'get_env_variable', (['"""OPENAI_DEPLOYMENT_NAME"""'], {}), "('OPENAI_DEPLOYMENT_NAME')\n", (897, 923), False, 'from app.utils.env import get_env_variable\n'), ((1336, 1384), 'app.utils.env.get_env_variable', 'get_env_variable', (['"""OPENAI_DEPLOYMENT_EMBEDDINGS"""'], {}), "('OPENAI_DEPLOYMENT_EMBEDDINGS')\n", (1352, 1384), False, 'from app.utils.env import get_env_variable\n'), ((1404, 1439), 'app.utils.env.get_env_variable', 'get_env_variable', (['"""OPENAI_API_BASE"""'], {}), "('OPENAI_API_BASE')\n", (1420, 1439), False, 'from app.utils.env import get_env_variable\n'), ((1458, 1492), 'app.utils.env.get_env_variable', 'get_env_variable', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1474, 1492), False, 'from app.utils.env import get_env_variable\n'), ((1515, 1553), 'app.utils.env.get_env_variable', 'get_env_variable', (['"""OPENAI_API_VERSION"""'], {}), "('OPENAI_API_VERSION')\n", (1531, 1553), False, 'from app.utils.env import get_env_variable\n')]
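run_ragas_evaluation above relies on generate_ragas_qr_pairs from app.eval.dataset_generation, which is not part of this record. A plausible sketch follows, assuming the dataset JSON simply stores question / ground-truth pairs; the field names and file layout are guesses, not the project's actual schema.

import json


def generate_ragas_qr_pairs(dataset_json_path: str):
    # Hypothetical reading of the evaluation dataset: a JSON list of
    # {"question": ..., "ground_truth": ...} records is assumed here.
    with open(dataset_json_path, encoding="utf-8") as f:
        records = json.load(f)
    eval_questions = [record["question"] for record in records]
    eval_answers = [record["ground_truth"] for record in records]
    return eval_questions, eval_answers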
from fastapi_poe import make_app
from modal import Image, Stub, asgi_app
from langchain_openai import LangchainOpenAIChatBot

# A OpenAI powered chatbot built using Langchain.
OPENAI_API_KEY = "YOUR API KEY"
bot = LangchainOpenAIChatBot(OPENAI_API_KEY=OPENAI_API_KEY)

# The following is setup code that is required to host with modal.com
image = Image.debian_slim().pip_install_from_requirements("requirements.txt")
# Rename "poe-server-bot-quick-start" to your preferred app name.
stub = Stub("poe-server-bot-quick-start")


@stub.function(image=image)
@asgi_app()
def fastapi_app():
    # Optionally, provide your Poe access key here:
    # 1. You can go to https://poe.com/create_bot?server=1 to generate an access key.
    # 2. We strongly recommend using a key for a production bot to prevent abuse,
    #    but the starter example disables the key check for convenience.
    # 3. You can also store your access key on modal.com and retrieve it in this function
    #    by following the instructions at: https://modal.com/docs/guide/secrets
    POE_ACCESS_KEY = "YOUR ACCESS KEY"
    app = make_app(bot, access_key=POE_ACCESS_KEY)
    return app
[ "langchain_openai.LangchainOpenAIChatBot" ]
[((215, 268), 'langchain_openai.LangchainOpenAIChatBot', 'LangchainOpenAIChatBot', ([], {'OPENAI_API_KEY': 'OPENAI_API_KEY'}), '(OPENAI_API_KEY=OPENAI_API_KEY)\n', (237, 268), False, 'from langchain_openai import LangchainOpenAIChatBot\n'), ((491, 525), 'modal.Stub', 'Stub', (['"""poe-server-bot-quick-start"""'], {}), "('poe-server-bot-quick-start')\n", (495, 525), False, 'from modal import Image, Stub, asgi_app\n'), ((557, 567), 'modal.asgi_app', 'asgi_app', ([], {}), '()\n', (565, 567), False, 'from modal import Image, Stub, asgi_app\n'), ((1093, 1133), 'fastapi_poe.make_app', 'make_app', (['bot'], {'access_key': 'POE_ACCESS_KEY'}), '(bot, access_key=POE_ACCESS_KEY)\n', (1101, 1133), False, 'from fastapi_poe import make_app\n'), ((348, 367), 'modal.Image.debian_slim', 'Image.debian_slim', ([], {}), '()\n', (365, 367), False, 'from modal import Image, Stub, asgi_app\n')]
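LangchainOpenAIChatBot is imported from a local langchain_openai module that is not shown in this record (it is not a class of the PyPI package of the same name). The sketch below is only an assumption about how such a bot might subclass fastapi_poe's PoeBot and wrap a langchain chat model; the constructor arguments and the model choice are illustrative, and the response handling follows the general fastapi_poe quick-start pattern rather than this repo's actual code.

import fastapi_poe as fp
from langchain.chat_models import ChatOpenAI


class LangchainOpenAIChatBot(fp.PoeBot):
    # Hypothetical sketch of the class imported above.
    def __init__(self, OPENAI_API_KEY: str):
        super().__init__()
        self.llm = ChatOpenAI(openai_api_key=OPENAI_API_KEY, temperature=0)

    async def get_response(self, request: fp.QueryRequest):
        # Poe sends the whole conversation; answer the latest user turn.
        last_user_message = request.query[-1].content
        answer = self.llm.predict(last_user_message)
        yield fp.PartialResponse(text=answer)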
import os import uuid import langchain import requests import streamlit as st from dotenv import load_dotenv, find_dotenv from langchain_community.callbacks import get_openai_callback from langchain.schema import HumanMessage, AIMessage from playsound import playsound from streamlit_chat import message from advisor.agents import init_convo_agent langchain.debug = True def init(): load_dotenv(find_dotenv()) st.set_page_config( page_title="Your Restaurant Advisor", page_icon="👩‍🍳", ) st.header("Your Restaurant Advisor 👩‍🍳") hide_streamlit_style = """ <style> #MainMenu {visibility: hidden;} footer {visibility: hidden;} </style> """ st.markdown(hide_streamlit_style, unsafe_allow_html=True) def setup_agent(): if 'agent' not in st.session_state: random_session_id = str(uuid.uuid4()) st.session_state.agent = init_convo_agent(random_session_id) def get_response_from_ai(human_input): setup_agent() print("="*20) with get_openai_callback() as cb: result = st.session_state.agent.run(human_input) print("Cost:", cb) return result def get_voice_message(message): payload = { "text": message, "model_id": "eleven_monolingual_v1", "voice_settings": { "stability": 0, "similarity_boost": 0, } } headers = { "accept": "audio/mpeg", "xi-api-key": os.getenv("ELEVEN_LABS_API_KEY"), "Content-Type": "application/json" } response = requests.post('https://api.elevenlabs.io/v1/text-to-speech/21m00Tcm4TlvDq8ikWAM?optimize_streaming_latency=0', json=payload, headers=headers) if response.status_code == 200 and response.content: with open("audio.mp3", "wb") as f: f.write(response.content) playsound("audio.mp3") return response.content def main(): init() with st.sidebar: user_input = st.text_input("your message", value="") if "messages" not in st.session_state: st.session_state.messages = [] if user_input: st.session_state.messages.append(HumanMessage(content=user_input)) with st.spinner("Thinking..."): response = get_response_from_ai(user_input) # get_voice_message(response) st.session_state.messages.append(AIMessage(content=response)) messages = st.session_state.get('messages', []) for i, msg in enumerate(messages): if i % 2 == 0: message(msg.content, is_user=True, avatar_style="thumbs", key=str(i) + "_user") else: message(msg.content, is_user=False, avatar_style="avataaars", key=str(i) + "_ai") if __name__ == "__main__": main()
[ "langchain.schema.AIMessage", "langchain_community.callbacks.get_openai_callback", "langchain.schema.HumanMessage" ]
[((424, 502), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Your Restaurant Advisor"""', 'page_icon': '"""👩\u200d🍳"""'}), "(page_title='Your Restaurant Advisor', page_icon='👩\\u200d🍳')\n", (442, 502), True, 'import streamlit as st\n'), ((525, 570), 'streamlit.header', 'st.header', (['"""Your Restaurant Advisor 👩\u200d🍳"""'], {}), "('Your Restaurant Advisor 👩\\u200d🍳')\n", (534, 570), True, 'import streamlit as st\n'), ((744, 801), 'streamlit.markdown', 'st.markdown', (['hide_streamlit_style'], {'unsafe_allow_html': '(True)'}), '(hide_streamlit_style, unsafe_allow_html=True)\n', (755, 801), True, 'import streamlit as st\n'), ((1596, 1747), 'requests.post', 'requests.post', (['"""https://api.elevenlabs.io/v1/text-to-speech/21m00Tcm4TlvDq8ikWAM?optimize_streaming_latency=0"""'], {'json': 'payload', 'headers': 'headers'}), "(\n 'https://api.elevenlabs.io/v1/text-to-speech/21m00Tcm4TlvDq8ikWAM?optimize_streaming_latency=0'\n , json=payload, headers=headers)\n", (1609, 1747), False, 'import requests\n'), ((2453, 2489), 'streamlit.session_state.get', 'st.session_state.get', (['"""messages"""', '[]'], {}), "('messages', [])\n", (2473, 2489), True, 'import streamlit as st\n'), ((404, 417), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (415, 417), False, 'from dotenv import load_dotenv, find_dotenv\n'), ((942, 977), 'advisor.agents.init_convo_agent', 'init_convo_agent', (['random_session_id'], {}), '(random_session_id)\n', (958, 977), False, 'from advisor.agents import init_convo_agent\n'), ((1064, 1085), 'langchain_community.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (1083, 1085), False, 'from langchain_community.callbacks import get_openai_callback\n'), ((1110, 1149), 'streamlit.session_state.agent.run', 'st.session_state.agent.run', (['human_input'], {}), '(human_input)\n', (1136, 1149), True, 'import streamlit as st\n'), ((1497, 1529), 'os.getenv', 'os.getenv', (['"""ELEVEN_LABS_API_KEY"""'], {}), "('ELEVEN_LABS_API_KEY')\n", (1506, 1529), False, 'import os\n'), ((1884, 1906), 'playsound.playsound', 'playsound', (['"""audio.mp3"""'], {}), "('audio.mp3')\n", (1893, 1906), False, 'from playsound import playsound\n'), ((2007, 2046), 'streamlit.text_input', 'st.text_input', (['"""your message"""'], {'value': '""""""'}), "('your message', value='')\n", (2020, 2046), True, 'import streamlit as st\n'), ((895, 907), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (905, 907), False, 'import uuid\n'), ((2191, 2223), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'user_input'}), '(content=user_input)\n', (2203, 2223), False, 'from langchain.schema import HumanMessage, AIMessage\n'), ((2238, 2263), 'streamlit.spinner', 'st.spinner', (['"""Thinking..."""'], {}), "('Thinking...')\n", (2248, 2263), True, 'import streamlit as st\n'), ((2408, 2435), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'response'}), '(content=response)\n', (2417, 2435), False, 'from langchain.schema import HumanMessage, AIMessage\n')]
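The advisor app above builds its agent with init_convo_agent from advisor.agents, which is not included here. The sketch below is only an assumption about how a session-scoped conversational agent could be initialized; the empty tool list, the model, the memory type and the agent type are illustrative choices, not the author's implementation.

from langchain.agents import AgentType, initialize_agent
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory


def init_convo_agent(session_id: str):
    # Hypothetical stand-in for advisor.agents.init_convo_agent; the session id
    # would normally key a persistent chat-history store.
    llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.7)
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
    return initialize_agent(
        tools=[],  # the real advisor presumably registers restaurant-search tools here
        llm=llm,
        agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
        memory=memory,
        verbose=True,
    )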
import langchain from langchain.agents.agent_toolkits import ( create_conversational_retrieval_agent, create_retriever_tool) from langchain.chat_models import ChatOpenAI from langchain.embeddings import OpenAIEmbeddings from langchain.schema.messages import SystemMessage from langchain.vectorstores import FAISS from langchain.cache import SQLiteCache from langchain.callbacks import get_openai_callback SYS_PATH_LOCAL = '/workspaces/b3rn_zero_streamlit' SYS_PATH_STREAMLIT = '/app/b3rn_zero_streamlit/' SYS_PATH = SYS_PATH_STREAMLIT langchain.llm_cache = SQLiteCache(database_path=f"{SYS_PATH}/data/langchain_cache.db") def ask_agent__eak(query, openai_api_key, sys_path, model='gpt-4'): '''Display the answer to a question.''' embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key) new_db = FAISS.load_local( f'{sys_path}/data/vectorstores/eak_admin_ch_defaultdocs_faiss_index_4096', embeddings) retriever = new_db.as_retriever() tool = create_retriever_tool( retriever, "content_of_eak_website", """ This tool is designed for an LLM that interacts with the content of the EAK website to retrieve documents. The EAK acts as a compensation fund for various federal entities. Its main responsibility is overseeing the implementation of the 1st pillar (AHV/IV) and the family compensation fund. The tool offers services related to: - Insurance - Contributions - Employer regulations - Pensions Furthermore, it provides insights into family allowances and facilitates electronic data exchange with the EAK via connect.eak. """ ) tools = [tool] system_message = SystemMessage( content=""" You are an expert for the eak_admin_website and: - Always answer questions citing the source. - The source is the URL you receive as a response from the eak_admin_website tool. - If you don't know an answer, state: "No source available, thus no answer possible". - Never invent URLs. Only use URLs from eak_admin_website. - Always respond in German. """ ) llm = ChatOpenAI(openai_api_key=openai_api_key, model=model, temperature=0, n=10, verbose=True) agent_executor = create_conversational_retrieval_agent( llm, tools, verbose=False, system_message=system_message, max_token_limit=3000) # heikel print(f"\nFrage: {query}") with get_openai_callback() as callback: answer = agent_executor({"input": query}) print(f"\nAntwort: {answer['output']}\n\n") print(f"Total Tokens: {callback.total_tokens}") print(f"Prompt Tokens: {callback.prompt_tokens}") print(f"Completion Tokens: {callback.completion_tokens}") print(f"Total Cost (USD): ${callback.total_cost}") return answer def ask_agent__chch(query, openai_api_key, sys_path, model='gpt-4'): '''Display the answer to a question.''' embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key) # new_db1 = FAISS.load_local( # f'{sys_path}/data/vectorstores/eak_admin_ch_defaultdocs_faiss_index_4096', # embeddings) # new_db2 = FAISS.load_local( # f'{sys_path}/data/vectorstores/eak_admin_ch_defaultdocs_faiss_index_512', # embeddings) new_db3 = FAISS.load_local( f'{sys_path}/data/vectorstores/ch_ch_texts_faiss_index_4096', embeddings) # new_db1.merge_from(new_db2) # new_db1.merge_from(new_db3) new_db = new_db3 retriever = new_db.as_retriever() tool = create_retriever_tool( retriever, "content_of_chch_website", """ This tool is designed for an LLM that interacts with the content of the ch.ch website to retrieve documents. The chch acts as a information hub for various federal entities. A service of the Confederation, cantons and communes. 
The tool offers services related to: "Easy answers about life in Switzerland" The ch.ch portal is an information platform provided by the Swiss authorities. In just a few clicks, you will find straightforward answers in five languages to questions that many of you ask the authorities. """ ) tools = [tool] system_message = SystemMessage( content=""" You are an expert on the chch_website and: - Always answer questions by citing the source. - The source is the URL you receive as an answer from the content_of_chch_website tool. - If you do not know an answer, indicate "No source available, therefore no answer possible". - Never make up URLs. Only use URLs from the content_of_chch_website. - Always answer in German. """ ) llm = ChatOpenAI(openai_api_key=openai_api_key, model=model, temperature=0, n=10, verbose=True) agent_executor = create_conversational_retrieval_agent( llm, tools, verbose=False, system_message=system_message, max_token_limit=3000) # heikel print(f"\nFrage: {query}") with get_openai_callback() as callback: answer = agent_executor({"input": query}) print(f"\nAntwort: {answer['output']}\n\n") print(f"Total Tokens: {callback.total_tokens}") print(f"Prompt Tokens: {callback.prompt_tokens}") print(f"Completion Tokens: {callback.completion_tokens}") print(f"Total Cost (USD): ${callback.total_cost}") return answer if __name__ == "__main__": QUESTIONS = [ "Wann bezahlt die EAK jeweils die Rente aus?", "Was ist das SECO?", "Wer ist Kassenleiterin oder Kassenleiter der EAK?", ] for question in QUESTIONS: OPENAPI_API_KEY = "YOUR_API_KEY" SYS_PATH = "YOUR_SYSTEM_PATH" ask_agent__eak(question, OPENAPI_API_KEY, SYS_PATH)
[ "langchain.vectorstores.FAISS.load_local", "langchain.cache.SQLiteCache", "langchain.chat_models.ChatOpenAI", "langchain.agents.agent_toolkits.create_conversational_retrieval_agent", "langchain.agents.agent_toolkits.create_retriever_tool", "langchain.schema.messages.SystemMessage", "langchain.callbacks.get_openai_callback", "langchain.embeddings.OpenAIEmbeddings" ]
[((562, 626), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': 'f"""{SYS_PATH}/data/langchain_cache.db"""'}), "(database_path=f'{SYS_PATH}/data/langchain_cache.db')\n", (573, 626), False, 'from langchain.cache import SQLiteCache\n'), ((757, 804), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'openai_api_key'}), '(openai_api_key=openai_api_key)\n', (773, 804), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((819, 931), 'langchain.vectorstores.FAISS.load_local', 'FAISS.load_local', (['f"""{sys_path}/data/vectorstores/eak_admin_ch_defaultdocs_faiss_index_4096"""', 'embeddings'], {}), "(\n f'{sys_path}/data/vectorstores/eak_admin_ch_defaultdocs_faiss_index_4096',\n embeddings)\n", (835, 931), False, 'from langchain.vectorstores import FAISS\n'), ((991, 1711), 'langchain.agents.agent_toolkits.create_retriever_tool', 'create_retriever_tool', (['retriever', '"""content_of_eak_website"""', '"""\n This tool is designed for an LLM that interacts with \n the content of the EAK website to retrieve documents. \n The EAK acts as a compensation fund for various federal entities. \n Its main responsibility is overseeing the implementation of \n the 1st pillar (AHV/IV) and the family compensation fund. \n The tool offers services related to:\n - Insurance\n - Contributions\n - Employer regulations\n - Pensions\n Furthermore, it provides insights into family allowances and \n facilitates electronic data exchange with the EAK via connect.eak.\n """'], {}), '(retriever, \'content_of_eak_website\',\n """\n This tool is designed for an LLM that interacts with \n the content of the EAK website to retrieve documents. \n The EAK acts as a compensation fund for various federal entities. \n Its main responsibility is overseeing the implementation of \n the 1st pillar (AHV/IV) and the family compensation fund. \n The tool offers services related to:\n - Insurance\n - Contributions\n - Employer regulations\n - Pensions\n Furthermore, it provides insights into family allowances and \n facilitates electronic data exchange with the EAK via connect.eak.\n """\n )\n', (1012, 1711), False, 'from langchain.agents.agent_toolkits import create_conversational_retrieval_agent, create_retriever_tool\n'), ((1774, 2220), 'langchain.schema.messages.SystemMessage', 'SystemMessage', ([], {'content': '"""\n You are an expert for the eak_admin_website and:\n - Always answer questions citing the source.\n - The source is the URL you receive as a response from the eak_admin_website tool.\n - If you don\'t know an answer, state: "No source available, thus no answer possible".\n - Never invent URLs. Only use URLs from eak_admin_website.\n - Always respond in German.\n """'}), '(content=\n """\n You are an expert for the eak_admin_website and:\n - Always answer questions citing the source.\n - The source is the URL you receive as a response from the eak_admin_website tool.\n - If you don\'t know an answer, state: "No source available, thus no answer possible".\n - Never invent URLs. 
Only use URLs from eak_admin_website.\n - Always respond in German.\n """\n )\n', (1787, 2220), False, 'from langchain.schema.messages import SystemMessage\n'), ((2236, 2329), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'openai_api_key', 'model': 'model', 'temperature': '(0)', 'n': '(10)', 'verbose': '(True)'}), '(openai_api_key=openai_api_key, model=model, temperature=0, n=10,\n verbose=True)\n', (2246, 2329), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2432, 2553), 'langchain.agents.agent_toolkits.create_conversational_retrieval_agent', 'create_conversational_retrieval_agent', (['llm', 'tools'], {'verbose': '(False)', 'system_message': 'system_message', 'max_token_limit': '(3000)'}), '(llm, tools, verbose=False,\n system_message=system_message, max_token_limit=3000)\n', (2469, 2553), False, 'from langchain.agents.agent_toolkits import create_conversational_retrieval_agent, create_retriever_tool\n'), ((3171, 3218), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'openai_api_key'}), '(openai_api_key=openai_api_key)\n', (3187, 3218), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((3522, 3616), 'langchain.vectorstores.FAISS.load_local', 'FAISS.load_local', (['f"""{sys_path}/data/vectorstores/ch_ch_texts_faiss_index_4096"""', 'embeddings'], {}), "(f'{sys_path}/data/vectorstores/ch_ch_texts_faiss_index_4096',\n embeddings)\n", (3538, 3616), False, 'from langchain.vectorstores import FAISS\n'), ((3772, 4457), 'langchain.agents.agent_toolkits.create_retriever_tool', 'create_retriever_tool', (['retriever', '"""content_of_chch_website"""', '"""\n This tool is designed for an LLM that interacts with \n the content of the ch.ch website to retrieve documents. \n The chch acts as a information hub for various federal entities. \n A service of the Confederation, cantons and communes.\n The tool offers services related to:\n "Easy answers about life in Switzerland"\n The ch.ch portal is an information platform provided by \n the Swiss authorities. In just a few clicks, you will find \n straightforward answers in five languages to questions \n that many of you ask the authorities.\n """'], {}), '(retriever, \'content_of_chch_website\',\n """\n This tool is designed for an LLM that interacts with \n the content of the ch.ch website to retrieve documents. \n The chch acts as a information hub for various federal entities. \n A service of the Confederation, cantons and communes.\n The tool offers services related to:\n "Easy answers about life in Switzerland"\n The ch.ch portal is an information platform provided by \n the Swiss authorities. In just a few clicks, you will find \n straightforward answers in five languages to questions \n that many of you ask the authorities.\n """\n )\n', (3793, 4457), False, 'from langchain.agents.agent_toolkits import create_conversational_retrieval_agent, create_retriever_tool\n'), ((4520, 4986), 'langchain.schema.messages.SystemMessage', 'SystemMessage', ([], {'content': '"""\n You are an expert on the chch_website and:\n - Always answer questions by citing the source.\n - The source is the URL you receive as an answer from the content_of_chch_website tool.\n - If you do not know an answer, indicate "No source available, therefore no answer possible".\n - Never make up URLs. 
Only use URLs from the content_of_chch_website.\n - Always answer in German.\n """'}), '(content=\n """\n You are an expert on the chch_website and:\n - Always answer questions by citing the source.\n - The source is the URL you receive as an answer from the content_of_chch_website tool.\n - If you do not know an answer, indicate "No source available, therefore no answer possible".\n - Never make up URLs. Only use URLs from the content_of_chch_website.\n - Always answer in German.\n """\n )\n', (4533, 4986), False, 'from langchain.schema.messages import SystemMessage\n'), ((5002, 5095), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'openai_api_key', 'model': 'model', 'temperature': '(0)', 'n': '(10)', 'verbose': '(True)'}), '(openai_api_key=openai_api_key, model=model, temperature=0, n=10,\n verbose=True)\n', (5012, 5095), False, 'from langchain.chat_models import ChatOpenAI\n'), ((5198, 5319), 'langchain.agents.agent_toolkits.create_conversational_retrieval_agent', 'create_conversational_retrieval_agent', (['llm', 'tools'], {'verbose': '(False)', 'system_message': 'system_message', 'max_token_limit': '(3000)'}), '(llm, tools, verbose=False,\n system_message=system_message, max_token_limit=3000)\n', (5235, 5319), False, 'from langchain.agents.agent_toolkits import create_conversational_retrieval_agent, create_retriever_tool\n'), ((2645, 2666), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (2664, 2666), False, 'from langchain.callbacks import get_openai_callback\n'), ((5411, 5432), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (5430, 5432), False, 'from langchain.callbacks import get_openai_callback\n')]
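The ask_agent__* functions in the record above only load pre-built FAISS indexes with FAISS.load_local. As a rough sketch of how such an index could be produced (the source file, chunk size and loader choice are assumptions, not taken from the original project), a build step might look like this:

from langchain.document_loaders import TextLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS

def build_faiss_index(source_txt: str, index_dir: str, openai_api_key: str) -> None:
    # Load the raw text, split it into chunks and embed them into a FAISS store.
    docs = TextLoader(source_txt, encoding='utf8').load()
    splitter = RecursiveCharacterTextSplitter(chunk_size=4096, chunk_overlap=200)  # 4096 mirrors the "_4096" index names
    chunks = splitter.split_documents(docs)
    db = FAISS.from_documents(chunks, OpenAIEmbeddings(openai_api_key=openai_api_key))
    db.save_local(index_dir)  # produces the directory that FAISS.load_local expects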
"""Create a ChatVectorDBChain for question/answering.""" from langchain.callbacks.manager import AsyncCallbackManager from langchain.callbacks.tracers import LangChainTracer from langchain.chains import ChatVectorDBChain from langchain.chains.chat_vector_db.prompts import (CONDENSE_QUESTION_PROMPT, QA_PROMPT) from langchain.chains.llm import LLMChain from langchain.chains.question_answering import load_qa_chain from langchain.llms import OpenAI from langchain.vectorstores.base import VectorStore def get_chain( vectorstore: VectorStore, question_handler, stream_handler, tracing: bool = False ) -> ChatVectorDBChain: """Create a ChatVectorDBChain for question/answering.""" # Construct a ChatVectorDBChain with a streaming llm for combine docs # and a separate, non-streaming llm for question generation manager = AsyncCallbackManager([]) question_manager = AsyncCallbackManager([question_handler]) stream_manager = AsyncCallbackManager([stream_handler]) if tracing: tracer = LangChainTracer() tracer.load_default_session() manager.add_handler(tracer) question_manager.add_handler(tracer) stream_manager.add_handler(tracer) question_gen_llm = OpenAI( temperature=0, verbose=True, callback_manager=question_manager, ) streaming_llm = OpenAI( streaming=True, callback_manager=stream_manager, verbose=True, temperature=0, ) question_generator = LLMChain( llm=question_gen_llm, prompt=CONDENSE_QUESTION_PROMPT, callback_manager=manager ) doc_chain = load_qa_chain( streaming_llm, chain_type="stuff", prompt=QA_PROMPT, callback_manager=manager ) qa = ChatVectorDBChain( vectorstore=vectorstore, combine_docs_chain=doc_chain, question_generator=question_generator, callback_manager=manager, ) return qa
[ "langchain.chains.question_answering.load_qa_chain", "langchain.callbacks.tracers.LangChainTracer", "langchain.chains.ChatVectorDBChain", "langchain.callbacks.manager.AsyncCallbackManager", "langchain.llms.OpenAI", "langchain.chains.llm.LLMChain" ]
[((894, 918), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[]'], {}), '([])\n', (914, 918), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((942, 982), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[question_handler]'], {}), '([question_handler])\n', (962, 982), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((1004, 1042), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[stream_handler]'], {}), '([stream_handler])\n', (1024, 1042), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((1280, 1350), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'verbose': '(True)', 'callback_manager': 'question_manager'}), '(temperature=0, verbose=True, callback_manager=question_manager)\n', (1286, 1350), False, 'from langchain.llms import OpenAI\n'), ((1402, 1490), 'langchain.llms.OpenAI', 'OpenAI', ([], {'streaming': '(True)', 'callback_manager': 'stream_manager', 'verbose': '(True)', 'temperature': '(0)'}), '(streaming=True, callback_manager=stream_manager, verbose=True,\n temperature=0)\n', (1408, 1490), False, 'from langchain.llms import OpenAI\n'), ((1552, 1645), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'question_gen_llm', 'prompt': 'CONDENSE_QUESTION_PROMPT', 'callback_manager': 'manager'}), '(llm=question_gen_llm, prompt=CONDENSE_QUESTION_PROMPT,\n callback_manager=manager)\n', (1560, 1645), False, 'from langchain.chains.llm import LLMChain\n'), ((1672, 1768), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['streaming_llm'], {'chain_type': '"""stuff"""', 'prompt': 'QA_PROMPT', 'callback_manager': 'manager'}), "(streaming_llm, chain_type='stuff', prompt=QA_PROMPT,\n callback_manager=manager)\n", (1685, 1768), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((1789, 1930), 'langchain.chains.ChatVectorDBChain', 'ChatVectorDBChain', ([], {'vectorstore': 'vectorstore', 'combine_docs_chain': 'doc_chain', 'question_generator': 'question_generator', 'callback_manager': 'manager'}), '(vectorstore=vectorstore, combine_docs_chain=doc_chain,\n question_generator=question_generator, callback_manager=manager)\n', (1806, 1930), False, 'from langchain.chains import ChatVectorDBChain\n'), ((1076, 1093), 'langchain.callbacks.tracers.LangChainTracer', 'LangChainTracer', ([], {}), '()\n', (1091, 1093), False, 'from langchain.callbacks.tracers import LangChainTracer\n')]
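A minimal usage sketch for the get_chain factory above; the vectorstore and the two callback handlers are placeholders, and ChatVectorDBChain expects both the question and the chat history in its input dict:

async def answer_question(vectorstore, question_handler, stream_handler) -> str:
    qa = get_chain(vectorstore, question_handler, stream_handler)
    # The chain is wired with async callback managers, so invoke it asynchronously.
    result = await qa.acall({"question": "What does this codebase do?", "chat_history": []})
    return result["answer"]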
import os.path import chromadb import langchain.embeddings import win32com.client from langchain.chains import RetrievalQA from langchain.chat_models import ChatOpenAI from langchain.embeddings import OpenAIEmbeddings from langchain.llms import OpenAI from langchain.document_loaders import TextLoader from langchain.document_loaders import PyPDFLoader from langchain.document_loaders import UnstructuredPDFLoader from langchain.indexes import VectorstoreIndexCreator from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter from langchain.vectorstores import Chroma import json from os.path import isdir, isfile, join from os import listdir import openai from optiondata import Option_data from signalManager import SignalManager loaded = False dbFolder = './vectorstore/' workspace = './workspace/' metadatas = dict() chroma: Chroma option_data = Option_data() openai.api_key = option_data.openai_api_key option_data.optionSignals.changed_checked_api.connect(lambda: reloadDB()) def getExtension(fname: str) -> str: spl = fname.split('.') if len(spl) == 1: return '' return spl[-1] def processFile(rootpath, path, fname, documents): ext = getExtension(fname) allPath = join(rootpath, path) text_splitter = RecursiveCharacterTextSplitter( chunk_size=400, chunk_overlap=0, separators=['\n\n', '\n', ' ', ''] ) if ext == 'txt': loader = TextLoader(join(allPath, fname), encoding='utf8') document = loader.load() print('loaded file {0} with TextLoader.'.format(fname)) elif ext == 'pdf': loader = UnstructuredPDFLoader(join(allPath, fname)) document = loader.load() print('loaded file {0} with UnstructuredPDFLoader.'.format(fname)) else: print("can't process file {0}".format(fname)) return documents.extend(text_splitter.split_documents(document)) def CheckFile(rootpath, path, fname): metaResult = checkMetadata(rootpath, path, fname) if metaResult == 2: # New File print('file: {0} has added to workspace'.format(fname)) createNewDocument(rootpath, path, fname) elif metaResult == 1: # File modified print('file: {0} needs to be updated'.format(fname)) updateDocument(rootpath, path, fname) else: # Recent file print('file: {0} is up to date'.format(fname)) def iterateDirectory(rootpath, path): allPath = join(rootpath, path) dirs = [d for d in listdir(allPath) if isdir(join(allPath, d))] files = [f for f in listdir(allPath) if isfile(join(allPath, f))] for d in dirs: iterateDirectory(rootpath, join(path, d)) for f in files: CheckFile(rootpath, path, f) def get_file_metadata(path, filename): return os.path.getmtime(join(os.path.abspath(path), filename)) def create_or_update_metadata(workspacePath, filePath, fileName, docID, idxNum): global metadatas path = join(workspacePath, filePath) meta = {'path': join(filePath, fileName), 'modified': get_file_metadata(path, fileName), 'docID': docID, 'idxNum': idxNum} metadatas['files'][join(filePath, fileName)] = meta def checkMetadata(workspacePath, filePath, fileName) -> int: # 0 : same, 1 : not same, 2 : not found global metadatas file = join(filePath, fileName) if file not in metadatas['files']: return 2 modified_origin = metadatas['files'][file]['modified'] path = join(workspacePath, filePath) modified = get_file_metadata(path, fileName) if modified == modified_origin: return 0 else: return 1 def createNewDocument(workspacePath, filePath, fileName): global chroma global metadatas idx = metadatas['lastID'] metadatas['lastID'] = metadatas['lastID'] + 1 docs = [] processFile(workspacePath, filePath, fileName, docs) ids = [] for i in range(len(docs)): 
ids.append('{0}d{1}'.format(idx, i)) embedding = OpenAIEmbeddings() if len(docs) != 0: chroma.add_documents(documents=docs, ids=ids) create_or_update_metadata(workspacePath, filePath, fileName, idx, len(docs)) def updateDocument(workspacePath, filePath, fileName): global chroma global metadatas file = join(filePath, fileName) docs = [] processFile(workspacePath, filePath, fileName, docs) idx = metadatas['files'][file]['docID'] idNum = metadatas['files'][file]['idxNum'] coll = chroma._client.get_collection('langchain') ids = [] newIds = [] for i in range(idNum): ids.append('{0}d{1}'.format(idx, i)) for i in range(len(docs)): newIds.append('{0}d{1}'.format(idx, i)) coll.delete(ids=ids) embedding = OpenAIEmbeddings() chroma.add_documents(documents=docs, ids=newIds) create_or_update_metadata(workspacePath, filePath, fileName, idx, len(docs)) def initMetadata(): global metadatas metadatas['files'] = dict() metadatas['lastID'] = 0 saveMetadata(dbFolder + '/metadata.json') def saveMetadata(path): global metadatas with open(path, "w") as f: json.dump(metadatas, f, indent=4) def loadMetadata(path): global metadatas with open(path, "r") as f: metadatas = json.load(f) f.close() def createDB(): global chroma from chromadb.config import Settings embedding = OpenAIEmbeddings() chroma = Chroma( persist_directory=dbFolder, embedding_function=embedding ) initMetadata() iterateDirectory(workspace, '') saveMetadata(dbFolder + 'metadata.json') chroma.persist() def loadDB(): global loaded if loaded: return global chroma try: embedding = OpenAIEmbeddings() chroma = Chroma(persist_directory=dbFolder, embedding_function=embedding) loadMetadata(join(dbFolder, 'metadata.json')) iterateDirectory(workspace, '') saveMetadata(join(dbFolder, 'metadata.json')) chroma.persist() loaded = True except: print('failed to loadDB') def reloadDB(): print('reloading DB') option_data.load_option() openai.api_key = option_data.openai_api_key loadDB() def promptLangchain(query): global chroma if chroma is None: print("chroma didn't set") return 'err' retriever = chroma.as_retriever() openai = OpenAI() openai.max_tokens = 256 qa = RetrievalQA.from_chain_type( llm=ChatOpenAI(temperature=option_data.temperature), chain_type='stuff', retriever=retriever ) return qa.run(query)
[ "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.llms.OpenAI", "langchain.chat_models.ChatOpenAI", "langchain.embeddings.OpenAIEmbeddings", "langchain.vectorstores.Chroma" ]
[((887, 900), 'optiondata.Option_data', 'Option_data', ([], {}), '()\n', (898, 900), False, 'from optiondata import Option_data\n'), ((1242, 1262), 'os.path.join', 'join', (['rootpath', 'path'], {}), '(rootpath, path)\n', (1246, 1262), False, 'from os.path import isdir, isfile, join\n'), ((1284, 1388), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(400)', 'chunk_overlap': '(0)', 'separators': "['\\n\\n', '\\n', ' ', '']"}), "(chunk_size=400, chunk_overlap=0, separators=\n ['\\n\\n', '\\n', ' ', ''])\n", (1314, 1388), False, 'from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter\n'), ((2460, 2480), 'os.path.join', 'join', (['rootpath', 'path'], {}), '(rootpath, path)\n', (2464, 2480), False, 'from os.path import isdir, isfile, join\n'), ((2969, 2998), 'os.path.join', 'join', (['workspacePath', 'filePath'], {}), '(workspacePath, filePath)\n', (2973, 2998), False, 'from os.path import isdir, isfile, join\n'), ((3354, 3378), 'os.path.join', 'join', (['filePath', 'fileName'], {}), '(filePath, fileName)\n', (3358, 3378), False, 'from os.path import isdir, isfile, join\n'), ((3506, 3535), 'os.path.join', 'join', (['workspacePath', 'filePath'], {}), '(workspacePath, filePath)\n', (3510, 3535), False, 'from os.path import isdir, isfile, join\n'), ((4021, 4039), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (4037, 4039), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((4309, 4333), 'os.path.join', 'join', (['filePath', 'fileName'], {}), '(filePath, fileName)\n', (4313, 4333), False, 'from os.path import isdir, isfile, join\n'), ((4771, 4789), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (4787, 4789), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((5433, 5451), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (5449, 5451), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((5465, 5529), 'langchain.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': 'dbFolder', 'embedding_function': 'embedding'}), '(persist_directory=dbFolder, embedding_function=embedding)\n', (5471, 5529), False, 'from langchain.vectorstores import Chroma\n'), ((6439, 6447), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (6445, 6447), False, 'from langchain.llms import OpenAI\n'), ((3019, 3043), 'os.path.join', 'join', (['filePath', 'fileName'], {}), '(filePath, fileName)\n', (3023, 3043), False, 'from os.path import isdir, isfile, join\n'), ((3185, 3209), 'os.path.join', 'join', (['filePath', 'fileName'], {}), '(filePath, fileName)\n', (3189, 3209), False, 'from os.path import isdir, isfile, join\n'), ((5159, 5192), 'json.dump', 'json.dump', (['metadatas', 'f'], {'indent': '(4)'}), '(metadatas, f, indent=4)\n', (5168, 5192), False, 'import json\n'), ((5309, 5321), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5318, 5321), False, 'import json\n'), ((5785, 5803), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (5801, 5803), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((5821, 5885), 'langchain.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': 'dbFolder', 'embedding_function': 'embedding'}), '(persist_directory=dbFolder, embedding_function=embedding)\n', (5827, 5885), False, 'from langchain.vectorstores import Chroma\n'), ((1464, 1484), 'os.path.join', 'join', (['allPath', 'fname'], {}), '(allPath, fname)\n', (1468, 
1484), False, 'from os.path import isdir, isfile, join\n'), ((2504, 2520), 'os.listdir', 'listdir', (['allPath'], {}), '(allPath)\n', (2511, 2520), False, 'from os import listdir\n'), ((2573, 2589), 'os.listdir', 'listdir', (['allPath'], {}), '(allPath)\n', (2580, 2589), False, 'from os import listdir\n'), ((2673, 2686), 'os.path.join', 'join', (['path', 'd'], {}), '(path, d)\n', (2677, 2686), False, 'from os.path import isdir, isfile, join\n'), ((5907, 5938), 'os.path.join', 'join', (['dbFolder', '"""metadata.json"""'], {}), "(dbFolder, 'metadata.json')\n", (5911, 5938), False, 'from os.path import isdir, isfile, join\n'), ((6001, 6032), 'os.path.join', 'join', (['dbFolder', '"""metadata.json"""'], {}), "(dbFolder, 'metadata.json')\n", (6005, 6032), False, 'from os.path import isdir, isfile, join\n'), ((6526, 6573), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': 'option_data.temperature'}), '(temperature=option_data.temperature)\n', (6536, 6573), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1662, 1682), 'os.path.join', 'join', (['allPath', 'fname'], {}), '(allPath, fname)\n', (1666, 1682), False, 'from os.path import isdir, isfile, join\n'), ((2530, 2546), 'os.path.join', 'join', (['allPath', 'd'], {}), '(allPath, d)\n', (2534, 2546), False, 'from os.path import isdir, isfile, join\n'), ((2600, 2616), 'os.path.join', 'join', (['allPath', 'f'], {}), '(allPath, f)\n', (2604, 2616), False, 'from os.path import isdir, isfile, join\n')]
""" A script for retrieval-based question answering using the langchain library. This script demonstrates how to integrate a retrieval system with a chat model for answering questions. It utilizes Chroma for retrieval of relevant information and ChatOpenAI for generating answers based on the retrieved content. The RedundantFilterRetriever is used for efficient retrieval, filtering out redundant information and focusing on the most relevant content. This setup is ideal for answering questions with context from a specific knowledge base. Features: - Initialize ChatOpenAI for language model-based interactions. - Use OpenAI embeddings for document retrieval. - Load a Chroma database for document retrieval based on embeddings. - Use RedundantFilterRetriever for enhanced retrieval efficiency. - Set up a RetrievalQA chain combining the chat model and the retriever. - Answer a specific question using the RetrievalQA chain. Usage: Run the script to ask a question about the English language and get an answer based on retrieved content from the Chroma database and processed through the RedundantFilterRetriever. """ import langchain from dotenv import load_dotenv from langchain.chains import RetrievalQA from langchain.chat_models import ChatOpenAI from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores.chroma import Chroma from mod_05_redundant_filter_retriever import RedundantFilterRetriever langchain.debug = True load_dotenv() # Initialize a ChatOpenAI instance for language model interactions. chat = ChatOpenAI() # Initialize OpenAI embeddings for document retrieval. embeddings = OpenAIEmbeddings() # Load a Chroma database for document retrieval. db = Chroma(persist_directory="emb", embedding_function=embeddings) # Initialize the RedundantFilterRetriever with OpenAI embeddings and Chroma database. # This retriever filters out redundant information, focusing on the most relevant content. retriever = RedundantFilterRetriever(embeddings=embeddings, chroma=db) # Configure the RetrievalQA chain with the chat model and the enhanced retriever. # This chain combines the capabilities of ChatOpenAI and # RedundantFilterRetriever for efficient question answering. # https://python.langchain.com/docs/modules/chains/document/ chain = RetrievalQA.from_chain_type(llm=chat, retriever=retriever, chain_type="stuff") # Run the chain to answer a question based on retrieved content. result = chain.run("What is an interesting fact about the English language?") print(result)
[ "langchain.embeddings.OpenAIEmbeddings", "langchain.vectorstores.chroma.Chroma", "langchain.chains.RetrievalQA.from_chain_type", "langchain.chat_models.ChatOpenAI" ]
[((1459, 1472), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1470, 1472), False, 'from dotenv import load_dotenv\n'), ((1549, 1561), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (1559, 1561), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1631, 1649), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1647, 1649), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1705, 1767), 'langchain.vectorstores.chroma.Chroma', 'Chroma', ([], {'persist_directory': '"""emb"""', 'embedding_function': 'embeddings'}), "(persist_directory='emb', embedding_function=embeddings)\n", (1711, 1767), False, 'from langchain.vectorstores.chroma import Chroma\n'), ((1958, 2016), 'mod_05_redundant_filter_retriever.RedundantFilterRetriever', 'RedundantFilterRetriever', ([], {'embeddings': 'embeddings', 'chroma': 'db'}), '(embeddings=embeddings, chroma=db)\n', (1982, 2016), False, 'from mod_05_redundant_filter_retriever import RedundantFilterRetriever\n'), ((2287, 2365), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'chat', 'retriever': 'retriever', 'chain_type': '"""stuff"""'}), "(llm=chat, retriever=retriever, chain_type='stuff')\n", (2314, 2365), False, 'from langchain.chains import RetrievalQA\n')]
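mod_05_redundant_filter_retriever is not included in this record, so the following is only a plausible reconstruction of RedundantFilterRetriever, assuming the same langchain version as the surrounding code: a retriever that embeds the query itself and runs a max-marginal-relevance search so near-duplicate chunks are dropped from the retrieved context.

from typing import List
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document
from langchain.vectorstores.chroma import Chroma

class RedundantFilterRetriever(BaseRetriever):
    embeddings: Embeddings
    chroma: Chroma

    def get_relevant_documents(self, query: str) -> List[Document]:
        emb = self.embeddings.embed_query(query)
        # MMR trades similarity against diversity, filtering out redundant hits.
        return self.chroma.max_marginal_relevance_search_by_vector(
            embedding=emb, lambda_mult=0.8
        )

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        return []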
import os import gradio as gr import langchain from langchain.llms import OpenAI from langchain.chains import RetrievalQAWithSourcesChain from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.document_loaders import UnstructuredURLLoader from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores import FAISS from dotenv import load_dotenv load_dotenv() # take .env variables llm = OpenAI(temperature=0.9, max_tokens=500) def echo(message, history, links): question = message urls = links.split() loader = UnstructuredURLLoader(urls) data = loader.load() text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200) docs = text_splitter.split_documents(data) vectorindex_openai = FAISS.from_documents(docs, OpenAIEmbeddings()) chain = RetrievalQAWithSourcesChain.from_llm(llm=llm, retriever=vectorindex_openai.as_retriever()) response = chain({"question": question}, return_only_outputs=True) formatted_response = response['answer'] if 'sources' in response: formatted_response += "\nSources: " + response['sources'] return formatted_response demo = gr.ChatInterface(echo, additional_inputs=[ gr.Textbox("[Paste Links Here]", label="News Links"), ] ) if __name__ == "__main__": demo.launch(show_api=False)
[ "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.embeddings.OpenAIEmbeddings", "langchain.llms.OpenAI", "langchain.document_loaders.UnstructuredURLLoader" ]
[((470, 483), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (481, 483), False, 'from dotenv import load_dotenv\n'), ((512, 551), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.9)', 'max_tokens': '(500)'}), '(temperature=0.9, max_tokens=500)\n', (518, 551), False, 'from langchain.llms import OpenAI\n'), ((649, 676), 'langchain.document_loaders.UnstructuredURLLoader', 'UnstructuredURLLoader', (['urls'], {}), '(urls)\n', (670, 676), False, 'from langchain.document_loaders import UnstructuredURLLoader\n'), ((727, 793), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(200)'}), '(chunk_size=1000, chunk_overlap=200)\n', (757, 793), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((898, 916), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (914, 916), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1368, 1420), 'gradio.Textbox', 'gr.Textbox', (['"""[Paste Links Here]"""'], {'label': '"""News Links"""'}), "('[Paste Links Here]', label='News Links')\n", (1378, 1420), True, 'import gradio as gr\n')]
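echo() above re-downloads and re-embeds the linked pages on every chat turn. A possible refinement, not part of the original record and with illustrative names, is to cache one FAISS index per set of links (this reuses the imports already present in that module):

_index_cache = {}

def get_index(urls):
    key = tuple(sorted(urls))
    if key not in _index_cache:
        data = UnstructuredURLLoader(list(key)).load()
        splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
        docs = splitter.split_documents(data)
        _index_cache[key] = FAISS.from_documents(docs, OpenAIEmbeddings())
    return _index_cache[key]  # echo() can then build its retrieval chain from this index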
## novel_generator.py import langchain import openai from typing import Dict, Any from .dialogue_enhancer import DialogueEnhancer from .script_transitioner import ScriptTransitioner from .embedding_storage import EmbeddingStorage from .custom_agent import CustomAgent class NovelGenerator: def __init__(self, prompt: str, writing_style: str, chapter_count: int, genre: str, dialogue_enhancer: DialogueEnhancer, script_transitioner: ScriptTransitioner, embedding_storage: EmbeddingStorage, custom_agent: CustomAgent): """ Initialize a NovelGenerator with the specified parameters. Parameters: prompt (str): The prompt for the novel. writing_style (str): The writing style of the novel. chapter_count (int): The number of chapters in the novel. genre (str): The genre of the novel. dialogue_enhancer (DialogueEnhancer): The dialogue enhancer. script_transitioner (ScriptTransitioner): The script transitioner. embedding_storage (EmbeddingStorage): The embedding storage. custom_agent (CustomAgent): The custom agent. """ self.prompt = prompt self.writing_style = writing_style self.chapter_count = chapter_count self.genre = genre self.generator = langchain.NovelGenerator() self.openai_model = openai.GPT3Model() self.dialogue_enhancer = dialogue_enhancer self.script_transitioner = script_transitioner self.embedding_storage = embedding_storage self.custom_agent = custom_agent def generate_novel(self) -> str: """ Generate a novel based on the specified parameters. Returns: str: The generated novel. """ # Generate novel text novel_text = self.generator.generate(self.prompt, self.writing_style, self.chapter_count, self.genre) return novel_text
[ "langchain.NovelGenerator" ]
[((1322, 1348), 'langchain.NovelGenerator', 'langchain.NovelGenerator', ([], {}), '()\n', (1346, 1348), False, 'import langchain\n'), ((1377, 1395), 'openai.GPT3Model', 'openai.GPT3Model', ([], {}), '()\n', (1393, 1395), False, 'import openai\n')]
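Neither langchain nor the openai package ships a NovelGenerator or GPT3Model class, so the record above reads as pseudocode. A sketch of the generate step using stock components (the prompt wording and temperature are assumptions):

from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate

_NOVEL_PROMPT = PromptTemplate.from_template(
    "Write a {genre} novel in a {writing_style} style, structured into "
    "{chapter_count} chapters, based on the following premise:\n{prompt}"
)

def generate_novel_text(prompt: str, writing_style: str, chapter_count: int, genre: str) -> str:
    # One LLMChain call stands in for the NovelGenerator.generate() step above.
    chain = LLMChain(llm=ChatOpenAI(temperature=0.7), prompt=_NOVEL_PROMPT)
    return chain.run(prompt=prompt, writing_style=writing_style,
                     chapter_count=chapter_count, genre=genre)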
import streamlit as st import openai import langchain import os from dotenv import load_dotenv from PyPDF2 import PdfReader from langchain.text_splitter import CharacterTextSplitter from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings from langchain.vectorstores import FAISS from langchain.chat_models import ChatOpenAI from langchain.memory import ConversationBufferMemory from langchain.chains import ConversationalRetrievalChain from htmlTemplates import css, bot_template, user_template from langchain.llms import HuggingFaceHub def get_pdf_text(pdf_docs): text = "" for pdf in pdf_docs: pdf_reader = PdfReader(pdf) for page in pdf_reader.pages: text += page.extract_text() return text # pdfFileObj = open('your_pdf_name.pdf', 'rb') # pdfReader = PyPDF2.PdfFileReader(pdfFileObj) # pdf = '' # for i in range(0, pdfReader.numPages): # pageObj = pdfReader.getPage(i) # page = pageObj.extractText() # pdf = page + ' ' # print(pdf) # pdf = '' # for pdf in pdf_docs: # pdfFileObj = open(pdf, 'rb') # pdfReader = PyPDF2.PdfFileReader(pdfFileObj) # for i in range(0, pdfReader.numPages): # pageObj = pdfReader.getPage(i) # page = pageObj.extractText() # pdf = page + ' ' # return pdf def get_text_chunks(text): text_splitter = CharacterTextSplitter( separator="\n", chunk_size=1000, chunk_overlap=200, length_function=len ) chunks = text_splitter.split_text(text) return chunks def get_vectorstore(text_chunks): embeddings = OpenAIEmbeddings() # embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl") vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings) return vectorstore def get_conversation_chain(vectorstore): llm = ChatOpenAI() # llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":0.5, "max_length":512}) memory = ConversationBufferMemory( memory_key='chat_history', return_messages=True) conversation_chain = ConversationalRetrievalChain.from_llm( llm=llm, retriever=vectorstore.as_retriever(), memory=memory ) return conversation_chain def handle_userinput(user_question): response = st.session_state.conversation({'question': user_question}) st.session_state.chat_history = response['chat_history'] for i, message in enumerate(st.session_state.chat_history): if i % 2 == 0: st.write(user_template.replace( "{{MSG}}", message.content), unsafe_allow_html=True) else: st.write(bot_template.replace( "{{MSG}}", message.content), unsafe_allow_html=True) def main(): load_dotenv() openai.api_key = st.secrets["OPENAI_API_KEY"] st.set_page_config(page_title="Chat with multiple PDFs", page_icon=":books:") st.write(css, unsafe_allow_html=True) if "conversation" not in st.session_state: st.session_state.conversation = None if "chat_history" not in st.session_state: st.session_state.chat_history = None st.header("Chat with multiple PDFs :books:") user_question = st.text_input("Ask a question about your documents:") if user_question: handle_userinput(user_question) with st.sidebar: st.subheader("Your documents") pdf_docs = st.file_uploader( "Upload your PDFs here and click on 'Process'", accept_multiple_files=True) if st.button("Process"): with st.spinner("Processing"): # get pdf text raw_text = get_pdf_text(pdf_docs) # get the text chunks text_chunks = get_text_chunks(raw_text) # create vector store vectorstore = get_vectorstore(text_chunks) # create conversation chain st.session_state.conversation = get_conversation_chain( vectorstore) if __name__ == '__main__': main()
[ "langchain.text_splitter.CharacterTextSplitter", "langchain.memory.ConversationBufferMemory", "langchain.chat_models.ChatOpenAI", "langchain.vectorstores.FAISS.from_texts", "langchain.embeddings.OpenAIEmbeddings" ]
[((1430, 1528), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""\n"""', 'chunk_size': '(1000)', 'chunk_overlap': '(200)', 'length_function': 'len'}), "(separator='\\n', chunk_size=1000, chunk_overlap=200,\n length_function=len)\n", (1451, 1528), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1678, 1696), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1694, 1696), False, 'from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings\n'), ((1799, 1856), 'langchain.vectorstores.FAISS.from_texts', 'FAISS.from_texts', ([], {'texts': 'text_chunks', 'embedding': 'embeddings'}), '(texts=text_chunks, embedding=embeddings)\n', (1815, 1856), False, 'from langchain.vectorstores import FAISS\n'), ((1933, 1945), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (1943, 1945), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2069, 2142), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(memory_key='chat_history', return_messages=True)\n", (2093, 2142), False, 'from langchain.memory import ConversationBufferMemory\n'), ((2391, 2449), 'streamlit.session_state.conversation', 'st.session_state.conversation', (["{'question': user_question}"], {}), "({'question': user_question})\n", (2420, 2449), True, 'import streamlit as st\n'), ((2856, 2869), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (2867, 2869), False, 'from dotenv import load_dotenv\n'), ((2924, 3001), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Chat with multiple PDFs"""', 'page_icon': '""":books:"""'}), "(page_title='Chat with multiple PDFs', page_icon=':books:')\n", (2942, 3001), True, 'import streamlit as st\n'), ((3029, 3066), 'streamlit.write', 'st.write', (['css'], {'unsafe_allow_html': '(True)'}), '(css, unsafe_allow_html=True)\n', (3037, 3066), True, 'import streamlit as st\n'), ((3257, 3301), 'streamlit.header', 'st.header', (['"""Chat with multiple PDFs :books:"""'], {}), "('Chat with multiple PDFs :books:')\n", (3266, 3301), True, 'import streamlit as st\n'), ((3322, 3375), 'streamlit.text_input', 'st.text_input', (['"""Ask a question about your documents:"""'], {}), "('Ask a question about your documents:')\n", (3335, 3375), True, 'import streamlit as st\n'), ((651, 665), 'PyPDF2.PdfReader', 'PdfReader', (['pdf'], {}), '(pdf)\n', (660, 665), False, 'from PyPDF2 import PdfReader\n'), ((3468, 3498), 'streamlit.subheader', 'st.subheader', (['"""Your documents"""'], {}), "('Your documents')\n", (3480, 3498), True, 'import streamlit as st\n'), ((3518, 3614), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload your PDFs here and click on \'Process\'"""'], {'accept_multiple_files': '(True)'}), '("Upload your PDFs here and click on \'Process\'",\n accept_multiple_files=True)\n', (3534, 3614), True, 'import streamlit as st\n'), ((3635, 3655), 'streamlit.button', 'st.button', (['"""Process"""'], {}), "('Process')\n", (3644, 3655), True, 'import streamlit as st\n'), ((2620, 2669), 'htmlTemplates.user_template.replace', 'user_template.replace', (['"""{{MSG}}"""', 'message.content'], {}), "('{{MSG}}', message.content)\n", (2641, 2669), False, 'from htmlTemplates import css, bot_template, user_template\n'), ((2747, 2795), 'htmlTemplates.bot_template.replace', 'bot_template.replace', (['"""{{MSG}}"""', 'message.content'], {}), "('{{MSG}}', 
message.content)\n", (2767, 2795), False, 'from htmlTemplates import css, bot_template, user_template\n'), ((3674, 3698), 'streamlit.spinner', 'st.spinner', (['"""Processing"""'], {}), "('Processing')\n", (3684, 3698), True, 'import streamlit as st\n')]
import json import random import langchain from dotenv import load_dotenv import gradio as gr import logging from langchain.chains import LLMChain from langchain.prompts.chat import ( ChatPromptTemplate ) import pydantic.v1.error_wrappers from typing import Any, Dict, Tuple from transist.llm import create_openai_llm, parse_json_maybe_invalid, ExtractionOutputParser from transist.prompt import system_prompt, draft_question_prompt, extract_facts_prompt logging.basicConfig(level=logging.INFO) log = logging.getLogger(__name__) thinking = [ "Give me a few seconds to understand what you told me.", "Let me take a moment to process the information you've shared", "Please allow me a short pause to fully comprehend the details you provided." ] sufficient_facts_response = "Sufficient facts to generate section {section}" move_to_next_section = "Let's proceed by moving on to the next section about {section}" class CarbonAssistant(object): section_order = [ (0, "info"), (2, "methodology"), (3, "quantification"), (4, "monitoring"), (5, "safeguards"), (1, "details"), (99, "closing") ] def __init__(self): self.state = "extract" self.sector = "afolu" self.extract_parser = ExtractionOutputParser() self.curr_section_index = 0 self.curr_section_facts: Dict = self._load_section_facts(self.curr_section_index) self.completed_section: Dict[Tuple, Dict] = {} self.curr_questions = [] def _load_section_facts(self, section_index): section_template = self._section_template(section_index) return json.loads(section_template) def _section_template(self, section_index): section_number, section = CarbonAssistant.section_order[section_index] section_dir = f"{section_number:02d}_{section}" section_file = f"{section_number:02d}_{self.sector}_{section}.json" filepath = f"data/templates/sector={self.sector}/{section_dir}/{section_file}" log.info("Getting template for %s from file: %s", section, filepath) return open(filepath, "r").read() @property def curr_section(self): return CarbonAssistant.section_order[self.curr_section_index][1] def design(self, message, history, openai_api_key=None): try: llm = create_openai_llm(openai_api_key) for out in self.design_with_llm(llm, message, history): yield out except pydantic.v1.error_wrappers.ValidationError as e: if any(["OPENAI_API_KEY" in error['msg']for error in e.errors()]): raise gr.Error("An OpenAI API key needs to be provided in the Additional Inputs section below") else: raise gr.Error(pydantic.v1.error_wrappers.display_errors(e.errors())) def design_with_llm(self, llm, message, history): if self.state == "draft": questions = self.draft_questions(llm, self.curr_section_facts) if self.sufficient_to_generate(questions): yield sufficient_facts_response % self.curr_section self.complete_section() if not self.next_section(): self.state = "generate" yield "Generating document sections" else: self.state = "draft" yield move_to_next_section % self.curr_section for out in self.design_with_llm(llm, message, history): yield out else: self.curr_questions = questions self.state = "extract" yield "Let's continue gathering information about your carbon project" yield questions elif self.state == "extract": yield f"Thank you for providing information about your project. 
{random.choice(thinking)}" extracted = self.extract_facts(llm, message, history, self.curr_section_facts) if extracted.get("keys_updated", []): extracted_facts = extracted.get("extracted_project_facts", {}) self.curr_section_facts.update(extracted_facts) log.info("Updated facts doc: %s", self.curr_section_facts) self.state = "draft" else: self.state = "explore" for out in self.design_with_llm(llm, message, history): yield out elif self.state == "explore": yield "I understand that you need some help in answering these questions." yield "Give me a moment to try and find some relevant information which can help." explore_results = self.explore(llm, message, history, self.curr_section_facts) self.state = "extract" yield explore_results def draft_questions(self, llm, facts_document): questions_chain = LLMChain( llm=llm, prompt=ChatPromptTemplate.from_messages([system_prompt, draft_question_prompt]), output_key="questions", verbose=True) questions = questions_chain.predict(json_template=json.dumps(facts_document)) return questions def extract_facts(self, llm, message, history, facts_document) -> Dict[Any, Any]: extract_chain = LLMChain( llm=llm, prompt=ChatPromptTemplate.from_messages([system_prompt, extract_facts_prompt]), output_parser=self.extract_parser, output_key="extracted", verbose=True) extracted: Dict[str, Any] = extract_chain.predict_and_parse( project_facts_document=json.dumps(facts_document), project_information=message) if not extracted: log.warning("Could not extracted using extract chain: '%s'", extracted) return extracted def explore(self, llm, message, history, facts_document): return f"""Some relevant search results to\n\nUser: {message} In context of \nhistory: {history}""" @staticmethod def sufficient_to_generate(drafted_questions) -> bool: return drafted_questions.strip() == "GENERATE" def complete_section(self): self.curr_questions = [] curr_section = CarbonAssistant.section_order[self.curr_section_index] if curr_section in self.completed_section: completed_facts = self.completed_section.get(curr_section) completed_facts.update(self.curr_section_facts) else: self.completed_section[curr_section] = self.curr_section_facts def next_section(self) -> bool: if self.curr_section_index + 1 >= len(CarbonAssistant.section_order): self.curr_section_facts = {} return False else: assert (0, "info") in self.complete_section(), \ "Cannot move to next section without completing project info" self.curr_section_index += 1 self.curr_section_facts = self._load_section_facts(self.curr_section_index) project_info_facts = self.completed_section[(0, "info")] self.curr_section_facts.update(project_info_facts) return True def main(): langchain.verbose = True assistant = CarbonAssistant() openai_api_key = gr.Textbox(placeholder="Please enter you OpenAI API key here", label="Open AI API Key", render=False) demo = gr.ChatInterface( title="Verra Carbon Project Design Assistant", description=""" I'm a virtual assistant who can help you in writing the baseline section for your carbon project to be registered with the Verra registry. Please start by telling me something about your project. 
""", textbox=gr.Textbox(placeholder="Start by telling me about your project", scale=7), fn=assistant.design, additional_inputs=[openai_api_key], examples=[["The name of my project is BrewHat Bunguluru Waste Management", None], ["My project falls under the Waste Management sectoral scope", None], ["My project is about reducing GHG emission from biomass waste", None]] ) demo.queue().launch() if __name__ == "__main__": # Take environment variables from .env file load_dotenv() main()
[ "langchain.prompts.chat.ChatPromptTemplate.from_messages" ]
[((462, 501), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (481, 501), False, 'import logging\n'), ((508, 535), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (525, 535), False, 'import logging\n'), ((7347, 7453), 'gradio.Textbox', 'gr.Textbox', ([], {'placeholder': '"""Please enter you OpenAI API key here"""', 'label': '"""Open AI API Key"""', 'render': '(False)'}), "(placeholder='Please enter you OpenAI API key here', label=\n 'Open AI API Key', render=False)\n", (7357, 7453), True, 'import gradio as gr\n'), ((8396, 8409), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (8407, 8409), False, 'from dotenv import load_dotenv\n'), ((1289, 1313), 'transist.llm.ExtractionOutputParser', 'ExtractionOutputParser', ([], {}), '()\n', (1311, 1313), False, 'from transist.llm import create_openai_llm, parse_json_maybe_invalid, ExtractionOutputParser\n'), ((1659, 1687), 'json.loads', 'json.loads', (['section_template'], {}), '(section_template)\n', (1669, 1687), False, 'import json\n'), ((2363, 2396), 'transist.llm.create_openai_llm', 'create_openai_llm', (['openai_api_key'], {}), '(openai_api_key)\n', (2380, 2396), False, 'from transist.llm import create_openai_llm, parse_json_maybe_invalid, ExtractionOutputParser\n'), ((7840, 7913), 'gradio.Textbox', 'gr.Textbox', ([], {'placeholder': '"""Start by telling me about your project"""', 'scale': '(7)'}), "(placeholder='Start by telling me about your project', scale=7)\n", (7850, 7913), True, 'import gradio as gr\n'), ((4984, 5056), 'langchain.prompts.chat.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['[system_prompt, draft_question_prompt]'], {}), '([system_prompt, draft_question_prompt])\n', (5016, 5056), False, 'from langchain.prompts.chat import ChatPromptTemplate\n'), ((5178, 5204), 'json.dumps', 'json.dumps', (['facts_document'], {}), '(facts_document)\n', (5188, 5204), False, 'import json\n'), ((5392, 5463), 'langchain.prompts.chat.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['[system_prompt, extract_facts_prompt]'], {}), '([system_prompt, extract_facts_prompt])\n', (5424, 5463), False, 'from langchain.prompts.chat import ChatPromptTemplate\n'), ((5679, 5705), 'json.dumps', 'json.dumps', (['facts_document'], {}), '(facts_document)\n', (5689, 5705), False, 'import json\n'), ((2656, 2755), 'gradio.Error', 'gr.Error', (['"""An OpenAI API key needs to be provided in the Additional Inputs section below"""'], {}), "(\n 'An OpenAI API key needs to be provided in the Additional Inputs section below'\n )\n", (2664, 2755), True, 'import gradio as gr\n'), ((3901, 3924), 'random.choice', 'random.choice', (['thinking'], {}), '(thinking)\n', (3914, 3924), False, 'import random\n')]
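transist.llm is not part of this record, so its ExtractionOutputParser is unknown; the sketch below is only a guess at its shape, consistent with how extract_facts() calls predict_and_parse and expects a dict: a parser that tolerates replies where the JSON object is wrapped in extra prose.

import json
import re
from typing import Any, Dict
from langchain.schema import BaseOutputParser

class ExtractionOutputParser(BaseOutputParser):
    def parse(self, text: str) -> Dict[str, Any]:
        # Keep only the outermost {...} block, then try to decode it.
        match = re.search(r"\{.*\}", text, re.DOTALL)
        if not match:
            return {}
        try:
            return json.loads(match.group(0))
        except json.JSONDecodeError:
            return {}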
# Import Langchain modules from langchain.document_loaders import PyPDFLoader from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores import FAISS from langchain.chains import RetrievalQA from langchain.llms import OpenAI # Import Environment Modules import os from dotenv import load_dotenv # Import API Modules from fastapi import FastAPI from fastapi.responses import HTMLResponse, JSONResponse import uvicorn # Import Other Modules import json import logging import warnings warnings.filterwarnings("ignore") # Load configuration with open('config.json', 'r') as f: config = json.load(f) # Configure logging logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') def environment_setup() -> None: """ Load environment variables and set OpenAI API key. """ load_dotenv() os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY") def load_documents(document_path: str) -> list: """ Load the pdf file and split it into pages. """ try: loader = PyPDFLoader(document_path) pages = loader.load_and_split() return pages except Exception as e: logging.error(f"Error loading documents from {document_path}: {e}") return [] def split_documents(pages: list) -> list: """ Split the pages into chunks. """ try: text_splitter = RecursiveCharacterTextSplitter( chunk_size=200, chunk_overlap=0, length_function=len, is_separator_regex=True, ) docs = text_splitter.split_documents(pages) return docs except Exception as e: logging.error(f"Error splitting documents: {e}") return [] def process_documents() -> list: """ Process all documents in the specified path. """ document_paths = [os.path.join(config['DOCUMENTS_PATH'], f) for f in os.listdir(config['DOCUMENTS_PATH']) if f.endswith(".pdf")] all_docs = [] for document_path in document_paths: pages = load_documents(document_path) docs = split_documents(pages) all_docs.extend(docs) return all_docs def embeddings(docs: list) -> FAISS: """ Load the embeddings and store them in a vector store. """ try: embeddings = OpenAIEmbeddings() db = FAISS.from_documents(docs, embeddings) return db except Exception as e: logging.error(f"Error creating embeddings: {e}") return None def initialize_model() -> OpenAI: """ Initialize the model. """ llm = OpenAI() return llm def LLM_chain(llm: OpenAI, db: FAISS) -> RetrievalQA: """ Create a retrieval chain with the LLM and vector store. """ chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=db.as_retriever(search_kwargs={"k": 5})) return chain def initialize_all() -> tuple: """ Initialize all components. """ environment_setup() docs = process_documents() db = embeddings(docs) llm = initialize_model() llm_chain = LLM_chain(llm, db) return llm_chain, db def process_message(chain: RetrievalQA, user_message: str, db: FAISS) -> str: """ Process the user's message and return the bot's response. """ try: query = user_message docs = db.similarity_search(query) result = chain.run(input_documents=docs, query=query) return result except Exception as e: logging.error(f"Error generating response: {e}", exc_info=True) return "Sorry, I couldn't understand your message." def setup_fastapi(llm_chain: RetrievalQA, db: FAISS) -> FastAPI: """ Setup FastAPI with routes. """ app = FastAPI() @app.get("/", response_class=HTMLResponse) def read_root() -> HTMLResponse: """ Serve the chatbot HTML page. 
""" try: with open('templates/chatbot.html', 'r') as f: html_content = f.read() return HTMLResponse(content=html_content, status_code=200) except Exception as e: logging.error(f"Error reading HTML file: {e}", exc_info=True) return HTMLResponse(content="Sorry, something went wrong.", status_code=500) @app.get("/chatbot/{user_message}") def get_bot_response(user_message: str) -> JSONResponse: """ Process the user's message and return the bot's response. """ try: bot_response = process_message(llm_chain, user_message, db) return JSONResponse(content={"answer": bot_response}) except Exception as e: logging.error(f"Error processing message: {e}", exc_info=True) return JSONResponse(content={"answer": "Sorry, something went wrong."}) return app if __name__ == "__main__": try: llm_chain, db = initialize_all() fastapi_app = setup_fastapi(llm_chain, db) uvicorn.run(fastapi_app, host="0.0.0.0", port=8000) except Exception as e: logging.error(f"Error during initialization: {e}", exc_info=True)
[ "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.llms.OpenAI", "langchain.vectorstores.FAISS.from_documents", "langchain.document_loaders.PyPDFLoader", "langchain.embeddings.OpenAIEmbeddings" ]
[((573, 606), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (596, 606), False, 'import warnings\n'), ((712, 808), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(levelname)s - %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s - %(levelname)s - %(message)s')\n", (731, 808), False, 'import logging\n'), ((678, 690), 'json.load', 'json.load', (['f'], {}), '(f)\n', (687, 690), False, 'import json\n'), ((913, 926), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (924, 926), False, 'from dotenv import load_dotenv\n'), ((962, 989), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (971, 989), False, 'import os\n'), ((2654, 2662), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (2660, 2662), False, 'from langchain.llms import OpenAI\n'), ((3800, 3809), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (3807, 3809), False, 'from fastapi import FastAPI\n'), ((1128, 1154), 'langchain.document_loaders.PyPDFLoader', 'PyPDFLoader', (['document_path'], {}), '(document_path)\n', (1139, 1154), False, 'from langchain.document_loaders import PyPDFLoader\n'), ((1462, 1575), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(200)', 'chunk_overlap': '(0)', 'length_function': 'len', 'is_separator_regex': '(True)'}), '(chunk_size=200, chunk_overlap=0,\n length_function=len, is_separator_regex=True)\n', (1492, 1575), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1926, 1967), 'os.path.join', 'os.path.join', (["config['DOCUMENTS_PATH']", 'f'], {}), "(config['DOCUMENTS_PATH'], f)\n", (1938, 1967), False, 'import os\n'), ((2374, 2392), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (2390, 2392), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((2406, 2444), 'langchain.vectorstores.FAISS.from_documents', 'FAISS.from_documents', (['docs', 'embeddings'], {}), '(docs, embeddings)\n', (2426, 2444), False, 'from langchain.vectorstores import FAISS\n'), ((5019, 5070), 'uvicorn.run', 'uvicorn.run', (['fastapi_app'], {'host': '"""0.0.0.0"""', 'port': '(8000)'}), "(fastapi_app, host='0.0.0.0', port=8000)\n", (5030, 5070), False, 'import uvicorn\n'), ((1251, 1318), 'logging.error', 'logging.error', (['f"""Error loading documents from {document_path}: {e}"""'], {}), "(f'Error loading documents from {document_path}: {e}')\n", (1264, 1318), False, 'import logging\n'), ((1738, 1786), 'logging.error', 'logging.error', (['f"""Error splitting documents: {e}"""'], {}), "(f'Error splitting documents: {e}')\n", (1751, 1786), False, 'import logging\n'), ((1977, 2013), 'os.listdir', 'os.listdir', (["config['DOCUMENTS_PATH']"], {}), "(config['DOCUMENTS_PATH'])\n", (1987, 2013), False, 'import os\n'), ((2498, 2546), 'logging.error', 'logging.error', (['f"""Error creating embeddings: {e}"""'], {}), "(f'Error creating embeddings: {e}')\n", (2511, 2546), False, 'import logging\n'), ((3552, 3615), 'logging.error', 'logging.error', (['f"""Error generating response: {e}"""'], {'exc_info': '(True)'}), "(f'Error generating response: {e}', exc_info=True)\n", (3565, 3615), False, 'import logging\n'), ((4087, 4138), 'fastapi.responses.HTMLResponse', 'HTMLResponse', ([], {'content': 'html_content', 'status_code': '(200)'}), '(content=html_content, status_code=200)\n', (4099, 4138), False, 'from fastapi.responses import HTMLResponse, JSONResponse\n'), 
((4629, 4675), 'fastapi.responses.JSONResponse', 'JSONResponse', ([], {'content': "{'answer': bot_response}"}), "(content={'answer': bot_response})\n", (4641, 4675), False, 'from fastapi.responses import HTMLResponse, JSONResponse\n'), ((5106, 5171), 'logging.error', 'logging.error', (['f"""Error during initialization: {e}"""'], {'exc_info': '(True)'}), "(f'Error during initialization: {e}', exc_info=True)\n", (5119, 5171), False, 'import logging\n'), ((4182, 4243), 'logging.error', 'logging.error', (['f"""Error reading HTML file: {e}"""'], {'exc_info': '(True)'}), "(f'Error reading HTML file: {e}', exc_info=True)\n", (4195, 4243), False, 'import logging\n'), ((4263, 4332), 'fastapi.responses.HTMLResponse', 'HTMLResponse', ([], {'content': '"""Sorry, something went wrong."""', 'status_code': '(500)'}), "(content='Sorry, something went wrong.', status_code=500)\n", (4275, 4332), False, 'from fastapi.responses import HTMLResponse, JSONResponse\n'), ((4719, 4781), 'logging.error', 'logging.error', (['f"""Error processing message: {e}"""'], {'exc_info': '(True)'}), "(f'Error processing message: {e}', exc_info=True)\n", (4732, 4781), False, 'import logging\n'), ((4801, 4865), 'fastapi.responses.JSONResponse', 'JSONResponse', ([], {'content': "{'answer': 'Sorry, something went wrong.'}"}), "(content={'answer': 'Sorry, something went wrong.'})\n", (4813, 4865), False, 'from fastapi.responses import HTMLResponse, JSONResponse\n')]
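The extraction above corresponds to a small retrieval service built from PyPDFLoader, RecursiveCharacterTextSplitter, OpenAIEmbeddings and a FAISS index. A minimal sketch of that indexing flow, with the PDF path and query as placeholder values and the splitter settings copied from the extraction:

from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

# Hypothetical path; the real service reads its documents folder from a config file.
pages = PyPDFLoader("docs/example.pdf").load()

# Same splitter settings as in the extraction above.
splitter = RecursiveCharacterTextSplitter(
    chunk_size=200, chunk_overlap=0, length_function=len, is_separator_regex=True
)
docs = splitter.split_documents(pages)

# Embed the chunks, build an in-memory FAISS index, then query it.
vectorstore = FAISS.from_documents(docs, OpenAIEmbeddings())
relevant = vectorstore.similarity_search("What is this document about?", k=3)
for doc in relevant:
    print(doc.page_content[:80])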
"""Utilities for running language models or Chains over datasets.""" from __future__ import annotations import asyncio import functools import logging from datetime import datetime from typing import ( Any, Callable, Coroutine, Dict, Iterator, List, Optional, Sequence, Union, ) from langchainplus_sdk import LangChainPlusClient, RunEvaluator from langchainplus_sdk.schemas import Example from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackHandler from langchain.callbacks.manager import Callbacks from langchain.callbacks.tracers.base import BaseTracer from langchain.callbacks.tracers.evaluation import EvaluatorCallbackHandler from langchain.callbacks.tracers.langchain import LangChainTracer from langchain.chains.base import Chain from langchain.chat_models.base import BaseChatModel from langchain.llms.base import BaseLLM from langchain.schema import ( BaseMessage, ChatResult, HumanMessage, LLMResult, get_buffer_string, messages_from_dict, ) logger = logging.getLogger(__name__) MODEL_OR_CHAIN_FACTORY = Union[Callable[[], Chain], BaseLanguageModel] class InputFormatError(Exception): """Raised when the input format is invalid.""" def _get_prompts(inputs: Dict[str, Any]) -> List[str]: """ Get prompts from inputs. Args: inputs: The input dictionary. Returns: A list of prompts. Raises: InputFormatError: If the input format is invalid. """ if not inputs: raise InputFormatError("Inputs should not be empty.") prompts = [] if "prompt" in inputs: if not isinstance(inputs["prompt"], str): raise InputFormatError( "Expected string for 'prompt', got" f" {type(inputs['prompt']).__name__}" ) prompts = [inputs["prompt"]] elif "prompts" in inputs: if not isinstance(inputs["prompts"], list) or not all( isinstance(i, str) for i in inputs["prompts"] ): raise InputFormatError( "Expected list of strings for 'prompts'," f" got {type(inputs['prompts']).__name__}" ) prompts = inputs["prompts"] elif len(inputs) == 1: prompt_ = next(iter(inputs.values())) if isinstance(prompt_, str): prompts = [prompt_] elif isinstance(prompt_, list) and all(isinstance(i, str) for i in prompt_): prompts = prompt_ else: raise InputFormatError(f"LLM Run expects string prompt input. Got {inputs}") else: raise InputFormatError( f"LLM Run expects 'prompt' or 'prompts' in inputs. Got {inputs}" ) return prompts def _get_messages(inputs: Dict[str, Any]) -> List[List[BaseMessage]]: """ Get Chat Messages from inputs. Args: inputs: The input dictionary. Returns: A list of chat messages. Raises: InputFormatError: If the input format is invalid. """ if not inputs: raise InputFormatError("Inputs should not be empty.") if "messages" in inputs: single_input = inputs["messages"] elif len(inputs) == 1: single_input = next(iter(inputs.values())) else: raise InputFormatError(f"Chat Run expects 'messages' in inputs. Got {inputs}") if isinstance(single_input, list) and all( isinstance(i, dict) for i in single_input ): raw_messages = [single_input] elif isinstance(single_input, list) and all( isinstance(i, list) for i in single_input ): raw_messages = single_input else: raise InputFormatError( f"Chat Run expects List[dict] or List[List[dict]] 'messages'" f" input. Got {inputs}" ) return [messages_from_dict(batch) for batch in raw_messages] async def _arun_llm( llm: BaseLanguageModel, inputs: Dict[str, Any], *, tags: Optional[List[str]] = None, callbacks: Callbacks = None, input_mapper: Optional[Callable[[Dict], Any]] = None, ) -> Union[LLMResult, ChatResult]: """ Asynchronously run the language model. 
Args: llm: The language model to run. inputs: The input dictionary. tags: Optional tags to add to the run. callbacks: Optional callbacks to use during the run. input_mapper: Optional function to map inputs to the expected format. Returns: The LLMResult or ChatResult. Raises: ValueError: If the LLM type is unsupported. InputFormatError: If the input format is invalid. """ if input_mapper is not None: if not isinstance(llm, (BaseLLM, BaseChatModel)): raise ValueError(f"Unsupported LLM type {type(llm).__name__}") llm_output = await llm.agenerate( input_mapper(inputs), callbacks=callbacks, tags=tags ) elif isinstance(llm, BaseLLM): try: llm_prompts = _get_prompts(inputs) llm_output = await llm.agenerate( llm_prompts, callbacks=callbacks, tags=tags ) except InputFormatError: llm_messages = _get_messages(inputs) buffer_strings = [get_buffer_string(messages) for messages in llm_messages] llm_output = await llm.agenerate( buffer_strings, callbacks=callbacks, tags=tags ) elif isinstance(llm, BaseChatModel): try: messages = _get_messages(inputs) llm_output = await llm.agenerate(messages, callbacks=callbacks, tags=tags) except InputFormatError: prompts = _get_prompts(inputs) converted_messages: List[List[BaseMessage]] = [ [HumanMessage(content=prompt)] for prompt in prompts ] llm_output = await llm.agenerate( converted_messages, callbacks=callbacks, tags=tags ) else: raise ValueError(f"Unsupported LLM type {type(llm)}") return llm_output async def _arun_llm_or_chain( example: Example, llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, n_repetitions: int, *, tags: Optional[List[str]] = None, callbacks: Optional[List[BaseCallbackHandler]] = None, input_mapper: Optional[Callable[[Dict], Any]] = None, ) -> Union[List[dict], List[str], List[LLMResult], List[ChatResult]]: """ Asynchronously run the Chain or language model. Args: example: The example to run. llm_or_chain_factory: The Chain or language model constructor to run. n_repetitions: The number of times to run the model on each example. tags: Optional tags to add to the run. callbacks: Optional callbacks to use during the run. input_mapper: Optional function to map the input to the expected format. Returns: A list of outputs. """ if callbacks: previous_example_ids = [ getattr(tracer, "example_id", None) for tracer in callbacks ] for tracer in callbacks: if hasattr(tracer, "example_id"): tracer.example_id = example.id else: previous_example_ids = None outputs = [] for _ in range(n_repetitions): try: if isinstance(llm_or_chain_factory, BaseLanguageModel): output: Any = await _arun_llm( llm_or_chain_factory, example.inputs, tags=tags, callbacks=callbacks, input_mapper=input_mapper, ) else: chain = llm_or_chain_factory() if input_mapper is not None: inputs_ = input_mapper(example.inputs) else: inputs_ = example.inputs if len(inputs_) == 1: inputs_ = next(iter(inputs_.values())) output = await chain.acall(inputs_, callbacks=callbacks, tags=tags) outputs.append(output) except Exception as e: logger.warning(f"Chain failed for example {example.id}. Error: {e}") outputs.append({"Error": str(e)}) if callbacks and previous_example_ids: for example_id, tracer in zip(previous_example_ids, callbacks): if hasattr(tracer, "example_id"): tracer.example_id = example_id return outputs async def _gather_with_concurrency( n: int, initializer: Callable[[], Coroutine[Any, Any, Any]], *async_funcs: Callable[ [Sequence[BaseCallbackHandler], Dict], Coroutine[Any, Any, Any] ], ) -> List[Any]: """ Run coroutines with a concurrency limit. 
Args: n: The maximum number of concurrent tasks. initializer: A coroutine that initializes shared resources for the tasks. async_funcs: The async_funcs to be run concurrently. Returns: A list of results from the coroutines. """ semaphore = asyncio.Semaphore(n) job_state = {"num_processed": 0} callback_queue: asyncio.Queue[Sequence[BaseCallbackHandler]] = asyncio.Queue() for _ in range(n): callback_queue.put_nowait(await initializer()) async def run_coroutine_with_semaphore( async_func: Callable[ [Sequence[BaseCallbackHandler], Dict], Coroutine[Any, Any, Any] ] ) -> Any: async with semaphore: callbacks = await callback_queue.get() try: result = await async_func(callbacks, job_state) finally: callback_queue.put_nowait(callbacks) return result results = await asyncio.gather( *(run_coroutine_with_semaphore(function) for function in async_funcs) ) while callback_queue: try: callbacks = callback_queue.get_nowait() except asyncio.QueueEmpty: break for callback in callbacks: if isinstance(callback, (LangChainTracer, EvaluatorCallbackHandler)): callback.wait_for_futures() return results async def _callbacks_initializer( project_name: Optional[str], client: LangChainPlusClient, run_evaluators: Sequence[RunEvaluator], ) -> List[BaseTracer]: """ Initialize a tracer to share across tasks. Args: project_name: The project name for the tracer. Returns: A LangChainTracer instance with an active project. """ callbacks: List[BaseTracer] = [] if project_name: callbacks.append(LangChainTracer(project_name=project_name)) if run_evaluators: callbacks.append( EvaluatorCallbackHandler( client=client, evaluators=run_evaluators, # We already have concurrency, don't want to overload the machine max_workers=1, ) ) return callbacks async def arun_on_examples( examples: Iterator[Example], llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, *, concurrency_level: int = 5, num_repetitions: int = 1, project_name: Optional[str] = None, verbose: bool = False, client: Optional[LangChainPlusClient] = None, tags: Optional[List[str]] = None, run_evaluators: Optional[Sequence[RunEvaluator]] = None, input_mapper: Optional[Callable[[Dict], Any]] = None, ) -> Dict[str, Any]: """ Asynchronously run the chain on examples and store traces to the specified project name. Args: examples: Examples to run the model or chain over. llm_or_chain_factory: Language model or Chain constructor to run over the dataset. The Chain constructor is used to permit independent calls on each example without carrying over state. concurrency_level: The number of async tasks to run concurrently. num_repetitions: Number of times to run the model on each example. This is useful when testing success rates or generating confidence intervals. project_name: Project name to use when tracing runs. Defaults to {dataset_name}-{chain class name}-{datetime}. verbose: Whether to print progress. client: Client to use to read the dataset. If not provided, a new client will be created using the credentials in the environment. tags: Tags to add to each run in the project. run_evaluators: Evaluators to run on the results of the chain. input_mapper: function to map to the inputs dictionary from an Example to the format expected by the model to be evaluated. This is useful if your model needs to deserialize more complex schema or if your dataset has inputs with keys that differ from what is expected by your chain or agent. Returns: A dictionary mapping example ids to the model outputs. 
""" project_name = _get_project_name(project_name, llm_or_chain_factory, None) client_ = client or LangChainPlusClient() client_.create_project(project_name, mode="eval") results: Dict[str, List[Any]] = {} evaluation_handler = EvaluatorCallbackHandler( evaluators=run_evaluators or [], client=client_ ) async def process_example( example: Example, callbacks: List[BaseCallbackHandler], job_state: dict ) -> None: """Process a single example.""" result = await _arun_llm_or_chain( example, llm_or_chain_factory, num_repetitions, tags=tags, callbacks=callbacks, input_mapper=input_mapper, ) results[str(example.id)] = result job_state["num_processed"] += 1 if verbose: print( f"Processed examples: {job_state['num_processed']}", end="\r", flush=True, ) await _gather_with_concurrency( concurrency_level, functools.partial( _callbacks_initializer, project_name=project_name, client=client_, run_evaluators=run_evaluators or [], ), *(functools.partial(process_example, e) for e in examples), ) evaluation_handler.wait_for_futures() return results def run_llm( llm: BaseLanguageModel, inputs: Dict[str, Any], callbacks: Callbacks, *, tags: Optional[List[str]] = None, input_mapper: Optional[Callable[[Dict], Any]] = None, ) -> Union[LLMResult, ChatResult]: """ Run the language model on the example. Args: llm: The language model to run. inputs: The input dictionary. callbacks: The callbacks to use during the run. tags: Optional tags to add to the run. input_mapper: function to map to the inputs dictionary from an Example Returns: The LLMResult or ChatResult. Raises: ValueError: If the LLM type is unsupported. InputFormatError: If the input format is invalid. """ if input_mapper is not None: if not isinstance(llm, (BaseLLM, BaseChatModel)): raise ValueError(f"Unsupported LLM type {type(llm).__name__}") llm_output = llm.generate(input_mapper(inputs), callbacks=callbacks, tags=tags) elif isinstance(llm, BaseLLM): try: llm_prompts = _get_prompts(inputs) llm_output = llm.generate(llm_prompts, callbacks=callbacks, tags=tags) except InputFormatError: llm_messages = _get_messages(inputs) buffer_strings = [get_buffer_string(messages) for messages in llm_messages] llm_output = llm.generate(buffer_strings, callbacks=callbacks) elif isinstance(llm, BaseChatModel): try: messages = _get_messages(inputs) llm_output = llm.generate(messages, callbacks=callbacks, tags=tags) except InputFormatError: prompts = _get_prompts(inputs) converted_messages: List[List[BaseMessage]] = [ [HumanMessage(content=prompt)] for prompt in prompts ] llm_output = llm.generate( converted_messages, callbacks=callbacks, tags=tags ) else: raise ValueError(f"Unsupported LLM type {type(llm)}") return llm_output def run_llm_or_chain( example: Example, llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, n_repetitions: int, *, tags: Optional[List[str]] = None, callbacks: Optional[List[BaseCallbackHandler]] = None, input_mapper: Optional[Callable[[Dict], Any]] = None, ) -> Union[List[dict], List[str], List[LLMResult], List[ChatResult]]: """ Run the Chain or language model synchronously. Args: example: The example to run. llm_or_chain_factory: The Chain or language model constructor to run. n_repetitions: The number of times to run the model on each example. tags: Optional tags to add to the run. callbacks: Optional callbacks to use during the run. Returns: A list of outputs. 
""" if callbacks: previous_example_ids = [ getattr(tracer, "example_id", None) for tracer in callbacks ] for tracer in callbacks: if hasattr(tracer, "example_id"): tracer.example_id = example.id else: previous_example_ids = None outputs = [] for _ in range(n_repetitions): try: if isinstance(llm_or_chain_factory, BaseLanguageModel): output: Any = run_llm( llm_or_chain_factory, example.inputs, callbacks, tags=tags, input_mapper=input_mapper, ) else: chain = llm_or_chain_factory() if input_mapper is not None: inputs_ = input_mapper(example.inputs) else: inputs_ = example.inputs if len(inputs_) == 1: inputs_ = next(iter(inputs_.values())) output = chain(inputs_, callbacks=callbacks, tags=tags) outputs.append(output) except Exception as e: logger.warning(f"Chain failed for example {example.id}. Error: {e}") outputs.append({"Error": str(e)}) if callbacks and previous_example_ids: for example_id, tracer in zip(previous_example_ids, callbacks): if hasattr(tracer, "example_id"): tracer.example_id = example_id return outputs def run_on_examples( examples: Iterator[Example], llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, *, num_repetitions: int = 1, project_name: Optional[str] = None, verbose: bool = False, client: Optional[LangChainPlusClient] = None, tags: Optional[List[str]] = None, run_evaluators: Optional[Sequence[RunEvaluator]] = None, input_mapper: Optional[Callable[[Dict], Any]] = None, ) -> Dict[str, Any]: """ Run the Chain or language model on examples and store traces to the specified project name. Args: examples: Examples to run the model or chain over. llm_or_chain_factory: Language model or Chain constructor to run over the dataset. The Chain constructor is used to permit independent calls on each example without carrying over state. num_repetitions: Number of times to run the model on each example. This is useful when testing success rates or generating confidence intervals. project_name: Name of the project to store the traces in. Defaults to {dataset_name}-{chain class name}-{datetime}. verbose: Whether to print progress. client: Client to use to access the dataset. If None, a new client will be created using the credentials in the environment. tags: Tags to add to each run in the project. run_evaluators: Evaluators to run on the results of the chain. input_mapper: A function to map to the inputs dictionary from an Example to the format expected by the model to be evaluated. This is useful if your model needs to deserialize more complex schema or if your dataset has inputs with keys that differ from what is expected by your chain or agent. Returns: A dictionary mapping example ids to the model outputs. 
""" results: Dict[str, Any] = {} project_name = _get_project_name(project_name, llm_or_chain_factory, None) client_ = client or LangChainPlusClient() client_.create_project(project_name, mode="eval") tracer = LangChainTracer(project_name=project_name) evalution_handler = EvaluatorCallbackHandler( evaluators=run_evaluators or [], client=client_ ) callbacks: List[BaseCallbackHandler] = [tracer, evalution_handler] for i, example in enumerate(examples): result = run_llm_or_chain( example, llm_or_chain_factory, num_repetitions, tags=tags, callbacks=callbacks, input_mapper=input_mapper, ) if verbose: print(f"{i+1} processed", flush=True, end="\r") results[str(example.id)] = result tracer.wait_for_futures() evalution_handler.wait_for_futures() return results def _get_project_name( project_name: Optional[str], llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, dataset_name: Optional[str], ) -> str: """ Get the project name. Args: project_name: The project name if manually specified. llm_or_chain_factory: The Chain or language model constructor. dataset_name: The dataset name. Returns: The project name. """ if project_name is not None: return project_name current_time = datetime.now().strftime("%Y-%m-%d-%H-%M-%S") if isinstance(llm_or_chain_factory, BaseLanguageModel): model_name = llm_or_chain_factory.__class__.__name__ else: model_name = llm_or_chain_factory().__class__.__name__ dataset_prefix = f"{dataset_name}-" if dataset_name else "" return f"{dataset_prefix}{model_name}-{current_time}" async def arun_on_dataset( dataset_name: str, llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, *, concurrency_level: int = 5, num_repetitions: int = 1, project_name: Optional[str] = None, verbose: bool = False, client: Optional[LangChainPlusClient] = None, tags: Optional[List[str]] = None, run_evaluators: Optional[Sequence[RunEvaluator]] = None, input_mapper: Optional[Callable[[Dict], Any]] = None, ) -> Dict[str, Any]: """ Asynchronously run the Chain or language model on a dataset and store traces to the specified project name. Args: dataset_name: Name of the dataset to run the chain on. llm_or_chain_factory: Language model or Chain constructor to run over the dataset. The Chain constructor is used to permit independent calls on each example without carrying over state. concurrency_level: The number of async tasks to run concurrently. num_repetitions: Number of times to run the model on each example. This is useful when testing success rates or generating confidence intervals. project_name: Name of the project to store the traces in. Defaults to {dataset_name}-{chain class name}-{datetime}. verbose: Whether to print progress. client: Client to use to read the dataset. If not provided, a new client will be created using the credentials in the environment. tags: Tags to add to each run in the project. run_evaluators: Evaluators to run on the results of the chain. input_mapper: A function to map to the inputs dictionary from an Example to the format expected by the model to be evaluated. This is useful if your model needs to deserialize more complex schema or if your dataset has inputs with keys that differ from what is expected by your chain or agent. Returns: A dictionary containing the run's project name and the resulting model outputs. 
""" client_ = client or LangChainPlusClient() project_name = _get_project_name(project_name, llm_or_chain_factory, dataset_name) dataset = client_.read_dataset(dataset_name=dataset_name) examples = client_.list_examples(dataset_id=str(dataset.id)) results = await arun_on_examples( examples, llm_or_chain_factory, concurrency_level=concurrency_level, num_repetitions=num_repetitions, project_name=project_name, verbose=verbose, client=client_, tags=tags, run_evaluators=run_evaluators, input_mapper=input_mapper, ) return { "project_name": project_name, "results": results, } def run_on_dataset( dataset_name: str, llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, *, num_repetitions: int = 1, project_name: Optional[str] = None, verbose: bool = False, client: Optional[LangChainPlusClient] = None, tags: Optional[List[str]] = None, run_evaluators: Optional[Sequence[RunEvaluator]] = None, input_mapper: Optional[Callable[[Dict], Any]] = None, ) -> Dict[str, Any]: """ Run the Chain or language model on a dataset and store traces to the specified project name. Args: dataset_name: Name of the dataset to run the chain on. llm_or_chain_factory: Language model or Chain constructor to run over the dataset. The Chain constructor is used to permit independent calls on each example without carrying over state. concurrency_level: Number of async workers to run in parallel. num_repetitions: Number of times to run the model on each example. This is useful when testing success rates or generating confidence intervals. project_name: Name of the project to store the traces in. Defaults to {dataset_name}-{chain class name}-{datetime}. verbose: Whether to print progress. client: Client to use to access the dataset. If None, a new client will be created using the credentials in the environment. tags: Tags to add to each run in the project. run_evaluators: Evaluators to run on the results of the chain. input_mapper: A function to map to the inputs dictionary from an Example to the format expected by the model to be evaluated. This is useful if your model needs to deserialize more complex schema or if your dataset has inputs with keys that differ from what is expected by your chain or agent. Returns: A dictionary containing the run's project name and the resulting model outputs. """ client_ = client or LangChainPlusClient() project_name = _get_project_name(project_name, llm_or_chain_factory, dataset_name) dataset = client_.read_dataset(dataset_name=dataset_name) examples = client_.list_examples(dataset_id=str(dataset.id)) results = run_on_examples( examples, llm_or_chain_factory, num_repetitions=num_repetitions, project_name=project_name, verbose=verbose, tags=tags, run_evaluators=run_evaluators, client=client_, input_mapper=input_mapper, ) return { "project_name": project_name, "results": results, }
[ "langchain.schema.get_buffer_string", "langchain.schema.messages_from_dict", "langchain.schema.HumanMessage", "langchain.callbacks.tracers.langchain.LangChainTracer", "langchain.callbacks.tracers.evaluation.EvaluatorCallbackHandler", "langchainplus_sdk.LangChainPlusClient" ]
[((1077, 1104), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1094, 1104), False, 'import logging\n'), ((9051, 9071), 'asyncio.Semaphore', 'asyncio.Semaphore', (['n'], {}), '(n)\n', (9068, 9071), False, 'import asyncio\n'), ((9177, 9192), 'asyncio.Queue', 'asyncio.Queue', ([], {}), '()\n', (9190, 9192), False, 'import asyncio\n'), ((13223, 13296), 'langchain.callbacks.tracers.evaluation.EvaluatorCallbackHandler', 'EvaluatorCallbackHandler', ([], {'evaluators': '(run_evaluators or [])', 'client': 'client_'}), '(evaluators=run_evaluators or [], client=client_)\n', (13247, 13296), False, 'from langchain.callbacks.tracers.evaluation import EvaluatorCallbackHandler\n'), ((20790, 20832), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'project_name': 'project_name'}), '(project_name=project_name)\n', (20805, 20832), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((20857, 20930), 'langchain.callbacks.tracers.evaluation.EvaluatorCallbackHandler', 'EvaluatorCallbackHandler', ([], {'evaluators': '(run_evaluators or [])', 'client': 'client_'}), '(evaluators=run_evaluators or [], client=client_)\n', (20881, 20930), False, 'from langchain.callbacks.tracers.evaluation import EvaluatorCallbackHandler\n'), ((3845, 3870), 'langchain.schema.messages_from_dict', 'messages_from_dict', (['batch'], {}), '(batch)\n', (3863, 3870), False, 'from langchain.schema import BaseMessage, ChatResult, HumanMessage, LLMResult, get_buffer_string, messages_from_dict\n'), ((13082, 13103), 'langchainplus_sdk.LangChainPlusClient', 'LangChainPlusClient', ([], {}), '()\n', (13101, 13103), False, 'from langchainplus_sdk import LangChainPlusClient, RunEvaluator\n'), ((20701, 20722), 'langchainplus_sdk.LangChainPlusClient', 'LangChainPlusClient', ([], {}), '()\n', (20720, 20722), False, 'from langchainplus_sdk import LangChainPlusClient, RunEvaluator\n'), ((24425, 24446), 'langchainplus_sdk.LangChainPlusClient', 'LangChainPlusClient', ([], {}), '()\n', (24444, 24446), False, 'from langchainplus_sdk import LangChainPlusClient, RunEvaluator\n'), ((27111, 27132), 'langchainplus_sdk.LangChainPlusClient', 'LangChainPlusClient', ([], {}), '()\n', (27130, 27132), False, 'from langchainplus_sdk import LangChainPlusClient, RunEvaluator\n'), ((10607, 10649), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'project_name': 'project_name'}), '(project_name=project_name)\n', (10622, 10649), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((10712, 10797), 'langchain.callbacks.tracers.evaluation.EvaluatorCallbackHandler', 'EvaluatorCallbackHandler', ([], {'client': 'client', 'evaluators': 'run_evaluators', 'max_workers': '(1)'}), '(client=client, evaluators=run_evaluators,\n max_workers=1)\n', (10736, 10797), False, 'from langchain.callbacks.tracers.evaluation import EvaluatorCallbackHandler\n'), ((14040, 14166), 'functools.partial', 'functools.partial', (['_callbacks_initializer'], {'project_name': 'project_name', 'client': 'client_', 'run_evaluators': '(run_evaluators or [])'}), '(_callbacks_initializer, project_name=project_name, client\n =client_, run_evaluators=run_evaluators or [])\n', (14057, 14166), False, 'import functools\n'), ((21992, 22006), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (22004, 22006), False, 'from datetime import datetime\n'), ((14232, 14269), 'functools.partial', 'functools.partial', (['process_example', 'e'], {}), '(process_example, e)\n', 
(14249, 14269), False, 'import functools\n'), ((5265, 5292), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (5282, 5292), False, 'from langchain.schema import BaseMessage, ChatResult, HumanMessage, LLMResult, get_buffer_string, messages_from_dict\n'), ((15638, 15665), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (15655, 15665), False, 'from langchain.schema import BaseMessage, ChatResult, HumanMessage, LLMResult, get_buffer_string, messages_from_dict\n'), ((5785, 5813), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'prompt'}), '(content=prompt)\n', (5797, 5813), False, 'from langchain.schema import BaseMessage, ChatResult, HumanMessage, LLMResult, get_buffer_string, messages_from_dict\n'), ((16103, 16131), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'prompt'}), '(content=prompt)\n', (16115, 16131), False, 'from langchain.schema import BaseMessage, ChatResult, HumanMessage, LLMResult, get_buffer_string, messages_from_dict\n')]
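A brief usage sketch for the synchronous run_on_dataset entry point defined in the module above. The dataset name and the chain factory are illustrative assumptions, and LangChainPlus credentials are expected to be configured in the environment:

from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate

def chain_factory():
    # A fresh chain per example, so no state carries over between runs.
    prompt = PromptTemplate.from_template("Answer briefly: {question}")
    return LLMChain(llm=ChatOpenAI(temperature=0), prompt=prompt)

# run_on_dataset is the function defined in the module above.
# "my-qa-dataset" is a placeholder name for an existing dataset.
results = run_on_dataset(
    dataset_name="my-qa-dataset",
    llm_or_chain_factory=chain_factory,
    num_repetitions=1,
    verbose=True,
)
print(results["project_name"], len(results["results"]))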
from langchain import OpenAI, SQLDatabase from langchain_experimental.sql import SQLDatabaseChain from langchain.memory import ConversationBufferMemory from langchain.agents import (AgentType, AgentExecutor, create_react_agent, create_openai_functions_agent, create_sql_agent) from langchain.agents.initialize import initialize_agent from langchain.tools import Tool from langchain.prompts import PromptTemplate # from langchain.retrievers import RetrievalModel from langchain.indexes.vectorstore import VectorstoreIndexCreator from langchain_community.document_loaders.text import TextLoader from langchain_community.agent_toolkits import SQLDatabaseToolkit import langchain import psycopg2 import os import constants import constants_allen os.environ["OPENAI_API_KEY"] = constants.OPENAI_API_KEY import warnings warnings.filterwarnings('ignore') # Setup database connection # Setup database db = SQLDatabase.from_uri( f"postgresql+psycopg2://postgres:{constants.DBPASS}@localhost:5433/{constants.DB}", ) # Setup conversation memory memory = ConversationBufferMemory(memory_key="chat_history") # Setup llm llm = OpenAI(temperature=0, model_name="gpt-3.5-turbo") # Setup prompt template _DEFAULT_TEMPLATE = """Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer. Unless the user specifies in his question a specific number of examples he wishes to obtain, always limit your query to at most {top_k} results. You can order the results by a relevant column to return the most interesting examples in the database. Never query for all the columns from a specific table, only ask for a the few relevant columns given the question. Pay attention to use only the column names that you can see in the schema description. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table. Use the following format: Question: Question here SQLQuery: SQL Query to run SQLResult: Result of the SQLQuery Answer: Final answer here Only use the following tables: {table_info} Question: {input} """ # TEMPLATE = '''"Given your input question, provide the answer from the most relevant source. You can choose between two sources: # 1. Text File Source: This source contains Q&A data on data structures and algorithms. # 2. Database Source: This source includes information from three tables: # - 'developers' table: Contains details about developers, such as full_name, email, phone, position, and department. # - 'tasks' table: Holds information about developers' tasks, including the task, completion status, due date, completion date, and priority. # - 'insurance_data' table: Contains information about PRU Life Insurance and USA Insurance, including questions and answers. # Only query the content of the sources, not the metadata. # You have access to the following tools: # {tools} # Use the following format: # Question: the input question you must answer # Thought: you should always think about what to do # Action: the action to take, should be one of [{tool_names}] # Action Input: the input to the action # Observation: the result of the action # ... (this Thought/Action/Action Input/Observation can repeat N times) # Thought: I now know the final answer # Final Answer: the final answer to the original input question # Begin! 
# Question: {input} # Thought:{agent_scratchpad}''' PROMPT = PromptTemplate.from_template(_DEFAULT_TEMPLATE) # Setup db chain db_chain = SQLDatabaseChain(llm=llm, database=db, verbose=True) toolkit = SQLDatabaseToolkit(db=db, llm=OpenAI(temperature=0, verbose=True, model_name="gpt-3.5-turbo")) # Setup text data source directory = "./data/final1.txt" loader = TextLoader(directory) index_creator = VectorstoreIndexCreator() index = index_creator.from_loaders([loader]) retriever_text = index.vectorstore.as_retriever() # Define the tools needed tools = [ Tool(name="MyDB", func=db_chain.run, description="Query the PostgreSQL database",), Tool(name="MyRetriever", func=retriever_text.get_relevant_documents, description="Retrieve documents from text file"), ] agent = initialize_agent( tools, llm, memory=memory, agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, handle_parsing_errors=True, output_key="result", early_stopping_method="generate", max_iterations=3, ) # agent = create_sql_agent( # llm=OpenAI(temperature=0), # toolkit=toolkit, # verbose=True, # agent_type=AgentType.CONVERSATIONAL_REACT_DESCRIPTION, # ) # agent = create_react_agent( # llm=llm, # tools=tools, # prompt=PROMPT # ) # agent_executor = AgentExecutor(agent=agent, tools=tools, memory=memory, # verbose=True, handle_parsing_errors=True) yellow = "\033[0;33m" green = "\033[0;32m" white = "\033[0;39m" def chat(): while True: user_input = input("You: ") # Get user input if user_input.lower() == 'exit': print(f"{green}AI: Thank you for using our AI Assistant! If you have any more questions in the future, feel free to ask. Have a great day!") break # Use the agent to generate a response try: bot_response = agent.run(user_input) # bot_response = agent_executor.invoke({"input": user_input}) except ValueError as e: bot_response = str(e) if not bot_response.startswith("Could not parse LLM output: `"): raise e bot_response = bot_response.removeprefix("Could not parse LLM output: `").removesuffix("`") print("Chatbot: " + bot_response) if __name__ == "__main__": chat()
[ "langchain_experimental.sql.SQLDatabaseChain", "langchain.agents.initialize.initialize_agent", "langchain.tools.Tool", "langchain.memory.ConversationBufferMemory", "langchain_community.document_loaders.text.TextLoader", "langchain.indexes.vectorstore.VectorstoreIndexCreator", "langchain.prompts.PromptTemplate.from_template", "langchain.SQLDatabase.from_uri", "langchain.OpenAI" ]
[((937, 970), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (960, 970), False, 'import warnings\n'), ((1023, 1137), 'langchain.SQLDatabase.from_uri', 'SQLDatabase.from_uri', (['f"""postgresql+psycopg2://postgres:{constants.DBPASS}@localhost:5433/{constants.DB}"""'], {}), "(\n f'postgresql+psycopg2://postgres:{constants.DBPASS}@localhost:5433/{constants.DB}'\n )\n", (1043, 1137), False, 'from langchain import OpenAI, SQLDatabase\n'), ((1173, 1224), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""'}), "(memory_key='chat_history')\n", (1197, 1224), False, 'from langchain.memory import ConversationBufferMemory\n'), ((1244, 1293), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0, model_name='gpt-3.5-turbo')\n", (1250, 1293), False, 'from langchain import OpenAI, SQLDatabase\n'), ((3583, 3630), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['_DEFAULT_TEMPLATE'], {}), '(_DEFAULT_TEMPLATE)\n', (3611, 3630), False, 'from langchain.prompts import PromptTemplate\n'), ((3660, 3712), 'langchain_experimental.sql.SQLDatabaseChain', 'SQLDatabaseChain', ([], {'llm': 'llm', 'database': 'db', 'verbose': '(True)'}), '(llm=llm, database=db, verbose=True)\n', (3676, 3712), False, 'from langchain_experimental.sql import SQLDatabaseChain\n'), ((3885, 3906), 'langchain_community.document_loaders.text.TextLoader', 'TextLoader', (['directory'], {}), '(directory)\n', (3895, 3906), False, 'from langchain_community.document_loaders.text import TextLoader\n'), ((3923, 3948), 'langchain.indexes.vectorstore.VectorstoreIndexCreator', 'VectorstoreIndexCreator', ([], {}), '()\n', (3946, 3948), False, 'from langchain.indexes.vectorstore import VectorstoreIndexCreator\n'), ((4303, 4525), 'langchain.agents.initialize.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'memory': 'memory', 'agent': 'AgentType.CONVERSATIONAL_REACT_DESCRIPTION', 'verbose': '(True)', 'handle_parsing_errors': '(True)', 'output_key': '"""result"""', 'early_stopping_method': '"""generate"""', 'max_iterations': '(3)'}), "(tools, llm, memory=memory, agent=AgentType.\n CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, handle_parsing_errors=\n True, output_key='result', early_stopping_method='generate',\n max_iterations=3)\n", (4319, 4525), False, 'from langchain.agents.initialize import initialize_agent\n'), ((4085, 4171), 'langchain.tools.Tool', 'Tool', ([], {'name': '"""MyDB"""', 'func': 'db_chain.run', 'description': '"""Query the PostgreSQL database"""'}), "(name='MyDB', func=db_chain.run, description=\n 'Query the PostgreSQL database')\n", (4089, 4171), False, 'from langchain.tools import Tool\n'), ((4173, 4294), 'langchain.tools.Tool', 'Tool', ([], {'name': '"""MyRetriever"""', 'func': 'retriever_text.get_relevant_documents', 'description': '"""Retrieve documents from text file"""'}), "(name='MyRetriever', func=retriever_text.get_relevant_documents,\n description='Retrieve documents from text file')\n", (4177, 4294), False, 'from langchain.tools import Tool\n'), ((3753, 3816), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'verbose': '(True)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0, verbose=True, model_name='gpt-3.5-turbo')\n", (3759, 3816), False, 'from langchain import OpenAI, SQLDatabase\n')]
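One detail worth noting in the script above: the PROMPT built from _DEFAULT_TEMPLATE is never handed to SQLDatabaseChain, so the chain runs with its default prompt. A hedged sketch of wiring the custom template in through the from_llm constructor; the connection string is a placeholder and the exact keyword may vary by langchain_experimental version:

from langchain import OpenAI, SQLDatabase
from langchain.prompts import PromptTemplate
from langchain_experimental.sql import SQLDatabaseChain

# Placeholder connection string; the real script builds it from constants.py.
db = SQLDatabase.from_uri("postgresql+psycopg2://postgres:secret@localhost:5433/mydb")
llm = OpenAI(temperature=0, model_name="gpt-3.5-turbo")

# _DEFAULT_TEMPLATE is the template string defined in the script above.
custom_prompt = PromptTemplate.from_template(_DEFAULT_TEMPLATE)

# from_llm lets the custom prompt replace the built-in one.
db_chain = SQLDatabaseChain.from_llm(llm, db, prompt=custom_prompt, verbose=True)
print(db_chain.run("How many clients does the bank have in total?"))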
import langchain
import re
from typing import TypeVar, Optional
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from mdutils.mdutils import MdUtils
from openai import ChatCompletion

## you can use typing.Self after python 3.11
Self = TypeVar("Self")

def set_up() -> None:
    load_dotenv()
    langchain.verbose = True
    return

def get_gpt_response(query: str) -> str:
    response = ChatCompletion.create(
        model="gpt-3.5-turbo", messages=[{"role": "user", "content": query}]
    )
    return response["choices"][0]["message"]["content"].strip()

"""
def time_measurement(func: Callable, val: Any) -> Any:
    start = time.time()
    response = func(**val)
    elapsed_time = time.time() - start
    return response, elapsed_time
"""

def create_llm(llm_name: str) -> ChatOpenAI:
    return ChatOpenAI(temperature=0, model_name=llm_name)

def create_CBmemory() -> ConversationBufferMemory:
    return ConversationBufferMemory(
        return_messages=True, memory_key="chat_history", output_key="output"
    )

def sep_md(mdFile: MdUtils) -> None:
    mdFile.new_line()
    mdFile.new_line("---")
    mdFile.new_line()

def host_validation(host: Optional[str]):
    # True if host is a non-empty string
    # TODO: the content of the string should also be validated
    if not host:
        return False
    elif isinstance(host, str):
        return True

def port_validation(port: Optional[str]):
    # True if port is a string of ASCII digits
    # False otherwise
    if not port:
        return False
    return True if re.fullmatch("[0-9]+", port) else False
[ "langchain.memory.ConversationBufferMemory", "langchain.chat_models.ChatOpenAI" ]
[((319, 334), 'typing.TypeVar', 'TypeVar', (['"""Self"""'], {}), "('Self')\n", (326, 334), False, 'from typing import TypeVar, Optional\n'), ((363, 376), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (374, 376), False, 'from dotenv import load_dotenv\n'), ((475, 570), 'openai.ChatCompletion.create', 'ChatCompletion.create', ([], {'model': '"""gpt-3.5-turbo"""', 'messages': "[{'role': 'user', 'content': query}]"}), "(model='gpt-3.5-turbo', messages=[{'role': 'user',\n 'content': query}])\n", (496, 570), False, 'from openai import ChatCompletion\n'), ((892, 938), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': 'llm_name'}), '(temperature=0, model_name=llm_name)\n', (902, 938), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1003, 1101), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'return_messages': '(True)', 'memory_key': '"""chat_history"""', 'output_key': '"""output"""'}), "(return_messages=True, memory_key='chat_history',\n output_key='output')\n", (1027, 1101), False, 'from langchain.memory import ConversationBufferMemory\n'), ((1549, 1577), 're.fullmatch', 're.fullmatch', (['"""[0-9]+"""', 'port'], {}), "('[0-9]+', port)\n", (1561, 1577), False, 'import re\n')]
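A short usage sketch for the helpers defined above. The host and port values are arbitrary, and the OpenAI call assumes OPENAI_API_KEY is provided through the .env file loaded by set_up():

# Illustrative values only; these call the helpers defined in the module above.
assert port_validation("5433") is True
assert port_validation("54x3") is False
assert port_validation(None) is False
assert host_validation("localhost") is True
assert host_validation(None) is False

set_up()                              # loads .env and turns on langchain.verbose
llm = create_llm("gpt-3.5-turbo")      # ChatOpenAI with temperature=0
memory = create_CBmemory()            # buffer memory keyed on "chat_history"
print(get_gpt_response("Reply with a single word: hello"))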
import streamlit as st import langchain_helper st.title("Restaurant Name Generator") cuisine = st.sidebar.selectbox("Pick a Cuisine", ("Indian", "Chinese", "Italian", "Mexican", "American","England")) if cuisine: response = langchain_helper.generate_restaurant_name_and_items(cuisine) st.header(response['restaurant_name'].strip()) menu_items = response['menu_items'].strip().split(',') st.write("### Menu") for item in menu_items: st.write("-",item)
[ "langchain_helper.generate_restaurant_name_and_items" ]
[((48, 85), 'streamlit.title', 'st.title', (['"""Restaurant Name Generator"""'], {}), "('Restaurant Name Generator')\n", (56, 85), True, 'import streamlit as st\n'), ((97, 207), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Pick a Cuisine"""', "('Indian', 'Chinese', 'Italian', 'Mexican', 'American', 'England')"], {}), "('Pick a Cuisine', ('Indian', 'Chinese', 'Italian',\n 'Mexican', 'American', 'England'))\n", (117, 207), True, 'import streamlit as st\n'), ((233, 293), 'langchain_helper.generate_restaurant_name_and_items', 'langchain_helper.generate_restaurant_name_and_items', (['cuisine'], {}), '(cuisine)\n', (284, 293), False, 'import langchain_helper\n'), ((408, 428), 'streamlit.write', 'st.write', (['"""### Menu"""'], {}), "('### Menu')\n", (416, 428), True, 'import streamlit as st\n'), ((465, 484), 'streamlit.write', 'st.write', (['"""-"""', 'item'], {}), "('-', item)\n", (473, 484), True, 'import streamlit as st\n')]
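The langchain_helper module itself is not shown in this record. Purely as an illustration, a hypothetical generate_restaurant_name_and_items could chain two LLMChains with a SequentialChain; the prompts and keys below are assumptions, not the actual helper:

from langchain.chains import LLMChain, SequentialChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

def generate_restaurant_name_and_items(cuisine: str) -> dict:
    llm = OpenAI(temperature=0.7)

    name_prompt = PromptTemplate.from_template(
        "I want to open a restaurant for {cuisine} food. Suggest one fancy name."
    )
    name_chain = LLMChain(llm=llm, prompt=name_prompt, output_key="restaurant_name")

    menu_prompt = PromptTemplate.from_template(
        "Suggest some menu items for {restaurant_name}. Return them comma separated."
    )
    menu_chain = LLMChain(llm=llm, prompt=menu_prompt, output_key="menu_items")

    chain = SequentialChain(
        chains=[name_chain, menu_chain],
        input_variables=["cuisine"],
        output_variables=["restaurant_name", "menu_items"],
    )
    # Returns a dict containing "restaurant_name" and "menu_items",
    # which is the shape the Streamlit app above consumes.
    return chain({"cuisine": cuisine})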
import os from datasets import get_dataset from langchain.chat_models import ChatOpenAI from langchain.prompts import PromptTemplate from langchain.chains import LLMChain from langchain.callbacks import get_openai_callback from utils.timer import Timer import logging import numpy as np import seaborn as sns import matplotlib.pyplot as plt from config import api_key, load_config import wandb import langchain langchain.verbose = True # If you don't want your script to sync to the cloud os.environ["WANDB_MODE"] = "offline" CLASSIFIY_PROMPT = """ You are a text-to-SQL expert able to identify poorly formulated questions in natural language. The dataset used is consisting of questions and their corresponding golden SQL queries. You will be given the database schema of the database corresponding to the question. Furthermore, you will also be given a hint that provides additional information that is needed to correctly convert the question and interpret the database schema. However, some of the questions in the data are poorly formulated or contain errors. Below is a classification scheme for the questions that are to be converted into SQL queries. 0 = Correct question. May still contain minor errors in language or minor ambiguities that do not affect the interpretation and generation of the SQL query 1 = Is unclear, ambiguous, unspecific or contain grammatical errors that surely is going to affect the interpretation and generation of the SQL query. The question is unspecific in which columns that are to be returned. The question is not asking for a specific column, but asks generally about a table in the database. 2 = The question contains minor errors in language or minor ambiguities that might affect the interpretation and generation of the SQL query. 3 = The question is wrongly formulated when considering the structure of the database schema. The information that the question is asking for is not possible to accurately retrieve from the database. Here are some examples of questions that would be classified with 1 and an explanation of why: Example 1: List the customer who made the transaction id 3682978 Explanation: The question is unspecific in which columns that are to be returned. It asks to list the customers, but does not specify which columns that are to be returned from the client table. Example 2: Which district has the largest amount of female clients? Explanation: The question is unspecific in which columns that are to be returned. It asks "which district", but does not specify which columns that are to be returned from the district table. Example 3: What is the average amount of transactions done in the year of 1998 ? Explanation: Is unclear, ambiguous, unspecific or contain grammatical errors that surely is going to affect the interpretation and generation of the SQL query. Here is an example of a question that would be classified with 2 and an explanation of why: Example 1: What are the top 5 loans by region names for the month of Mars 1997? Explanation: The statement 'top 5' could be ambiguous. It could mean the top 5 loans by amount or the top 5 loans by number of loans. Here are some examples of questions that would be classified with 3 and an explanation of why: Example 1: What is the disposition id of the oldest client in the Prague region? Explanation: The question is wrongly formulated when considering the structure of the database schema. There can be multiple disposition ids for a client, since a client can have multiple accounts. 
The question is not asking for a specific disposition id, but asks generally about a client. Here are some examples of questions that would be classified with 0 and an explanation of why: Example 1: List the id of the customer who made the transaction id : 3682978 Explanation: Clear and correct question. Example 2: What is the name of the district that has the largest amount of female clients? Explanation: Specific and correct question. Example 3: What is the disposition id(s) of the oldest client in the Prague region? Explanation: The question is open for disposition ids which is correct when considering the sql-schema. Example 4: What was the average number of withdrawal transactions conducted by female clients from the Prague region during the year 1998? Explanation: Clear and correct question. Database schema: {database_schema} Hint: {evidence} Below you will be provided with the correct SQL-query that represents what the questions is trying to ask for. Gold query: {gold_query} Please classify the question below according to the classification scheme above, the examples, the hint and the SQL gold query provided. Also please assume that all dates, values, names and numbers in the questions are correct. Question: {question} In your answer DO NOT return anything else than the mark as a sole number. Do not return any corresponding text or explanations. """ #1 = Gray area, minor errors that may or may not affect the interpretation and generation of the SQL query. class Classifier(): total_tokens = 0 prompt_tokens = 0 total_cost = 0 completion_tokens = 0 last_call_execution_time = 0 total_call_execution_time = 0 def __init__(self, llm): self.llm = llm self.prompt_template = CLASSIFIY_PROMPT prompt = PromptTemplate( # input_variables=["question", "database_schema","evidence"], input_variables=["question", "database_schema", "evidence", 'gold_query'], template=CLASSIFIY_PROMPT, ) self.chain = LLMChain(llm=llm, prompt=prompt) def classify_question(self, question, schema, evidence, gold_query): with get_openai_callback() as cb: with Timer() as t: response = self.chain.run({ 'question': question, 'database_schema': schema, 'evidence': evidence, 'gold_query': gold_query }) logging.info(f"OpenAI API execution time: {t.elapsed_time:.2f}") self.last_call_execution_time = t.elapsed_time self.total_call_execution_time += t.elapsed_time self.total_tokens += cb.total_tokens self.prompt_tokens += cb.prompt_tokens self.total_cost += cb.total_cost self.completion_tokens += cb.completion_tokens return response accepted_faults = [1, 3] def main(): config = load_config("classifier_config.yaml") wandb.init( project=config.project, config=config, name=config.current_experiment, entity=config.entity ) artifact = wandb.Artifact('experiment_results', type='dataset') table = wandb.Table(columns=["Question", "Classified_quality", "Difficulty"]) ## Är det något mer vi vill ha med här? 
wandb_cm = wandb.Table(columns=['0', '1', '2', '3']) metrics_table = wandb.Table(columns=["Class", "Precision", "Recall", "F1 Score", "Accuracy"]) weighted_avg_table = wandb.Table(columns=["Metric", "Weighted Average"]) # "Weighted Averages", weighted_averages['precision'], weighted_averages['recall'], weighted_averages['f1'], weighted_averages['accuracy'] llm = ChatOpenAI( openai_api_key=api_key, model_name=config.llm_settings.model, temperature=config.llm_settings.temperature, request_timeout=config.llm_settings.request_timeout ) dataset = get_dataset("BIRDCorrectedFinancialGoldAnnotated") classifier = Classifier(llm) wandb.config['prompt'] = classifier.prompt_template no_data_points = dataset.get_number_of_data_points() tp = 0 fp = 0 tn = 0 fn = 0 confusion_matrix = np.zeros((4,4)) annotation_counts = {0: 0, 1: 0, 2: 0, 3: 0} for i in range(no_data_points): data_point = dataset.get_data_point(i) evidence = data_point['evidence'] db_id = data_point['db_id'] question = data_point['question'] gold_query = data_point['SQL'] difficulty = data_point['difficulty'] if 'difficulty' in data_point else "" annotated_question_quality = data_point["annotation"] sql_schema = dataset.get_schema_and_sample_data(db_id) classified_quality = classifier.classify_question(question, sql_schema, evidence, gold_query) classified_quality = int(classified_quality) if classified_quality.isdigit() else None print('classified_quality: ',classified_quality) if classified_quality is not None: for annotated_quality in annotated_question_quality: annotation_counts[annotated_quality] +=1 confusion_matrix[annotated_quality][classified_quality] += 1 print('confusion matrix:') print(confusion_matrix) # Converting to integer confusion_matrix = np.array(confusion_matrix).astype(int) print('annotation counts: ',annotation_counts) labels = [0, 1, 2, 3] sns.heatmap(confusion_matrix, annot=True, fmt="d", cmap="YlOrRd", xticklabels=labels, yticklabels=labels) plt.ylabel('True label') plt.xlabel('Predicted label') plt.savefig(f'{config.current_experiment}_heatmap.png') wandb.log({"confusion_matrix_heatmap": wandb.Image(f'{config.current_experiment}_heatmap.png')}) metrics = {'precision': 0, 'recall': 0, 'f1': 0, 'accuracy': 0} weighted_sums = {'precision': 0, 'recall': 0, 'f1': 0, 'accuracy': 0} total_instances = np.sum(confusion_matrix) for i in range(4): row_data = confusion_matrix[i].tolist() print('row_data: ', row_data) wandb_cm.add_data(*row_data) tp = confusion_matrix[i][i] fp = sum(confusion_matrix[:, i]) - tp fn = sum(confusion_matrix[i, :]) - tp tn = np.sum(confusion_matrix) - (tp + fp + fn) precision = tp / (tp + fp) if (tp + fp) != 0 else 0 recall = tp / (tp + fn) if (tp + fn) != 0 else 0 f1 = 2 * (precision * recall) / (precision + recall) if (precision + recall) != 0 else 0 accuracy = (tp + tn) / (tp + tn + fp + fn) metrics[i] = {'precision': precision, 'recall': recall, 'f1': f1, 'accuracy': accuracy} metrics_table.add_data(i, metrics[i]['precision'], metrics[i]['recall'], metrics[i]['f1'], metrics[i]['accuracy']) class_weight = sum(confusion_matrix[i, :]) weighted_sums['precision'] += precision * class_weight weighted_sums['recall'] += recall * class_weight weighted_sums['f1'] += f1 * class_weight weighted_sums['accuracy'] += accuracy * class_weight print('metrics for class ', i, ': ', metrics[i]) # metrics now contains the precision, recall, and F1-score for each category # annotated_question_qualities = set(annotated_question_quality) # if classified_quality.isdigit() and int(classified_quality) == 1: # if 
any(element in annotated_question_qualities for element in accepted_faults): # tp += 1 # else: # fp += 1 # elif classified_quality.isdigit() and int(classified_quality) == 0: # if any(element in annotated_question_qualities for element in accepted_faults): # fn += 1 # else: # tn += 1 # precision = tp / (tp + fp) # recall = tp / (tp + fn) # f1 = 2 * ((precision * recall) / (precision + recall)) # accuracy = (tp + tn) / (tp + tn + fp + fn) table.add_data(question, classified_quality, difficulty) wandb.log({ "total_tokens": classifier.total_tokens, "prompt_tokens": classifier.prompt_tokens, "completion_tokens": classifier.completion_tokens, "total_cost": classifier.total_cost, "openAPI_call_execution_time": classifier.last_call_execution_time, }, step=i+1) print("Predicted quality: ", classified_quality, " Annotated quality: ", " ".join(map(str, annotated_question_quality))) weighted_averages = {metric: total / total_instances for metric, total in weighted_sums.items()} print("Weighted Averages:", weighted_averages) # weighted_avg_table.add_data("Weighted Averages", weighted_averages['precision'], weighted_averages['recall'], weighted_averages['f1'], weighted_averages['accuracy']) weighted_avg_table.add_data("Precision", weighted_averages['precision']) weighted_avg_table.add_data("Recall", weighted_averages['recall']) weighted_avg_table.add_data("F1 Score", weighted_averages['f1']) weighted_avg_table.add_data("Accuracy", weighted_averages['accuracy']) wandb.run.summary["total_tokens"] = classifier.total_tokens wandb.run.summary["prompt_tokens"] = classifier.prompt_tokens wandb.run.summary["completion_tokens"] = classifier.completion_tokens wandb.run.summary["total_cost"] = classifier.total_cost wandb.run.summary['total_predicted_execution_time'] = dataset.total_predicted_execution_time wandb.run.summary['total_openAPI_execution_time'] = classifier.total_call_execution_time artifact.add(wandb_cm, "ConfusionMatrix_predictions") artifact.add(table, "query_results") artifact.add(metrics_table, "metrics") artifact.add(weighted_avg_table, "weighted_averages_metric_table") wandb.log_artifact(artifact) artifact_code = wandb.Artifact('code', type='code') artifact_code.add_file("src/run_classifier.py") wandb.log_artifact(artifact_code) wandb.finish() if __name__ == "__main__": main()
[ "langchain.chains.LLMChain", "langchain.prompts.PromptTemplate", "langchain.callbacks.get_openai_callback", "langchain.chat_models.ChatOpenAI" ]
[((6515, 6552), 'config.load_config', 'load_config', (['"""classifier_config.yaml"""'], {}), "('classifier_config.yaml')\n", (6526, 6552), False, 'from config import api_key, load_config\n'), ((6558, 6666), 'wandb.init', 'wandb.init', ([], {'project': 'config.project', 'config': 'config', 'name': 'config.current_experiment', 'entity': 'config.entity'}), '(project=config.project, config=config, name=config.\n current_experiment, entity=config.entity)\n', (6568, 6666), False, 'import wandb\n'), ((6716, 6768), 'wandb.Artifact', 'wandb.Artifact', (['"""experiment_results"""'], {'type': '"""dataset"""'}), "('experiment_results', type='dataset')\n", (6730, 6768), False, 'import wandb\n'), ((6781, 6850), 'wandb.Table', 'wandb.Table', ([], {'columns': "['Question', 'Classified_quality', 'Difficulty']"}), "(columns=['Question', 'Classified_quality', 'Difficulty'])\n", (6792, 6850), False, 'import wandb\n'), ((6906, 6947), 'wandb.Table', 'wandb.Table', ([], {'columns': "['0', '1', '2', '3']"}), "(columns=['0', '1', '2', '3'])\n", (6917, 6947), False, 'import wandb\n'), ((6968, 7045), 'wandb.Table', 'wandb.Table', ([], {'columns': "['Class', 'Precision', 'Recall', 'F1 Score', 'Accuracy']"}), "(columns=['Class', 'Precision', 'Recall', 'F1 Score', 'Accuracy'])\n", (6979, 7045), False, 'import wandb\n'), ((7071, 7122), 'wandb.Table', 'wandb.Table', ([], {'columns': "['Metric', 'Weighted Average']"}), "(columns=['Metric', 'Weighted Average'])\n", (7082, 7122), False, 'import wandb\n'), ((7278, 7457), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'api_key', 'model_name': 'config.llm_settings.model', 'temperature': 'config.llm_settings.temperature', 'request_timeout': 'config.llm_settings.request_timeout'}), '(openai_api_key=api_key, model_name=config.llm_settings.model,\n temperature=config.llm_settings.temperature, request_timeout=config.\n llm_settings.request_timeout)\n', (7288, 7457), False, 'from langchain.chat_models import ChatOpenAI\n'), ((7503, 7553), 'datasets.get_dataset', 'get_dataset', (['"""BIRDCorrectedFinancialGoldAnnotated"""'], {}), "('BIRDCorrectedFinancialGoldAnnotated')\n", (7514, 7553), False, 'from datasets import get_dataset\n'), ((7770, 7786), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (7778, 7786), True, 'import numpy as np\n'), ((9068, 9177), 'seaborn.heatmap', 'sns.heatmap', (['confusion_matrix'], {'annot': '(True)', 'fmt': '"""d"""', 'cmap': '"""YlOrRd"""', 'xticklabels': 'labels', 'yticklabels': 'labels'}), "(confusion_matrix, annot=True, fmt='d', cmap='YlOrRd',\n xticklabels=labels, yticklabels=labels)\n", (9079, 9177), True, 'import seaborn as sns\n'), ((9178, 9202), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (9188, 9202), True, 'import matplotlib.pyplot as plt\n'), ((9207, 9236), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (9217, 9236), True, 'import matplotlib.pyplot as plt\n'), ((9246, 9301), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{config.current_experiment}_heatmap.png"""'], {}), "(f'{config.current_experiment}_heatmap.png')\n", (9257, 9301), True, 'import matplotlib.pyplot as plt\n'), ((9569, 9593), 'numpy.sum', 'np.sum', (['confusion_matrix'], {}), '(confusion_matrix)\n', (9575, 9593), True, 'import numpy as np\n'), ((13640, 13668), 'wandb.log_artifact', 'wandb.log_artifact', (['artifact'], {}), '(artifact)\n', (13658, 13668), False, 'import wandb\n'), ((13690, 13725), 'wandb.Artifact', 'wandb.Artifact', 
(['"""code"""'], {'type': '"""code"""'}), "('code', type='code')\n", (13704, 13725), False, 'import wandb\n'), ((13782, 13815), 'wandb.log_artifact', 'wandb.log_artifact', (['artifact_code'], {}), '(artifact_code)\n', (13800, 13815), False, 'import wandb\n'), ((13821, 13835), 'wandb.finish', 'wandb.finish', ([], {}), '()\n', (13833, 13835), False, 'import wandb\n'), ((5346, 5466), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['question', 'database_schema', 'evidence', 'gold_query']", 'template': 'CLASSIFIY_PROMPT'}), "(input_variables=['question', 'database_schema', 'evidence',\n 'gold_query'], template=CLASSIFIY_PROMPT)\n", (5360, 5466), False, 'from langchain.prompts import PromptTemplate\n'), ((5598, 5630), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (5606, 5630), False, 'from langchain.chains import LLMChain\n'), ((11746, 12026), 'wandb.log', 'wandb.log', (["{'total_tokens': classifier.total_tokens, 'prompt_tokens': classifier.\n prompt_tokens, 'completion_tokens': classifier.completion_tokens,\n 'total_cost': classifier.total_cost, 'openAPI_call_execution_time':\n classifier.last_call_execution_time}"], {'step': '(i + 1)'}), "({'total_tokens': classifier.total_tokens, 'prompt_tokens':\n classifier.prompt_tokens, 'completion_tokens': classifier.\n completion_tokens, 'total_cost': classifier.total_cost,\n 'openAPI_call_execution_time': classifier.last_call_execution_time},\n step=i + 1)\n", (11755, 12026), False, 'import wandb\n'), ((5719, 5740), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (5738, 5740), False, 'from langchain.callbacks import get_openai_callback\n'), ((6031, 6095), 'logging.info', 'logging.info', (['f"""OpenAI API execution time: {t.elapsed_time:.2f}"""'], {}), "(f'OpenAI API execution time: {t.elapsed_time:.2f}')\n", (6043, 6095), False, 'import logging\n'), ((8942, 8968), 'numpy.array', 'np.array', (['confusion_matrix'], {}), '(confusion_matrix)\n', (8950, 8968), True, 'import numpy as np\n'), ((9346, 9401), 'wandb.Image', 'wandb.Image', (['f"""{config.current_experiment}_heatmap.png"""'], {}), "(f'{config.current_experiment}_heatmap.png')\n", (9357, 9401), False, 'import wandb\n'), ((9882, 9906), 'numpy.sum', 'np.sum', (['confusion_matrix'], {}), '(confusion_matrix)\n', (9888, 9906), True, 'import numpy as np\n'), ((5765, 5772), 'utils.timer.Timer', 'Timer', ([], {}), '()\n', (5770, 5772), False, 'from utils.timer import Timer\n')]
from fastapi import FastAPI, HTTPException
import uvicorn
from typing import Dict
import os
import sys
from dotenv import load_dotenv

load_dotenv()

# Make the project's src directory importable regardless of the current working directory
sys.path.append(os.getcwd().split(os.getenv('PROJECT_NAME'))[0] + os.getenv('PROJECT_NAME') + '/src')
import langchain_functions

app = FastAPI()


@app.post("/generate_docstring")
def generate_docstring(request: Dict):
    """
    Generate a docstring for the given code.

    Parameters:
    request (Dict): A dictionary containing, under the 'file_contents' key, the code to generate a docstring for.

    Returns:
    output_code (str): The code with the generated docstring.
    """
    try:
        # Run the LangChain function to generate the docstring
        output_code = langchain_functions.generate_docstring(request['file_contents'])

        # Return the code with the docstring added
        return {"output_code": output_code}
    except Exception as e:
        # Surface any failure as an HTTP 500 carrying the error message
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/")
async def root():
    """
    Return a message indicating that the API is running.
    """
    return {"message": "API Running"}


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
[ "langchain_functions.generate_docstring" ]
[((135, 148), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (146, 148), False, 'from dotenv import load_dotenv\n'), ((286, 295), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (293, 295), False, 'from fastapi import FastAPI, HTTPException\n'), ((1180, 1223), 'uvicorn.run', 'uvicorn.run', (['app'], {'host': '"""0.0.0.0"""', 'port': '(8000)'}), "(app, host='0.0.0.0', port=8000)\n", (1191, 1223), False, 'import uvicorn\n'), ((698, 762), 'langchain_functions.generate_docstring', 'langchain_functions.generate_docstring', (["request['file_contents']"], {}), "(request['file_contents'])\n", (736, 762), False, 'import langchain_functions\n'), ((216, 241), 'os.getenv', 'os.getenv', (['"""PROJECT_NAME"""'], {}), "('PROJECT_NAME')\n", (225, 241), False, 'import os\n'), ((184, 209), 'os.getenv', 'os.getenv', (['"""PROJECT_NAME"""'], {}), "('PROJECT_NAME')\n", (193, 209), False, 'import os\n'), ((166, 177), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (175, 177), False, 'import os\n')]
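The FastAPI entry above exposes langchain_functions.generate_docstring behind a single POST endpoint that expects the source code under a 'file_contents' key. A short client-side sketch of calling it is given below; the sample payload and the localhost URL are assumptions for illustration, while the port comes from the uvicorn.run call in the code.

# Hedged usage sketch for the /generate_docstring endpoint defined above.
# Assumes the service is already running locally on port 8000.
import requests

payload = {"file_contents": "def add(a, b):\n    return a + b\n"}  # sample code, assumed
response = requests.post("http://localhost:8000/generate_docstring", json=payload)
response.raise_for_status()
print(response.json()["output_code"])  # the code with its generated docstring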