Dataset columns:
- code: string, 141 to 79.4k characters (source text of one langchain module)
- apis: sequence, 1 to 23 items (fully qualified langchain APIs used in the code)
- extract_api: string, 126 to 73.2k characters (serialized per-call extraction records)
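Read together with the rows below, these columns can be consumed with the standard `datasets` library. The following is a minimal sketch under the assumption that the data is published as a Hugging Face dataset; the dataset path used here is a placeholder, not the real identifier.

# Minimal sketch of reading one row with this schema. The dataset path is a
# placeholder; substitute the actual repository id.
from datasets import load_dataset

ds = load_dataset("your-org/langchain-api-extraction", split="train")  # hypothetical path
row = ds[0]
print(len(row["code"]))          # full source text of one module
print(row["apis"])               # list of fully qualified langchain APIs
print(row["extract_api"][:200])  # serialized call-site records (see below)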
from __future__ import annotations import asyncio import functools import logging import os import warnings from contextlib import contextmanager from contextvars import ContextVar from typing import Any, Dict, Generator, List, Optional, Type, TypeVar, Union, cast from uuid import UUID, uuid4 import langchain from langchain.callbacks.base import ( BaseCallbackHandler, BaseCallbackManager, ChainManagerMixin, LLMManagerMixin, RunManagerMixin, ToolManagerMixin, ) from langchain.callbacks.openai_info import OpenAICallbackHandler from langchain.callbacks.stdout import StdOutCallbackHandler from langchain.callbacks.tracers.langchain import LangChainTracer from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1 from langchain.callbacks.tracers.schemas import TracerSession from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler from langchain.schema import ( AgentAction, AgentFinish, BaseMessage, LLMResult, get_buffer_string, ) logger = logging.getLogger(__name__) Callbacks = Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar("openai_callback", default=None) tracing_callback_var: ContextVar[Optional[LangChainTracerV1]] = ContextVar( # noqa: E501 "tracing_callback", default=None ) tracing_v2_callback_var: ContextVar[Optional[LangChainTracer]] = ContextVar( # noqa: E501 "tracing_callback_v2", default=None ) def _get_debug() -> bool: return langchain.debug @contextmanager def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]: """Get OpenAI callback handler in a context manager.""" cb = OpenAICallbackHandler() openai_callback_var.set(cb) yield cb openai_callback_var.set(None) @contextmanager def tracing_enabled( session_name: str = "default", ) -> Generator[TracerSessionV1, None, None]: """Get Tracer in a context manager.""" cb = LangChainTracerV1() session = cast(TracerSessionV1, cb.load_session(session_name)) tracing_callback_var.set(cb) yield session tracing_callback_var.set(None) @contextmanager def tracing_v2_enabled( session_name: Optional[str] = None, *, example_id: Optional[Union[str, UUID]] = None, tenant_id: Optional[str] = None, session_extra: Optional[Dict[str, Any]] = None, ) -> Generator[TracerSession, None, None]: """Get the experimental tracer handler in a context manager.""" # Issue a warning that this is experimental warnings.warn( "The experimental tracing v2 is in development. " "This is not yet stable and may change in the future." 
) if isinstance(example_id, str): example_id = UUID(example_id) cb = LangChainTracer( tenant_id=tenant_id, session_name=session_name, example_id=example_id, session_extra=session_extra, ) session = cb.ensure_session() tracing_v2_callback_var.set(cb) yield session tracing_v2_callback_var.set(None) def _handle_event( handlers: List[BaseCallbackHandler], event_name: str, ignore_condition_name: Optional[str], *args: Any, **kwargs: Any, ) -> None: """Generic event handler for CallbackManager.""" message_strings: Optional[List[str]] = None for handler in handlers: try: if ignore_condition_name is None or not getattr(handler, ignore_condition_name): getattr(handler, event_name)(*args, **kwargs) except NotImplementedError as e: if event_name == "on_chat_model_start": if message_strings is None: message_strings = [get_buffer_string(m) for m in args[1]] _handle_event( [handler], "on_llm_start", "ignore_llm", args[0], message_strings, *args[2:], **kwargs, ) else: logger.warning(f"Error in {event_name} callback: {e}") except Exception as e: logging.warning(f"Error in {event_name} callback: {e}") async def _ahandle_event_for_handler( handler: BaseCallbackHandler, event_name: str, ignore_condition_name: Optional[str], *args: Any, **kwargs: Any, ) -> None: try: if ignore_condition_name is None or not getattr(handler, ignore_condition_name): event = getattr(handler, event_name) if asyncio.iscoroutinefunction(event): await event(*args, **kwargs) else: await asyncio.get_event_loop().run_in_executor(None, functools.partial(event, *args, **kwargs)) except NotImplementedError as e: if event_name == "on_chat_model_start": message_strings = [get_buffer_string(m) for m in args[1]] await _ahandle_event_for_handler( handler, "on_llm_start", "ignore_llm", args[0], message_strings, *args[2:], **kwargs, ) else: logger.warning(f"Error in {event_name} callback: {e}") except Exception as e: logger.warning(f"Error in {event_name} callback: {e}") async def _ahandle_event( handlers: List[BaseCallbackHandler], event_name: str, ignore_condition_name: Optional[str], *args: Any, **kwargs: Any, ) -> None: """Generic event handler for AsyncCallbackManager.""" await asyncio.gather( *( _ahandle_event_for_handler(handler, event_name, ignore_condition_name, *args, **kwargs) for handler in handlers ) ) BRM = TypeVar("BRM", bound="BaseRunManager") class BaseRunManager(RunManagerMixin): """Base class for run manager (a bound callback manager).""" def __init__( self, run_id: UUID, handlers: List[BaseCallbackHandler], inheritable_handlers: List[BaseCallbackHandler], parent_run_id: Optional[UUID] = None, ) -> None: """Initialize run manager.""" self.run_id = run_id self.handlers = handlers self.inheritable_handlers = inheritable_handlers self.parent_run_id = parent_run_id @classmethod def get_noop_manager(cls: Type[BRM]) -> BRM: """Return a manager that doesn't perform any operations.""" return cls(uuid4(), [], []) class RunManager(BaseRunManager): """Sync Run Manager.""" def on_text( self, text: str, **kwargs: Any, ) -> Any: """Run when text is received.""" _handle_event( self.handlers, "on_text", None, text, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class AsyncRunManager(BaseRunManager): """Async Run Manager.""" async def on_text( self, text: str, **kwargs: Any, ) -> Any: """Run when text is received.""" await _ahandle_event( self.handlers, "on_text", None, text, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class CallbackManagerForLLMRun(RunManager, LLMManagerMixin): """Callback manager for 
LLM run.""" def on_llm_new_token( self, token: str, **kwargs: Any, ) -> None: """Run when LLM generates a new token.""" _handle_event( self.handlers, "on_llm_new_token", "ignore_llm", token=token, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when LLM ends running.""" _handle_event( self.handlers, "on_llm_end", "ignore_llm", response, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when LLM errors.""" _handle_event( self.handlers, "on_llm_error", "ignore_llm", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin): """Async callback manager for LLM run.""" async def on_llm_new_token( self, token: str, **kwargs: Any, ) -> None: """Run when LLM generates a new token.""" await _ahandle_event( self.handlers, "on_llm_new_token", "ignore_llm", token, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when LLM ends running.""" await _ahandle_event( self.handlers, "on_llm_end", "ignore_llm", response, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when LLM errors.""" await _ahandle_event( self.handlers, "on_llm_error", "ignore_llm", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class CallbackManagerForChainRun(RunManager, ChainManagerMixin): """Callback manager for chain run.""" def get_child(self) -> CallbackManager: """Get a child callback manager.""" manager = CallbackManager([], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) return manager def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Run when chain ends running.""" _handle_event( self.handlers, "on_chain_end", "ignore_chain", outputs, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when chain errors.""" _handle_event( self.handlers, "on_chain_error", "ignore_chain", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Run when agent action is received.""" _handle_event( self.handlers, "on_agent_action", "ignore_agent", action, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any: """Run when agent finish is received.""" _handle_event( self.handlers, "on_agent_finish", "ignore_agent", finish, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class AsyncCallbackManagerForChainRun(AsyncRunManager, ChainManagerMixin): """Async callback manager for chain run.""" def get_child(self) -> AsyncCallbackManager: """Get a child callback manager.""" manager = AsyncCallbackManager([], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) return manager async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Run when chain ends running.""" await _ahandle_event( self.handlers, "on_chain_end", "ignore_chain", outputs, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_chain_error( self, 
error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when chain errors.""" await _ahandle_event( self.handlers, "on_chain_error", "ignore_chain", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Run when agent action is received.""" await _ahandle_event( self.handlers, "on_agent_action", "ignore_agent", action, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any: """Run when agent finish is received.""" await _ahandle_event( self.handlers, "on_agent_finish", "ignore_agent", finish, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class CallbackManagerForToolRun(RunManager, ToolManagerMixin): """Callback manager for tool run.""" def get_child(self) -> CallbackManager: """Get a child callback manager.""" manager = CallbackManager([], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) return manager def on_tool_end( self, output: str, **kwargs: Any, ) -> None: """Run when tool ends running.""" _handle_event( self.handlers, "on_tool_end", "ignore_agent", output, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when tool errors.""" _handle_event( self.handlers, "on_tool_error", "ignore_agent", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_tool_end_data_model( self, output, **kwargs: Any, ): """Return the data model for the on_tool_end event.""" _handle_event( self.handlers, "on_tool_end_data_model", "ignore_agent", output, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class AsyncCallbackManagerForToolRun(AsyncRunManager, ToolManagerMixin): """Async callback manager for tool run.""" def get_child(self) -> AsyncCallbackManager: """Get a child callback manager.""" manager = AsyncCallbackManager([], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) return manager async def on_tool_end(self, output: str, **kwargs: Any) -> None: """Run when tool ends running.""" await _ahandle_event( self.handlers, "on_tool_end", "ignore_agent", output, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when tool errors.""" await _ahandle_event( self.handlers, "on_tool_error", "ignore_agent", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class CallbackManager(BaseCallbackManager): """Callback manager that can be used to handle callbacks from langchain.""" def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForLLMRun: """Run when LLM starts running.""" if run_id is None: run_id = uuid4() _handle_event( self.handlers, "on_llm_start", "ignore_llm", serialized, prompts, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) return CallbackManagerForLLMRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id) def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForLLMRun: """Run when LLM starts running.""" if run_id is None: run_id = uuid4() _handle_event( self.handlers, "on_chat_model_start", "ignore_chat_model", 
serialized, messages, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) # Re-use the LLM Run Manager since the outputs are treated # the same for now return CallbackManagerForLLMRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id) def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForChainRun: """Run when chain starts running.""" if run_id is None: run_id = uuid4() _handle_event( self.handlers, "on_chain_start", "ignore_chain", serialized, inputs, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) return CallbackManagerForChainRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id) def on_tool_start( self, serialized: Dict[str, Any], input_str: str, run_id: Optional[UUID] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForToolRun: """Run when tool starts running.""" if run_id is None: run_id = uuid4() _handle_event( self.handlers, "on_tool_start", "ignore_agent", serialized, input_str, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) return CallbackManagerForToolRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id) @classmethod def configure( cls, inheritable_callbacks: Callbacks = None, local_callbacks: Callbacks = None, verbose: bool = False, ) -> CallbackManager: """Configure the callback manager.""" return _configure(cls, inheritable_callbacks, local_callbacks, verbose) class AsyncCallbackManager(BaseCallbackManager): """Async callback manager that can be used to handle callbacks from LangChain.""" @property def is_async(self) -> bool: """Return whether the handler is async.""" return True async def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], run_id: Optional[UUID] = None, **kwargs: Any, ) -> AsyncCallbackManagerForLLMRun: """Run when LLM starts running.""" if run_id is None: run_id = uuid4() await _ahandle_event( self.handlers, "on_llm_start", "ignore_llm", serialized, prompts, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) return AsyncCallbackManagerForLLMRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id) async def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: if run_id is None: run_id = uuid4() await _ahandle_event( self.handlers, "on_chat_model_start", "ignore_chat_model", serialized, messages, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) return AsyncCallbackManagerForLLMRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id) async def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], run_id: Optional[UUID] = None, **kwargs: Any, ) -> AsyncCallbackManagerForChainRun: """Run when chain starts running.""" if run_id is None: run_id = uuid4() await _ahandle_event( self.handlers, "on_chain_start", "ignore_chain", serialized, inputs, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) return AsyncCallbackManagerForChainRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id) async def on_tool_start( self, serialized: Dict[str, Any], input_str: str, run_id: Optional[UUID] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> AsyncCallbackManagerForToolRun: """Run when tool starts running.""" if run_id is None: run_id = uuid4() await _ahandle_event( self.handlers, "on_tool_start", "ignore_agent", serialized, input_str, 
run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) return AsyncCallbackManagerForToolRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id) @classmethod def configure( cls, inheritable_callbacks: Callbacks = None, local_callbacks: Callbacks = None, verbose: bool = False, ) -> AsyncCallbackManager: """Configure the callback manager.""" return _configure(cls, inheritable_callbacks, local_callbacks, verbose) T = TypeVar("T", CallbackManager, AsyncCallbackManager) def _configure( callback_manager_cls: Type[T], inheritable_callbacks: Callbacks = None, local_callbacks: Callbacks = None, verbose: bool = False, ) -> T: """Configure the callback manager.""" callback_manager = callback_manager_cls([]) if inheritable_callbacks or local_callbacks: if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None: inheritable_callbacks_ = inheritable_callbacks or [] callback_manager = callback_manager_cls( handlers=inheritable_callbacks_.copy(), inheritable_handlers=inheritable_callbacks_.copy(), ) else: callback_manager = callback_manager_cls( handlers=inheritable_callbacks.handlers, inheritable_handlers=inheritable_callbacks.inheritable_handlers, parent_run_id=inheritable_callbacks.parent_run_id, ) local_handlers_ = ( local_callbacks if isinstance(local_callbacks, list) else (local_callbacks.handlers if local_callbacks else []) ) for handler in local_handlers_: callback_manager.add_handler(handler, False) tracer = tracing_callback_var.get() open_ai = openai_callback_var.get() tracing_enabled_ = ( os.environ.get("LANGCHAIN_TRACING") is not None or tracer is not None or os.environ.get("LANGCHAIN_HANDLER") is not None ) tracer_v2 = tracing_v2_callback_var.get() tracing_v2_enabled_ = os.environ.get("LANGCHAIN_TRACING_V2") is not None or tracer_v2 is not None tracer_session = os.environ.get("LANGCHAIN_SESSION") debug = _get_debug() if tracer_session is None: tracer_session = "default" if verbose or debug or tracing_enabled_ or tracing_v2_enabled_ or open_ai is not None: if verbose and not any(isinstance(handler, StdOutCallbackHandler) for handler in callback_manager.handlers): if debug: pass else: callback_manager.add_handler(StdOutCallbackHandler(), False) if debug and not any(isinstance(handler, ConsoleCallbackHandler) for handler in callback_manager.handlers): callback_manager.add_handler(ConsoleCallbackHandler(), True) if tracing_enabled_ and not any( isinstance(handler, LangChainTracerV1) for handler in callback_manager.handlers ): if tracer: callback_manager.add_handler(tracer, True) else: handler = LangChainTracerV1() handler.load_session(tracer_session) callback_manager.add_handler(handler, True) if tracing_v2_enabled_ and not any( isinstance(handler, LangChainTracer) for handler in callback_manager.handlers ): if tracer_v2: callback_manager.add_handler(tracer_v2, True) else: try: handler = LangChainTracer(session_name=tracer_session) handler.ensure_session() callback_manager.add_handler(handler, True) except Exception as e: logger.debug("Unable to load requested LangChainTracer", e) if open_ai is not None and not any( isinstance(handler, OpenAICallbackHandler) for handler in callback_manager.handlers ): callback_manager.add_handler(open_ai, True) return callback_manager
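The module above (langchain/callbacks/manager.py) defines the callback context managers and the CallbackManager.configure entry point. As a rough illustration of how those pieces fit together, the following sketch wires a StdOutCallbackHandler through configure() inside get_openai_callback(); it only exercises the handler plumbing defined above, makes no real LLM call, and the serialized name and prompt are illustrative.

# Sketch of driving the managers defined above; no actual LLM is invoked.
from langchain.callbacks.manager import CallbackManager, get_openai_callback
from langchain.callbacks.stdout import StdOutCallbackHandler

with get_openai_callback() as cb:
    # configure() merges inheritable/local handlers and, because the OpenAI
    # ContextVar is set by the context manager, also attaches `cb` as an
    # inheritable handler.
    manager = CallbackManager.configure(
        inheritable_callbacks=[StdOutCallbackHandler()],
        local_callbacks=None,
        verbose=True,
    )
    run_manager = manager.on_llm_start({"name": "fake-llm"}, ["Hello"])
    run_manager.on_llm_new_token("Hi")  # forwarded to every handler that implements it
    # cb.total_tokens / cb.total_cost would only be filled in by a real OpenAI run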
[ "langchain.schema.get_buffer_string", "langchain.callbacks.stdout.StdOutCallbackHandler", "langchain.callbacks.tracers.stdout.ConsoleCallbackHandler", "langchain.callbacks.openai_info.OpenAICallbackHandler", "langchain.callbacks.tracers.langchain.LangChainTracer", "langchain.callbacks.tracers.langchain_v1.LangChainTracerV1" ]
[((1036, 1063), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1053, 1063), False, 'import logging\n'), ((1208, 1251), 'contextvars.ContextVar', 'ContextVar', (['"""openai_callback"""'], {'default': 'None'}), "('openai_callback', default=None)\n", (1218, 1251), False, 'from contextvars import ContextVar\n'), ((1316, 1360), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_callback"""'], {'default': 'None'}), "('tracing_callback', default=None)\n", (1326, 1360), False, 'from contextvars import ContextVar\n'), ((1446, 1493), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_callback_v2"""'], {'default': 'None'}), "('tracing_callback_v2', default=None)\n", (1456, 1493), False, 'from contextvars import ContextVar\n'), ((5790, 5828), 'typing.TypeVar', 'TypeVar', (['"""BRM"""'], {'bound': '"""BaseRunManager"""'}), "('BRM', bound='BaseRunManager')\n", (5797, 5828), False, 'from typing import Any, Dict, Generator, List, Optional, Type, TypeVar, Union, cast\n'), ((22935, 22986), 'typing.TypeVar', 'TypeVar', (['"""T"""', 'CallbackManager', 'AsyncCallbackManager'], {}), "('T', CallbackManager, AsyncCallbackManager)\n", (22942, 22986), False, 'from typing import Any, Dict, Generator, List, Optional, Type, TypeVar, Union, cast\n'), ((1731, 1754), 'langchain.callbacks.openai_info.OpenAICallbackHandler', 'OpenAICallbackHandler', ([], {}), '()\n', (1752, 1754), False, 'from langchain.callbacks.openai_info import OpenAICallbackHandler\n'), ((2005, 2024), 'langchain.callbacks.tracers.langchain_v1.LangChainTracerV1', 'LangChainTracerV1', ([], {}), '()\n', (2022, 2024), False, 'from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1\n'), ((2570, 2696), 'warnings.warn', 'warnings.warn', (['"""The experimental tracing v2 is in development. This is not yet stable and may change in the future."""'], {}), "(\n 'The experimental tracing v2 is in development. 
This is not yet stable and may change in the future.'\n )\n", (2583, 2696), False, 'import warnings\n'), ((2787, 2907), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'tenant_id': 'tenant_id', 'session_name': 'session_name', 'example_id': 'example_id', 'session_extra': 'session_extra'}), '(tenant_id=tenant_id, session_name=session_name, example_id=\n example_id, session_extra=session_extra)\n', (2802, 2907), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((24635, 24670), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_SESSION"""'], {}), "('LANGCHAIN_SESSION')\n", (24649, 24670), False, 'import os\n'), ((2761, 2777), 'uuid.UUID', 'UUID', (['example_id'], {}), '(example_id)\n', (2765, 2777), False, 'from uuid import UUID, uuid4\n'), ((4562, 4596), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['event'], {}), '(event)\n', (4589, 4596), False, 'import asyncio\n'), ((6507, 6514), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (6512, 6514), False, 'from uuid import UUID, uuid4\n'), ((16682, 16689), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (16687, 16689), False, 'from uuid import UUID, uuid4\n'), ((17367, 17374), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (17372, 17374), False, 'from uuid import UUID, uuid4\n'), ((18148, 18155), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (18153, 18155), False, 'from uuid import UUID, uuid4\n'), ((18861, 18868), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (18866, 18868), False, 'from uuid import UUID, uuid4\n'), ((20121, 20128), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (20126, 20128), False, 'from uuid import UUID, uuid4\n'), ((20760, 20767), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (20765, 20767), False, 'from uuid import UUID, uuid4\n'), ((21471, 21478), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (21476, 21478), False, 'from uuid import UUID, uuid4\n'), ((22207, 22214), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (22212, 22214), False, 'from uuid import UUID, uuid4\n'), ((24322, 24357), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_TRACING"""'], {}), "('LANGCHAIN_TRACING')\n", (24336, 24357), False, 'import os\n'), ((24411, 24446), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_HANDLER"""'], {}), "('LANGCHAIN_HANDLER')\n", (24425, 24446), False, 'import os\n'), ((24538, 24576), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_TRACING_V2"""'], {}), "('LANGCHAIN_TRACING_V2')\n", (24552, 24576), False, 'import os\n'), ((4161, 4216), 'logging.warning', 'logging.warning', (['f"""Error in {event_name} callback: {e}"""'], {}), "(f'Error in {event_name} callback: {e}')\n", (4176, 4216), False, 'import logging\n'), ((25265, 25289), 'langchain.callbacks.tracers.stdout.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (25287, 25289), False, 'from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler\n'), ((25567, 25586), 'langchain.callbacks.tracers.langchain_v1.LangChainTracerV1', 'LangChainTracerV1', ([], {}), '()\n', (25584, 25586), False, 'from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1\n'), ((4889, 4909), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['m'], {}), '(m)\n', (4906, 4909), False, 'from langchain.schema import AgentAction, AgentFinish, BaseMessage, LLMResult, get_buffer_string\n'), ((25076, 25099), 'langchain.callbacks.stdout.StdOutCallbackHandler', 'StdOutCallbackHandler', ([], {}), '()\n', (25097, 25099), False, 'from langchain.callbacks.stdout import StdOutCallbackHandler\n'), ((26002, 
26046), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'session_name': 'tracer_session'}), '(session_name=tracer_session)\n', (26017, 26046), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((4730, 4771), 'functools.partial', 'functools.partial', (['event', '*args'], {}), '(event, *args, **kwargs)\n', (4747, 4771), False, 'import functools\n'), ((3713, 3733), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['m'], {}), '(m)\n', (3730, 3733), False, 'from langchain.schema import AgentAction, AgentFinish, BaseMessage, LLMResult, get_buffer_string\n'), ((4683, 4707), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (4705, 4707), False, 'import asyncio\n')]
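The extract_api field above serializes one record per langchain-related call site. The field meanings below are inferred from the visible examples rather than from a documented schema: a character span for the call, the fully qualified name, the name as written in the source, parsed positional/keyword arguments, the raw argument text, a span for the callee name, a boolean flag, and the originating import. Each record consists of plain Python literals, so it can be decoded as in this sketch (the sample is the first record from the list above):

import ast

# One record copied verbatim from the extract_api column above, wrapped in a list.
sample = ("[((1036, 1063), 'logging.getLogger', 'logging.getLogger', "
          "(['__name__'], {}), '(__name__)\\n', (1053, 1063), False, 'import logging\\n')]")

records = ast.literal_eval(sample)  # works because every element is a literal
(call_span, qualified_name, written_name, parsed_args,
 raw_args, name_span, flag, import_stmt) = records[0]
# call_span and name_span appear to be offsets into the original `code` text;
# the meaning of the boolean flag is not stated in the data itself.
print(qualified_name, raw_args.strip(), import_stmt.strip())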
"""Base interface that all chains should implement.""" import inspect import json import warnings from abc import ABC, abstractmethod from pathlib import Path from typing import Any, Dict, List, Optional, Union import yaml from pydantic import BaseModel, Field, root_validator, validator import langchain from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import ( AsyncCallbackManager, AsyncCallbackManagerForChainRun, CallbackManager, CallbackManagerForChainRun, Callbacks, ) from langchain.schema import RUN_KEY, BaseMemory, RunInfo def _get_verbosity() -> bool: return langchain.verbose class Chain(BaseModel, ABC): """Base interface that all chains should implement.""" memory: Optional[BaseMemory] = None callbacks: Callbacks = Field(default=None, exclude=True) callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True) verbose: bool = Field( default_factory=_get_verbosity ) # Whether to print the response text class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True @property def _chain_type(self) -> str: raise NotImplementedError("Saving not supported for this chain type.") @root_validator() def raise_deprecation(cls, values: Dict) -> Dict: """Raise deprecation warning if callback_manager is used.""" if values.get("callback_manager") is not None: warnings.warn( "callback_manager is deprecated. Please use callbacks instead.", DeprecationWarning, ) values["callbacks"] = values.pop("callback_manager", None) return values @validator("verbose", pre=True, always=True) def set_verbose(cls, verbose: Optional[bool]) -> bool: """If verbose is None, set it. This allows users to pass in None as verbose to access the global setting. """ if verbose is None: return _get_verbosity() else: return verbose @property @abstractmethod def input_keys(self) -> List[str]: """Input keys this chain expects.""" @property @abstractmethod def output_keys(self) -> List[str]: """Output keys this chain expects.""" def _validate_inputs(self, inputs: Dict[str, Any]) -> None: """Check that all inputs are present.""" missing_keys = set(self.input_keys).difference(inputs) if missing_keys: raise ValueError(f"Missing some input keys: {missing_keys}") def _validate_outputs(self, outputs: Dict[str, Any]) -> None: missing_keys = set(self.output_keys).difference(outputs) if missing_keys: raise ValueError(f"Missing some output keys: {missing_keys}") @abstractmethod def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Run the logic of this chain and return the output.""" async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Run the logic of this chain and return the output.""" raise NotImplementedError("Async call not supported for this chain type.") def __call__( self, inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False, callbacks: Callbacks = None, *, include_run_info: bool = False, ) -> Dict[str, Any]: """Run the logic of this chain and add to output if desired. Args: inputs: Dictionary of inputs, or single input if chain expects only one param. return_only_outputs: boolean for whether to return only outputs in the response. If True, only new keys generated by this chain will be returned. If False, both input keys and new keys generated by this chain will be returned. Defaults to False. callbacks: Callbacks to use for this chain run. 
If not provided, will use the callbacks provided to the chain. include_run_info: Whether to include run info in the response. Defaults to False. """ inputs = self.prep_inputs(inputs) callback_manager = CallbackManager.configure( callbacks, self.callbacks, self.verbose ) new_arg_supported = inspect.signature(self._call).parameters.get("run_manager") run_manager = callback_manager.on_chain_start( {"name": self.__class__.__name__}, inputs, ) try: outputs = ( self._call(inputs, run_manager=run_manager) if new_arg_supported else self._call(inputs) ) except (KeyboardInterrupt, Exception) as e: run_manager.on_chain_error(e) raise e run_manager.on_chain_end(outputs) final_outputs: Dict[str, Any] = self.prep_outputs( inputs, outputs, return_only_outputs ) if include_run_info: final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id) return final_outputs async def acall( self, inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False, callbacks: Callbacks = None, *, include_run_info: bool = False, ) -> Dict[str, Any]: """Run the logic of this chain and add to output if desired. Args: inputs: Dictionary of inputs, or single input if chain expects only one param. return_only_outputs: boolean for whether to return only outputs in the response. If True, only new keys generated by this chain will be returned. If False, both input keys and new keys generated by this chain will be returned. Defaults to False. callbacks: Callbacks to use for this chain run. If not provided, will use the callbacks provided to the chain. include_run_info: Whether to include run info in the response. Defaults to False. """ inputs = self.prep_inputs(inputs) callback_manager = AsyncCallbackManager.configure( callbacks, self.callbacks, self.verbose ) new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager") run_manager = await callback_manager.on_chain_start( {"name": self.__class__.__name__}, inputs, ) try: outputs = ( await self._acall(inputs, run_manager=run_manager) if new_arg_supported else await self._acall(inputs) ) except (KeyboardInterrupt, Exception) as e: await run_manager.on_chain_error(e) raise e await run_manager.on_chain_end(outputs) final_outputs: Dict[str, Any] = self.prep_outputs( inputs, outputs, return_only_outputs ) if include_run_info: final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id) return final_outputs def prep_outputs( self, inputs: Dict[str, str], outputs: Dict[str, str], return_only_outputs: bool = False, ) -> Dict[str, str]: """Validate and prep outputs.""" self._validate_outputs(outputs) if self.memory is not None: self.memory.save_context(inputs, outputs) if return_only_outputs: return outputs else: return {**inputs, **outputs} def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]: """Validate and prep inputs.""" if not isinstance(inputs, dict): _input_keys = set(self.input_keys) if self.memory is not None: # If there are multiple input keys, but some get set by memory so that # only one is not set, we can still figure out which key it is. _input_keys = _input_keys.difference(self.memory.memory_variables) if len(_input_keys) != 1: raise ValueError( f"A single string input was passed in, but this chain expects " f"multiple inputs ({_input_keys}). 
When a chain expects " f"multiple inputs, please call it by passing in a dictionary, " "eg `chain({'foo': 1, 'bar': 2})`" ) inputs = {list(_input_keys)[0]: inputs} if self.memory is not None: external_context = self.memory.load_memory_variables(inputs) inputs = dict(inputs, **external_context) self._validate_inputs(inputs) return inputs def apply( self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None ) -> List[Dict[str, str]]: """Call the chain on all inputs in the list.""" return [self(inputs, callbacks=callbacks) for inputs in input_list] def run(self, *args: Any, callbacks: Callbacks = None, **kwargs: Any) -> str: """Run the chain as text in, text out or multiple variables, text out.""" if len(self.output_keys) != 1: raise ValueError( f"`run` not supported when there is not exactly " f"one output key. Got {self.output_keys}." ) if args and not kwargs: if len(args) != 1: raise ValueError("`run` supports only one positional argument.") return self(args[0], callbacks=callbacks)[self.output_keys[0]] if kwargs and not args: return self(kwargs, callbacks=callbacks)[self.output_keys[0]] if not kwargs and not args: raise ValueError( "`run` supported with either positional arguments or keyword arguments," " but none were provided." ) raise ValueError( f"`run` supported with either positional arguments or keyword arguments" f" but not both. Got args: {args} and kwargs: {kwargs}." ) async def arun(self, *args: Any, callbacks: Callbacks = None, **kwargs: Any) -> str: """Run the chain as text in, text out or multiple variables, text out.""" if len(self.output_keys) != 1: raise ValueError( f"`run` not supported when there is not exactly " f"one output key. Got {self.output_keys}." ) if args and not kwargs: if len(args) != 1: raise ValueError("`run` supports only one positional argument.") return (await self.acall(args[0], callbacks=callbacks))[self.output_keys[0]] if kwargs and not args: return (await self.acall(kwargs, callbacks=callbacks))[self.output_keys[0]] raise ValueError( f"`run` supported with either positional arguments or keyword arguments" f" but not both. Got args: {args} and kwargs: {kwargs}." ) def dict(self, **kwargs: Any) -> Dict: """Return dictionary representation of chain.""" if self.memory is not None: raise ValueError("Saving of memory is not yet supported.") _dict = super().dict() _dict["_type"] = self._chain_type return _dict def save(self, file_path: Union[Path, str]) -> None: """Save the chain. Args: file_path: Path to file to save the chain to. Example: .. code-block:: python chain.save(file_path="path/chain.yaml") """ # Convert file to Path object. if isinstance(file_path, str): save_path = Path(file_path) else: save_path = file_path directory_path = save_path.parent directory_path.mkdir(parents=True, exist_ok=True) # Fetch dictionary to save chain_dict = self.dict() if save_path.suffix == ".json": with open(file_path, "w") as f: json.dump(chain_dict, f, indent=4) elif save_path.suffix == ".yaml": with open(file_path, "w") as f: yaml.dump(chain_dict, f, default_flow_style=False) else: raise ValueError(f"{save_path} must be json or yaml")
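The file above is the Chain base class (langchain/chains/base.py in this version). Subclasses only have to supply input_keys, output_keys, and _call; callback wiring, memory handling, and run/arun come from the base. A minimal concrete subclass might look like the sketch below; EchoChain and its key names are illustrative, not part of langchain.

# Minimal Chain subclass, sketched against the base class above.
# `EchoChain` and its keys are illustrative names, not part of langchain.
from typing import Any, Dict, List, Optional

from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain


class EchoChain(Chain):
    """Returns its single input unchanged."""

    @property
    def input_keys(self) -> List[str]:
        return ["text"]

    @property
    def output_keys(self) -> List[str]:
        return ["text_out"]

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        if run_manager is not None:
            run_manager.on_text(inputs["text"])  # on_text is defined on RunManager above
        return {"text_out": inputs["text"]}


chain = EchoChain()
print(chain.run("hello"))        # -> "hello"
print(chain({"text": "hello"}))  # -> {'text': 'hello', 'text_out': 'hello'}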
[ "langchain.schema.RunInfo", "langchain.callbacks.manager.AsyncCallbackManager.configure", "langchain.callbacks.manager.CallbackManager.configure" ]
[((816, 849), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (821, 849), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((904, 937), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (909, 937), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((958, 995), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (963, 995), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((1295, 1311), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1309, 1311), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((1747, 1790), 'pydantic.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (1756, 1790), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((4447, 4513), 'langchain.callbacks.manager.CallbackManager.configure', 'CallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (4472, 4513), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForChainRun, CallbackManager, CallbackManagerForChainRun, Callbacks\n'), ((6411, 6482), 'langchain.callbacks.manager.AsyncCallbackManager.configure', 'AsyncCallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (6441, 6482), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForChainRun, CallbackManager, CallbackManagerForChainRun, Callbacks\n'), ((1502, 1604), 'warnings.warn', 'warnings.warn', (['"""callback_manager is deprecated. Please use callbacks instead."""', 'DeprecationWarning'], {}), "('callback_manager is deprecated. Please use callbacks instead.',\n DeprecationWarning)\n", (1515, 1604), False, 'import warnings\n'), ((5284, 5318), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (5291, 5318), False, 'from langchain.schema import RUN_KEY, BaseMemory, RunInfo\n'), ((7286, 7320), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (7293, 7320), False, 'from langchain.schema import RUN_KEY, BaseMemory, RunInfo\n'), ((11932, 11947), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (11936, 11947), False, 'from pathlib import Path\n'), ((12267, 12301), 'json.dump', 'json.dump', (['chain_dict', 'f'], {'indent': '(4)'}), '(chain_dict, f, indent=4)\n', (12276, 12301), False, 'import json\n'), ((4564, 4593), 'inspect.signature', 'inspect.signature', (['self._call'], {}), '(self._call)\n', (4581, 4593), False, 'import inspect\n'), ((6533, 6563), 'inspect.signature', 'inspect.signature', (['self._acall'], {}), '(self._acall)\n', (6550, 6563), False, 'import inspect\n'), ((12404, 12454), 'yaml.dump', 'yaml.dump', (['chain_dict', 'f'], {'default_flow_style': '(False)'}), '(chain_dict, f, default_flow_style=False)\n', (12413, 12454), False, 'import yaml\n')]
"""Base interface that all chains should implement.""" import inspect import json import warnings from abc import ABC, abstractmethod from pathlib import Path from typing import Any, Dict, List, Optional, Union import yaml from pydantic import BaseModel, Field, root_validator, validator import langchain from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import ( AsyncCallbackManager, AsyncCallbackManagerForChainRun, CallbackManager, CallbackManagerForChainRun, Callbacks, ) from langchain.schema import RUN_KEY, BaseMemory, RunInfo def _get_verbosity() -> bool: return langchain.verbose class Chain(BaseModel, ABC): """Base interface that all chains should implement.""" memory: Optional[BaseMemory] = None callbacks: Callbacks = Field(default=None, exclude=True) callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True) verbose: bool = Field( default_factory=_get_verbosity ) # Whether to print the response text class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True @property def _chain_type(self) -> str: raise NotImplementedError("Saving not supported for this chain type.") @root_validator() def raise_deprecation(cls, values: Dict) -> Dict: """Raise deprecation warning if callback_manager is used.""" if values.get("callback_manager") is not None: warnings.warn( "callback_manager is deprecated. Please use callbacks instead.", DeprecationWarning, ) values["callbacks"] = values.pop("callback_manager", None) return values @validator("verbose", pre=True, always=True) def set_verbose(cls, verbose: Optional[bool]) -> bool: """If verbose is None, set it. This allows users to pass in None as verbose to access the global setting. """ if verbose is None: return _get_verbosity() else: return verbose @property @abstractmethod def input_keys(self) -> List[str]: """Input keys this chain expects.""" @property @abstractmethod def output_keys(self) -> List[str]: """Output keys this chain expects.""" def _validate_inputs(self, inputs: Dict[str, Any]) -> None: """Check that all inputs are present.""" missing_keys = set(self.input_keys).difference(inputs) if missing_keys: raise ValueError(f"Missing some input keys: {missing_keys}") def _validate_outputs(self, outputs: Dict[str, Any]) -> None: missing_keys = set(self.output_keys).difference(outputs) if missing_keys: raise ValueError(f"Missing some output keys: {missing_keys}") @abstractmethod def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Run the logic of this chain and return the output.""" async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Run the logic of this chain and return the output.""" raise NotImplementedError("Async call not supported for this chain type.") def __call__( self, inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False, callbacks: Callbacks = None, *, include_run_info: bool = False, ) -> Dict[str, Any]: """Run the logic of this chain and add to output if desired. Args: inputs: Dictionary of inputs, or single input if chain expects only one param. return_only_outputs: boolean for whether to return only outputs in the response. If True, only new keys generated by this chain will be returned. If False, both input keys and new keys generated by this chain will be returned. Defaults to False. callbacks: Callbacks to use for this chain run. 
If not provided, will use the callbacks provided to the chain. include_run_info: Whether to include run info in the response. Defaults to False. """ inputs = self.prep_inputs(inputs) callback_manager = CallbackManager.configure( callbacks, self.callbacks, self.verbose ) new_arg_supported = inspect.signature(self._call).parameters.get("run_manager") run_manager = callback_manager.on_chain_start( {"name": self.__class__.__name__}, inputs, ) try: outputs = ( self._call(inputs, run_manager=run_manager) if new_arg_supported else self._call(inputs) ) except (KeyboardInterrupt, Exception) as e: run_manager.on_chain_error(e) raise e run_manager.on_chain_end(outputs) final_outputs: Dict[str, Any] = self.prep_outputs( inputs, outputs, return_only_outputs ) if include_run_info: final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id) return final_outputs async def acall( self, inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False, callbacks: Callbacks = None, *, include_run_info: bool = False, ) -> Dict[str, Any]: """Run the logic of this chain and add to output if desired. Args: inputs: Dictionary of inputs, or single input if chain expects only one param. return_only_outputs: boolean for whether to return only outputs in the response. If True, only new keys generated by this chain will be returned. If False, both input keys and new keys generated by this chain will be returned. Defaults to False. callbacks: Callbacks to use for this chain run. If not provided, will use the callbacks provided to the chain. include_run_info: Whether to include run info in the response. Defaults to False. """ inputs = self.prep_inputs(inputs) callback_manager = AsyncCallbackManager.configure( callbacks, self.callbacks, self.verbose ) new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager") run_manager = await callback_manager.on_chain_start( {"name": self.__class__.__name__}, inputs, ) try: outputs = ( await self._acall(inputs, run_manager=run_manager) if new_arg_supported else await self._acall(inputs) ) except (KeyboardInterrupt, Exception) as e: await run_manager.on_chain_error(e) raise e await run_manager.on_chain_end(outputs) final_outputs: Dict[str, Any] = self.prep_outputs( inputs, outputs, return_only_outputs ) if include_run_info: final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id) return final_outputs def prep_outputs( self, inputs: Dict[str, str], outputs: Dict[str, str], return_only_outputs: bool = False, ) -> Dict[str, str]: """Validate and prep outputs.""" self._validate_outputs(outputs) if self.memory is not None: self.memory.save_context(inputs, outputs) if return_only_outputs: return outputs else: return {**inputs, **outputs} def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]: """Validate and prep inputs.""" if not isinstance(inputs, dict): _input_keys = set(self.input_keys) if self.memory is not None: # If there are multiple input keys, but some get set by memory so that # only one is not set, we can still figure out which key it is. _input_keys = _input_keys.difference(self.memory.memory_variables) if len(_input_keys) != 1: raise ValueError( f"A single string input was passed in, but this chain expects " f"multiple inputs ({_input_keys}). 
When a chain expects " f"multiple inputs, please call it by passing in a dictionary, " "eg `chain({'foo': 1, 'bar': 2})`" ) inputs = {list(_input_keys)[0]: inputs} if self.memory is not None: external_context = self.memory.load_memory_variables(inputs) inputs = dict(inputs, **external_context) self._validate_inputs(inputs) return inputs def apply( self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None ) -> List[Dict[str, str]]: """Call the chain on all inputs in the list.""" return [self(inputs, callbacks=callbacks) for inputs in input_list] def run(self, *args: Any, callbacks: Callbacks = None, **kwargs: Any) -> str: """Run the chain as text in, text out or multiple variables, text out.""" if len(self.output_keys) != 1: raise ValueError( f"`run` not supported when there is not exactly " f"one output key. Got {self.output_keys}." ) if args and not kwargs: if len(args) != 1: raise ValueError("`run` supports only one positional argument.") return self(args[0], callbacks=callbacks)[self.output_keys[0]] if kwargs and not args: return self(kwargs, callbacks=callbacks)[self.output_keys[0]] if not kwargs and not args: raise ValueError( "`run` supported with either positional arguments or keyword arguments," " but none were provided." ) raise ValueError( f"`run` supported with either positional arguments or keyword arguments" f" but not both. Got args: {args} and kwargs: {kwargs}." ) async def arun(self, *args: Any, callbacks: Callbacks = None, **kwargs: Any) -> str: """Run the chain as text in, text out or multiple variables, text out.""" if len(self.output_keys) != 1: raise ValueError( f"`run` not supported when there is not exactly " f"one output key. Got {self.output_keys}." ) if args and not kwargs: if len(args) != 1: raise ValueError("`run` supports only one positional argument.") return (await self.acall(args[0], callbacks=callbacks))[self.output_keys[0]] if kwargs and not args: return (await self.acall(kwargs, callbacks=callbacks))[self.output_keys[0]] raise ValueError( f"`run` supported with either positional arguments or keyword arguments" f" but not both. Got args: {args} and kwargs: {kwargs}." ) def dict(self, **kwargs: Any) -> Dict: """Return dictionary representation of chain.""" if self.memory is not None: raise ValueError("Saving of memory is not yet supported.") _dict = super().dict() _dict["_type"] = self._chain_type return _dict def save(self, file_path: Union[Path, str]) -> None: """Save the chain. Args: file_path: Path to file to save the chain to. Example: .. code-block:: python chain.save(file_path="path/chain.yaml") """ # Convert file to Path object. if isinstance(file_path, str): save_path = Path(file_path) else: save_path = file_path directory_path = save_path.parent directory_path.mkdir(parents=True, exist_ok=True) # Fetch dictionary to save chain_dict = self.dict() if save_path.suffix == ".json": with open(file_path, "w") as f: json.dump(chain_dict, f, indent=4) elif save_path.suffix == ".yaml": with open(file_path, "w") as f: yaml.dump(chain_dict, f, default_flow_style=False) else: raise ValueError(f"{save_path} must be json or yaml")
[ "langchain.schema.RunInfo", "langchain.callbacks.manager.AsyncCallbackManager.configure", "langchain.callbacks.manager.CallbackManager.configure" ]
[((816, 849), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (821, 849), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((904, 937), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (909, 937), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((958, 995), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (963, 995), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((1295, 1311), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1309, 1311), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((1747, 1790), 'pydantic.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (1756, 1790), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((4447, 4513), 'langchain.callbacks.manager.CallbackManager.configure', 'CallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (4472, 4513), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForChainRun, CallbackManager, CallbackManagerForChainRun, Callbacks\n'), ((6411, 6482), 'langchain.callbacks.manager.AsyncCallbackManager.configure', 'AsyncCallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (6441, 6482), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForChainRun, CallbackManager, CallbackManagerForChainRun, Callbacks\n'), ((1502, 1604), 'warnings.warn', 'warnings.warn', (['"""callback_manager is deprecated. Please use callbacks instead."""', 'DeprecationWarning'], {}), "('callback_manager is deprecated. Please use callbacks instead.',\n DeprecationWarning)\n", (1515, 1604), False, 'import warnings\n'), ((5284, 5318), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (5291, 5318), False, 'from langchain.schema import RUN_KEY, BaseMemory, RunInfo\n'), ((7286, 7320), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (7293, 7320), False, 'from langchain.schema import RUN_KEY, BaseMemory, RunInfo\n'), ((11932, 11947), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (11936, 11947), False, 'from pathlib import Path\n'), ((12267, 12301), 'json.dump', 'json.dump', (['chain_dict', 'f'], {'indent': '(4)'}), '(chain_dict, f, indent=4)\n', (12276, 12301), False, 'import json\n'), ((4564, 4593), 'inspect.signature', 'inspect.signature', (['self._call'], {}), '(self._call)\n', (4581, 4593), False, 'import inspect\n'), ((6533, 6563), 'inspect.signature', 'inspect.signature', (['self._acall'], {}), '(self._acall)\n', (6550, 6563), False, 'import inspect\n'), ((12404, 12454), 'yaml.dump', 'yaml.dump', (['chain_dict', 'f'], {'default_flow_style': '(False)'}), '(chain_dict, f, default_flow_style=False)\n', (12413, 12454), False, 'import yaml\n')]
"""Base interface that all chains should implement.""" import inspect import json import warnings from abc import ABC, abstractmethod from pathlib import Path from typing import Any, Dict, List, Optional, Union import yaml from pydantic import BaseModel, Field, root_validator, validator import langchain from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import ( AsyncCallbackManager, AsyncCallbackManagerForChainRun, CallbackManager, CallbackManagerForChainRun, Callbacks, ) from langchain.schema import RUN_KEY, BaseMemory, RunInfo def _get_verbosity() -> bool: return langchain.verbose class Chain(BaseModel, ABC): """Base interface that all chains should implement.""" memory: Optional[BaseMemory] = None callbacks: Callbacks = Field(default=None, exclude=True) callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True) verbose: bool = Field( default_factory=_get_verbosity ) # Whether to print the response text class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True @property def _chain_type(self) -> str: raise NotImplementedError("Saving not supported for this chain type.") @root_validator() def raise_deprecation(cls, values: Dict) -> Dict: """Raise deprecation warning if callback_manager is used.""" if values.get("callback_manager") is not None: warnings.warn( "callback_manager is deprecated. Please use callbacks instead.", DeprecationWarning, ) values["callbacks"] = values.pop("callback_manager", None) return values @validator("verbose", pre=True, always=True) def set_verbose(cls, verbose: Optional[bool]) -> bool: """If verbose is None, set it. This allows users to pass in None as verbose to access the global setting. """ if verbose is None: return _get_verbosity() else: return verbose @property @abstractmethod def input_keys(self) -> List[str]: """Input keys this chain expects.""" @property @abstractmethod def output_keys(self) -> List[str]: """Output keys this chain expects.""" def _validate_inputs(self, inputs: Dict[str, Any]) -> None: """Check that all inputs are present.""" missing_keys = set(self.input_keys).difference(inputs) if missing_keys: raise ValueError(f"Missing some input keys: {missing_keys}") def _validate_outputs(self, outputs: Dict[str, Any]) -> None: missing_keys = set(self.output_keys).difference(outputs) if missing_keys: raise ValueError(f"Missing some output keys: {missing_keys}") @abstractmethod def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Run the logic of this chain and return the output.""" async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Run the logic of this chain and return the output.""" raise NotImplementedError("Async call not supported for this chain type.") def __call__( self, inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False, callbacks: Callbacks = None, *, include_run_info: bool = False, ) -> Dict[str, Any]: """Run the logic of this chain and add to output if desired. Args: inputs: Dictionary of inputs, or single input if chain expects only one param. return_only_outputs: boolean for whether to return only outputs in the response. If True, only new keys generated by this chain will be returned. If False, both input keys and new keys generated by this chain will be returned. Defaults to False. callbacks: Callbacks to use for this chain run. 
If not provided, will use the callbacks provided to the chain. include_run_info: Whether to include run info in the response. Defaults to False. """ inputs = self.prep_inputs(inputs) callback_manager = CallbackManager.configure( callbacks, self.callbacks, self.verbose ) new_arg_supported = inspect.signature(self._call).parameters.get("run_manager") run_manager = callback_manager.on_chain_start( {"name": self.__class__.__name__}, inputs, ) try: outputs = ( self._call(inputs, run_manager=run_manager) if new_arg_supported else self._call(inputs) ) except (KeyboardInterrupt, Exception) as e: run_manager.on_chain_error(e) raise e run_manager.on_chain_end(outputs) final_outputs: Dict[str, Any] = self.prep_outputs( inputs, outputs, return_only_outputs ) if include_run_info: final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id) return final_outputs async def acall( self, inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False, callbacks: Callbacks = None, *, include_run_info: bool = False, ) -> Dict[str, Any]: """Run the logic of this chain and add to output if desired. Args: inputs: Dictionary of inputs, or single input if chain expects only one param. return_only_outputs: boolean for whether to return only outputs in the response. If True, only new keys generated by this chain will be returned. If False, both input keys and new keys generated by this chain will be returned. Defaults to False. callbacks: Callbacks to use for this chain run. If not provided, will use the callbacks provided to the chain. include_run_info: Whether to include run info in the response. Defaults to False. """ inputs = self.prep_inputs(inputs) callback_manager = AsyncCallbackManager.configure( callbacks, self.callbacks, self.verbose ) new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager") run_manager = await callback_manager.on_chain_start( {"name": self.__class__.__name__}, inputs, ) try: outputs = ( await self._acall(inputs, run_manager=run_manager) if new_arg_supported else await self._acall(inputs) ) except (KeyboardInterrupt, Exception) as e: await run_manager.on_chain_error(e) raise e await run_manager.on_chain_end(outputs) final_outputs: Dict[str, Any] = self.prep_outputs( inputs, outputs, return_only_outputs ) if include_run_info: final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id) return final_outputs def prep_outputs( self, inputs: Dict[str, str], outputs: Dict[str, str], return_only_outputs: bool = False, ) -> Dict[str, str]: """Validate and prep outputs.""" self._validate_outputs(outputs) if self.memory is not None: self.memory.save_context(inputs, outputs) if return_only_outputs: return outputs else: return {**inputs, **outputs} def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]: """Validate and prep inputs.""" if not isinstance(inputs, dict): _input_keys = set(self.input_keys) if self.memory is not None: # If there are multiple input keys, but some get set by memory so that # only one is not set, we can still figure out which key it is. _input_keys = _input_keys.difference(self.memory.memory_variables) if len(_input_keys) != 1: raise ValueError( f"A single string input was passed in, but this chain expects " f"multiple inputs ({_input_keys}). 
When a chain expects " f"multiple inputs, please call it by passing in a dictionary, " "eg `chain({'foo': 1, 'bar': 2})`" ) inputs = {list(_input_keys)[0]: inputs} if self.memory is not None: external_context = self.memory.load_memory_variables(inputs) inputs = dict(inputs, **external_context) self._validate_inputs(inputs) return inputs def apply( self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None ) -> List[Dict[str, str]]: """Call the chain on all inputs in the list.""" return [self(inputs, callbacks=callbacks) for inputs in input_list] def run(self, *args: Any, callbacks: Callbacks = None, **kwargs: Any) -> str: """Run the chain as text in, text out or multiple variables, text out.""" if len(self.output_keys) != 1: raise ValueError( f"`run` not supported when there is not exactly " f"one output key. Got {self.output_keys}." ) if args and not kwargs: if len(args) != 1: raise ValueError("`run` supports only one positional argument.") return self(args[0], callbacks=callbacks)[self.output_keys[0]] if kwargs and not args: return self(kwargs, callbacks=callbacks)[self.output_keys[0]] if not kwargs and not args: raise ValueError( "`run` supported with either positional arguments or keyword arguments," " but none were provided." ) raise ValueError( f"`run` supported with either positional arguments or keyword arguments" f" but not both. Got args: {args} and kwargs: {kwargs}." ) async def arun(self, *args: Any, callbacks: Callbacks = None, **kwargs: Any) -> str: """Run the chain as text in, text out or multiple variables, text out.""" if len(self.output_keys) != 1: raise ValueError( f"`run` not supported when there is not exactly " f"one output key. Got {self.output_keys}." ) if args and not kwargs: if len(args) != 1: raise ValueError("`run` supports only one positional argument.") return (await self.acall(args[0], callbacks=callbacks))[self.output_keys[0]] if kwargs and not args: return (await self.acall(kwargs, callbacks=callbacks))[self.output_keys[0]] raise ValueError( f"`run` supported with either positional arguments or keyword arguments" f" but not both. Got args: {args} and kwargs: {kwargs}." ) def dict(self, **kwargs: Any) -> Dict: """Return dictionary representation of chain.""" if self.memory is not None: raise ValueError("Saving of memory is not yet supported.") _dict = super().dict() _dict["_type"] = self._chain_type return _dict def save(self, file_path: Union[Path, str]) -> None: """Save the chain. Args: file_path: Path to file to save the chain to. Example: .. code-block:: python chain.save(file_path="path/chain.yaml") """ # Convert file to Path object. if isinstance(file_path, str): save_path = Path(file_path) else: save_path = file_path directory_path = save_path.parent directory_path.mkdir(parents=True, exist_ok=True) # Fetch dictionary to save chain_dict = self.dict() if save_path.suffix == ".json": with open(file_path, "w") as f: json.dump(chain_dict, f, indent=4) elif save_path.suffix == ".yaml": with open(file_path, "w") as f: yaml.dump(chain_dict, f, default_flow_style=False) else: raise ValueError(f"{save_path} must be json or yaml")
[ "langchain.schema.RunInfo", "langchain.callbacks.manager.AsyncCallbackManager.configure", "langchain.callbacks.manager.CallbackManager.configure" ]
[((816, 849), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (821, 849), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((904, 937), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (909, 937), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((958, 995), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (963, 995), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((1295, 1311), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1309, 1311), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((1747, 1790), 'pydantic.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (1756, 1790), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((4447, 4513), 'langchain.callbacks.manager.CallbackManager.configure', 'CallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (4472, 4513), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForChainRun, CallbackManager, CallbackManagerForChainRun, Callbacks\n'), ((6411, 6482), 'langchain.callbacks.manager.AsyncCallbackManager.configure', 'AsyncCallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (6441, 6482), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForChainRun, CallbackManager, CallbackManagerForChainRun, Callbacks\n'), ((1502, 1604), 'warnings.warn', 'warnings.warn', (['"""callback_manager is deprecated. Please use callbacks instead."""', 'DeprecationWarning'], {}), "('callback_manager is deprecated. Please use callbacks instead.',\n DeprecationWarning)\n", (1515, 1604), False, 'import warnings\n'), ((5284, 5318), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (5291, 5318), False, 'from langchain.schema import RUN_KEY, BaseMemory, RunInfo\n'), ((7286, 7320), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (7293, 7320), False, 'from langchain.schema import RUN_KEY, BaseMemory, RunInfo\n'), ((11932, 11947), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (11936, 11947), False, 'from pathlib import Path\n'), ((12267, 12301), 'json.dump', 'json.dump', (['chain_dict', 'f'], {'indent': '(4)'}), '(chain_dict, f, indent=4)\n', (12276, 12301), False, 'import json\n'), ((4564, 4593), 'inspect.signature', 'inspect.signature', (['self._call'], {}), '(self._call)\n', (4581, 4593), False, 'import inspect\n'), ((6533, 6563), 'inspect.signature', 'inspect.signature', (['self._acall'], {}), '(self._acall)\n', (6550, 6563), False, 'import inspect\n'), ((12404, 12454), 'yaml.dump', 'yaml.dump', (['chain_dict', 'f'], {'default_flow_style': '(False)'}), '(chain_dict, f, default_flow_style=False)\n', (12413, 12454), False, 'import yaml\n')]
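The Chain code above leaves `input_keys`, `output_keys`, and `_call` abstract, while `__call__`/`run` handle input prep, callbacks, and output validation. A minimal sketch of a concrete subclass follows; the class name `ReverseChain`, its key names, and the `langchain.chains.base` import path are illustrative assumptions, not something stated in the source above.

# Hypothetical minimal Chain subclass; names and import path are assumptions.
from typing import Any, Dict, List, Optional

from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain


class ReverseChain(Chain):
    """Toy chain that reverses its single input string."""

    input_key: str = "text"
    output_key: str = "reversed"

    @property
    def input_keys(self) -> List[str]:
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        return [self.output_key]

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        # __call__ has already validated and prepped `inputs` by this point.
        return {self.output_key: inputs[self.input_key][::-1]}


chain = ReverseChain()
print(chain.run("hello"))        # "olleh" -- `run` works because there is exactly one output key
print(chain({"text": "hello"}))  # {"text": "hello", "reversed": "olleh"}

Because `Chain` is a pydantic model, the extra keys are declared as fields rather than assigned in `__init__`; using `save()` would additionally require overriding `_chain_type`.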
"""Base interface for large language models to expose.""" import inspect import json import warnings from abc import ABC, abstractmethod from pathlib import Path from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union import yaml from pydantic import Extra, Field, root_validator, validator import langchain from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import ( AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks, ) from langchain.schema import ( AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string, ) def _get_verbosity() -> bool: return langchain.verbose def get_prompts( params: Dict[str, Any], prompts: List[str] ) -> Tuple[Dict[int, List], str, List[int], List[str]]: """Get prompts that are already cached.""" llm_string = str(sorted([(k, v) for k, v in params.items()])) missing_prompts = [] missing_prompt_idxs = [] existing_prompts = {} for i, prompt in enumerate(prompts): if langchain.llm_cache is not None: cache_val = langchain.llm_cache.lookup(prompt, llm_string) if isinstance(cache_val, list): existing_prompts[i] = cache_val else: missing_prompts.append(prompt) missing_prompt_idxs.append(i) return existing_prompts, llm_string, missing_prompt_idxs, missing_prompts def update_cache( existing_prompts: Dict[int, List], llm_string: str, missing_prompt_idxs: List[int], new_results: LLMResult, prompts: List[str], ) -> Optional[dict]: """Update the cache and get the LLM output.""" for i, result in enumerate(new_results.generations): existing_prompts[missing_prompt_idxs[i]] = result prompt = prompts[missing_prompt_idxs[i]] if langchain.llm_cache is not None: langchain.llm_cache.update(prompt, llm_string, result) llm_output = new_results.llm_output return llm_output class BaseLLM(BaseLanguageModel, ABC): """LLM wrapper should take in a prompt and return a string.""" cache: Optional[bool] = None verbose: bool = Field(default_factory=_get_verbosity) """Whether to print out response text.""" callbacks: Callbacks = Field(default=None, exclude=True) callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator() def raise_deprecation(cls, values: Dict) -> Dict: """Raise deprecation warning if callback_manager is used.""" if values.get("callback_manager") is not None: warnings.warn( "callback_manager is deprecated. Please use callbacks instead.", DeprecationWarning, ) values["callbacks"] = values.pop("callback_manager", None) return values @validator("verbose", pre=True, always=True) def set_verbose(cls, verbose: Optional[bool]) -> bool: """If verbose is None, set it. This allows users to pass in None as verbose to access the global setting. 
""" if verbose is None: return _get_verbosity() else: return verbose @abstractmethod def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> LLMResult: """Run the LLM on the given prompts.""" @abstractmethod async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> LLMResult: """Run the LLM on the given prompts.""" def generate_prompt( self, prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Callbacks = None, ) -> LLMResult: prompt_strings = [p.to_string() for p in prompts] return self.generate(prompt_strings, stop=stop, callbacks=callbacks) async def agenerate_prompt( self, prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Callbacks = None, ) -> LLMResult: prompt_strings = [p.to_string() for p in prompts] return await self.agenerate(prompt_strings, stop=stop, callbacks=callbacks) def generate( self, prompts: List[str], stop: Optional[List[str]] = None, callbacks: Callbacks = None, ) -> LLMResult: """Run the LLM on the given prompt and input.""" # If string is passed in directly no errors will be raised but outputs will # not make sense. if not isinstance(prompts, list): raise ValueError( "Argument 'prompts' is expected to be of type List[str], received" f" argument of type {type(prompts)}." ) params = self.dict() params["stop"] = stop ( existing_prompts, llm_string, missing_prompt_idxs, missing_prompts, ) = get_prompts(params, prompts) disregard_cache = self.cache is not None and not self.cache callback_manager = CallbackManager.configure( callbacks, self.callbacks, self.verbose ) new_arg_supported = inspect.signature(self._generate).parameters.get( "run_manager" ) if langchain.llm_cache is None or disregard_cache: # This happens when langchain.cache is None, but self.cache is True if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but no cache found at `langchain.cache`." 
) run_manager = callback_manager.on_llm_start( {"name": self.__class__.__name__}, prompts, invocation_params=params ) try: output = ( self._generate(prompts, stop=stop, run_manager=run_manager) if new_arg_supported else self._generate(prompts, stop=stop) ) except (KeyboardInterrupt, Exception) as e: run_manager.on_llm_error(e) raise e run_manager.on_llm_end(output) if run_manager: output.run = RunInfo(run_id=run_manager.run_id) return output if len(missing_prompts) > 0: run_manager = callback_manager.on_llm_start( {"name": self.__class__.__name__}, missing_prompts, invocation_params=params, ) try: new_results = ( self._generate(missing_prompts, stop=stop, run_manager=run_manager) if new_arg_supported else self._generate(missing_prompts, stop=stop) ) except (KeyboardInterrupt, Exception) as e: run_manager.on_llm_error(e) raise e run_manager.on_llm_end(new_results) llm_output = update_cache( existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts ) run_info = None if run_manager: run_info = RunInfo(run_id=run_manager.run_id) else: llm_output = {} run_info = None generations = [existing_prompts[i] for i in range(len(prompts))] return LLMResult(generations=generations, llm_output=llm_output, run=run_info) async def agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, callbacks: Callbacks = None, ) -> LLMResult: """Run the LLM on the given prompt and input.""" params = self.dict() params["stop"] = stop ( existing_prompts, llm_string, missing_prompt_idxs, missing_prompts, ) = get_prompts(params, prompts) disregard_cache = self.cache is not None and not self.cache callback_manager = AsyncCallbackManager.configure( callbacks, self.callbacks, self.verbose ) new_arg_supported = inspect.signature(self._agenerate).parameters.get( "run_manager" ) if langchain.llm_cache is None or disregard_cache: # This happens when langchain.cache is None, but self.cache is True if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but no cache found at `langchain.cache`." 
) run_manager = await callback_manager.on_llm_start( {"name": self.__class__.__name__}, prompts, invocation_params=params ) try: output = ( await self._agenerate(prompts, stop=stop, run_manager=run_manager) if new_arg_supported else await self._agenerate(prompts, stop=stop) ) except (KeyboardInterrupt, Exception) as e: await run_manager.on_llm_error(e, verbose=self.verbose) raise e await run_manager.on_llm_end(output, verbose=self.verbose) if run_manager: output.run = RunInfo(run_id=run_manager.run_id) return output if len(missing_prompts) > 0: run_manager = await callback_manager.on_llm_start( {"name": self.__class__.__name__}, missing_prompts, invocation_params=params, ) try: new_results = ( await self._agenerate( missing_prompts, stop=stop, run_manager=run_manager ) if new_arg_supported else await self._agenerate(missing_prompts, stop=stop) ) except (KeyboardInterrupt, Exception) as e: await run_manager.on_llm_error(e) raise e await run_manager.on_llm_end(new_results) llm_output = update_cache( existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts ) run_info = None if run_manager: run_info = RunInfo(run_id=run_manager.run_id) else: llm_output = {} run_info = None generations = [existing_prompts[i] for i in range(len(prompts))] return LLMResult(generations=generations, llm_output=llm_output, run=run_info) def __call__( self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None ) -> str: """Check Cache and run the LLM on the given prompt and input.""" if not isinstance(prompt, str): raise ValueError( "Argument `prompt` is expected to be a string. Instead found " f"{type(prompt)}. If you want to run the LLM on multiple prompts, use " "`generate` instead." ) return ( self.generate([prompt], stop=stop, callbacks=callbacks) .generations[0][0] .text ) async def _call_async( self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None ) -> str: """Check Cache and run the LLM on the given prompt and input.""" result = await self.agenerate([prompt], stop=stop, callbacks=callbacks) return result.generations[0][0].text def predict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str: if stop is None: _stop = None else: _stop = list(stop) return self(text, stop=_stop) def predict_messages( self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None ) -> BaseMessage: text = get_buffer_string(messages) if stop is None: _stop = None else: _stop = list(stop) content = self(text, stop=_stop) return AIMessage(content=content) async def apredict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str: if stop is None: _stop = None else: _stop = list(stop) return await self._call_async(text, stop=_stop) async def apredict_messages( self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None ) -> BaseMessage: text = get_buffer_string(messages) if stop is None: _stop = None else: _stop = list(stop) content = await self._call_async(text, stop=_stop) return AIMessage(content=content) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {} def __str__(self) -> str: """Get a string representation of the object for printing.""" cls_name = f"\033[1m{self.__class__.__name__}\033[0m" return f"{cls_name}\nParams: {self._identifying_params}" @property @abstractmethod def _llm_type(self) -> str: """Return type of llm.""" def dict(self, **kwargs: Any) -> Dict: """Return a dictionary of the LLM.""" starter_dict = dict(self._identifying_params) 
starter_dict["_type"] = self._llm_type return starter_dict def save(self, file_path: Union[Path, str]) -> None: """Save the LLM. Args: file_path: Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path="path/llm.yaml") """ # Convert file to Path object. if isinstance(file_path, str): save_path = Path(file_path) else: save_path = file_path directory_path = save_path.parent directory_path.mkdir(parents=True, exist_ok=True) # Fetch dictionary to save prompt_dict = self.dict() if save_path.suffix == ".json": with open(file_path, "w") as f: json.dump(prompt_dict, f, indent=4) elif save_path.suffix == ".yaml": with open(file_path, "w") as f: yaml.dump(prompt_dict, f, default_flow_style=False) else: raise ValueError(f"{save_path} must be json or yaml") class LLM(BaseLLM): """LLM class that expect subclasses to implement a simpler call method. The purpose of this class is to expose a simpler interface for working with LLMs, rather than expect the user to implement the full _generate method. """ @abstractmethod def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Run the LLM on the given prompt and input.""" async def _acall( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> str: """Run the LLM on the given prompt and input.""" raise NotImplementedError("Async generation not implemented for this LLM.") def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> LLMResult: """Run the LLM on the given prompt and input.""" # TODO: add caching here. generations = [] new_arg_supported = inspect.signature(self._call).parameters.get("run_manager") for prompt in prompts: text = ( self._call(prompt, stop=stop, run_manager=run_manager) if new_arg_supported else self._call(prompt, stop=stop) ) generations.append([Generation(text=text)]) return LLMResult(generations=generations) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> LLMResult: """Run the LLM on the given prompt and input.""" generations = [] new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager") for prompt in prompts: text = ( await self._acall(prompt, stop=stop, run_manager=run_manager) if new_arg_supported else await self._acall(prompt, stop=stop) ) generations.append([Generation(text=text)]) return LLMResult(generations=generations)
[ "langchain.callbacks.manager.AsyncCallbackManager.configure", "langchain.schema.Generation", "langchain.schema.get_buffer_string", "langchain.callbacks.manager.CallbackManager.configure", "langchain.schema.RunInfo", "langchain.schema.AIMessage", "langchain.llm_cache.lookup", "langchain.llm_cache.update", "langchain.schema.LLMResult" ]
[((2315, 2352), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (2320, 2352), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2426, 2459), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (2431, 2459), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2514, 2547), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (2519, 2547), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2696, 2712), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (2710, 2712), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((3148, 3191), 'pydantic.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (3157, 3191), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((5520, 5586), 'langchain.callbacks.manager.CallbackManager.configure', 'CallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (5545, 5586), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((7818, 7889), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output', 'run': 'run_info'}), '(generations=generations, llm_output=llm_output, run=run_info)\n', (7827, 7889), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((8435, 8506), 'langchain.callbacks.manager.AsyncCallbackManager.configure', 'AsyncCallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (8465, 8506), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((10893, 10964), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output', 'run': 'run_info'}), '(generations=generations, llm_output=llm_output, run=run_info)\n', (10902, 10964), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12285, 12312), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (12302, 12312), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12464, 12490), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (12473, 12490), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12886, 12913), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (12903, 12913), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((13083, 13109), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (13092, 13109), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((16290, 
16324), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (16299, 16324), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((17006, 17040), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (17015, 17040), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((1248, 1294), 'langchain.llm_cache.lookup', 'langchain.llm_cache.lookup', (['prompt', 'llm_string'], {}), '(prompt, llm_string)\n', (1274, 1294), False, 'import langchain\n'), ((2036, 2090), 'langchain.llm_cache.update', 'langchain.llm_cache.update', (['prompt', 'llm_string', 'result'], {}), '(prompt, llm_string, result)\n', (2062, 2090), False, 'import langchain\n'), ((2903, 3005), 'warnings.warn', 'warnings.warn', (['"""callback_manager is deprecated. Please use callbacks instead."""', 'DeprecationWarning'], {}), "('callback_manager is deprecated. Please use callbacks instead.',\n DeprecationWarning)\n", (2916, 3005), False, 'import warnings\n'), ((14159, 14174), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (14163, 14174), False, 'from pathlib import Path\n'), ((6670, 6704), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (6677, 6704), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((7625, 7659), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (7632, 7659), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((9667, 9701), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (9674, 9701), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((10700, 10734), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (10707, 10734), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((14495, 14530), 'json.dump', 'json.dump', (['prompt_dict', 'f'], {'indent': '(4)'}), '(prompt_dict, f, indent=4)\n', (14504, 14530), False, 'import json\n'), ((5637, 5670), 'inspect.signature', 'inspect.signature', (['self._generate'], {}), '(self._generate)\n', (5654, 5670), False, 'import inspect\n'), ((8557, 8591), 'inspect.signature', 'inspect.signature', (['self._agenerate'], {}), '(self._agenerate)\n', (8574, 8591), False, 'import inspect\n'), ((14633, 14684), 'yaml.dump', 'yaml.dump', (['prompt_dict', 'f'], {'default_flow_style': '(False)'}), '(prompt_dict, f, default_flow_style=False)\n', (14642, 14684), False, 'import yaml\n'), ((15934, 15963), 'inspect.signature', 'inspect.signature', (['self._call'], {}), '(self._call)\n', (15951, 15963), False, 'import inspect\n'), ((16251, 16272), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (16261, 16272), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((16635, 16665), 'inspect.signature', 'inspect.signature', (['self._acall'], {}), '(self._acall)\n', 
(16652, 16665), False, 'import inspect\n'), ((16967, 16988), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (16977, 16988), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n')]
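The `LLM` convenience class above only requires `_call` (plus the `_llm_type` property that `BaseLLM` leaves abstract); `BaseLLM.generate` then takes care of callbacks and the optional `langchain.llm_cache`. A minimal sketch under those assumptions follows; `FakeEchoLLM`, its echo behaviour, and the `langchain.llms.base` import path are hypothetical, not taken from the source above.

# Hypothetical minimal LLM subclass; name, behaviour, and import path are assumptions.
from typing import Any, List, Mapping, Optional

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM


class FakeEchoLLM(LLM):
    """Toy LLM that echoes the prompt, truncated at any of the stop sequences."""

    @property
    def _llm_type(self) -> str:
        return "fake-echo"

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        # Feeds into dict()/save() and into the llm_string used as the cache key.
        return {"model": "fake-echo"}

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> str:
        text = prompt
        for token in stop or []:
            text = text.split(token)[0]
        return text


llm = FakeEchoLLM()
print(llm("Hello world", stop=[" "]))   # "Hello"
result = llm.generate(["Hello world"])   # LLMResult with one Generation per prompt
print(result.generations[0][0].text)    # "Hello world"

Async use would additionally need `_acall`; without it, `agenerate` surfaces the NotImplementedError raised by `LLM._acall`.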
"""Base interface for large language models to expose.""" import inspect import json import warnings from abc import ABC, abstractmethod from pathlib import Path from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union import yaml from pydantic import Extra, Field, root_validator, validator import langchain from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import ( AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks, ) from langchain.schema import ( AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string, ) def _get_verbosity() -> bool: return langchain.verbose def get_prompts( params: Dict[str, Any], prompts: List[str] ) -> Tuple[Dict[int, List], str, List[int], List[str]]: """Get prompts that are already cached.""" llm_string = str(sorted([(k, v) for k, v in params.items()])) missing_prompts = [] missing_prompt_idxs = [] existing_prompts = {} for i, prompt in enumerate(prompts): if langchain.llm_cache is not None: cache_val = langchain.llm_cache.lookup(prompt, llm_string) if isinstance(cache_val, list): existing_prompts[i] = cache_val else: missing_prompts.append(prompt) missing_prompt_idxs.append(i) return existing_prompts, llm_string, missing_prompt_idxs, missing_prompts def update_cache( existing_prompts: Dict[int, List], llm_string: str, missing_prompt_idxs: List[int], new_results: LLMResult, prompts: List[str], ) -> Optional[dict]: """Update the cache and get the LLM output.""" for i, result in enumerate(new_results.generations): existing_prompts[missing_prompt_idxs[i]] = result prompt = prompts[missing_prompt_idxs[i]] if langchain.llm_cache is not None: langchain.llm_cache.update(prompt, llm_string, result) llm_output = new_results.llm_output return llm_output class BaseLLM(BaseLanguageModel, ABC): """LLM wrapper should take in a prompt and return a string.""" cache: Optional[bool] = None verbose: bool = Field(default_factory=_get_verbosity) """Whether to print out response text.""" callbacks: Callbacks = Field(default=None, exclude=True) callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator() def raise_deprecation(cls, values: Dict) -> Dict: """Raise deprecation warning if callback_manager is used.""" if values.get("callback_manager") is not None: warnings.warn( "callback_manager is deprecated. Please use callbacks instead.", DeprecationWarning, ) values["callbacks"] = values.pop("callback_manager", None) return values @validator("verbose", pre=True, always=True) def set_verbose(cls, verbose: Optional[bool]) -> bool: """If verbose is None, set it. This allows users to pass in None as verbose to access the global setting. 
""" if verbose is None: return _get_verbosity() else: return verbose @abstractmethod def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> LLMResult: """Run the LLM on the given prompts.""" @abstractmethod async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> LLMResult: """Run the LLM on the given prompts.""" def generate_prompt( self, prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Callbacks = None, ) -> LLMResult: prompt_strings = [p.to_string() for p in prompts] return self.generate(prompt_strings, stop=stop, callbacks=callbacks) async def agenerate_prompt( self, prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Callbacks = None, ) -> LLMResult: prompt_strings = [p.to_string() for p in prompts] return await self.agenerate(prompt_strings, stop=stop, callbacks=callbacks) def generate( self, prompts: List[str], stop: Optional[List[str]] = None, callbacks: Callbacks = None, ) -> LLMResult: """Run the LLM on the given prompt and input.""" # If string is passed in directly no errors will be raised but outputs will # not make sense. if not isinstance(prompts, list): raise ValueError( "Argument 'prompts' is expected to be of type List[str], received" f" argument of type {type(prompts)}." ) params = self.dict() params["stop"] = stop ( existing_prompts, llm_string, missing_prompt_idxs, missing_prompts, ) = get_prompts(params, prompts) disregard_cache = self.cache is not None and not self.cache callback_manager = CallbackManager.configure( callbacks, self.callbacks, self.verbose ) new_arg_supported = inspect.signature(self._generate).parameters.get( "run_manager" ) if langchain.llm_cache is None or disregard_cache: # This happens when langchain.cache is None, but self.cache is True if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but no cache found at `langchain.cache`." 
) run_manager = callback_manager.on_llm_start( {"name": self.__class__.__name__}, prompts, invocation_params=params ) try: output = ( self._generate(prompts, stop=stop, run_manager=run_manager) if new_arg_supported else self._generate(prompts, stop=stop) ) except (KeyboardInterrupt, Exception) as e: run_manager.on_llm_error(e) raise e run_manager.on_llm_end(output) if run_manager: output.run = RunInfo(run_id=run_manager.run_id) return output if len(missing_prompts) > 0: run_manager = callback_manager.on_llm_start( {"name": self.__class__.__name__}, missing_prompts, invocation_params=params, ) try: new_results = ( self._generate(missing_prompts, stop=stop, run_manager=run_manager) if new_arg_supported else self._generate(missing_prompts, stop=stop) ) except (KeyboardInterrupt, Exception) as e: run_manager.on_llm_error(e) raise e run_manager.on_llm_end(new_results) llm_output = update_cache( existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts ) run_info = None if run_manager: run_info = RunInfo(run_id=run_manager.run_id) else: llm_output = {} run_info = None generations = [existing_prompts[i] for i in range(len(prompts))] return LLMResult(generations=generations, llm_output=llm_output, run=run_info) async def agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, callbacks: Callbacks = None, ) -> LLMResult: """Run the LLM on the given prompt and input.""" params = self.dict() params["stop"] = stop ( existing_prompts, llm_string, missing_prompt_idxs, missing_prompts, ) = get_prompts(params, prompts) disregard_cache = self.cache is not None and not self.cache callback_manager = AsyncCallbackManager.configure( callbacks, self.callbacks, self.verbose ) new_arg_supported = inspect.signature(self._agenerate).parameters.get( "run_manager" ) if langchain.llm_cache is None or disregard_cache: # This happens when langchain.cache is None, but self.cache is True if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but no cache found at `langchain.cache`." 
) run_manager = await callback_manager.on_llm_start( {"name": self.__class__.__name__}, prompts, invocation_params=params ) try: output = ( await self._agenerate(prompts, stop=stop, run_manager=run_manager) if new_arg_supported else await self._agenerate(prompts, stop=stop) ) except (KeyboardInterrupt, Exception) as e: await run_manager.on_llm_error(e, verbose=self.verbose) raise e await run_manager.on_llm_end(output, verbose=self.verbose) if run_manager: output.run = RunInfo(run_id=run_manager.run_id) return output if len(missing_prompts) > 0: run_manager = await callback_manager.on_llm_start( {"name": self.__class__.__name__}, missing_prompts, invocation_params=params, ) try: new_results = ( await self._agenerate( missing_prompts, stop=stop, run_manager=run_manager ) if new_arg_supported else await self._agenerate(missing_prompts, stop=stop) ) except (KeyboardInterrupt, Exception) as e: await run_manager.on_llm_error(e) raise e await run_manager.on_llm_end(new_results) llm_output = update_cache( existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts ) run_info = None if run_manager: run_info = RunInfo(run_id=run_manager.run_id) else: llm_output = {} run_info = None generations = [existing_prompts[i] for i in range(len(prompts))] return LLMResult(generations=generations, llm_output=llm_output, run=run_info) def __call__( self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None ) -> str: """Check Cache and run the LLM on the given prompt and input.""" if not isinstance(prompt, str): raise ValueError( "Argument `prompt` is expected to be a string. Instead found " f"{type(prompt)}. If you want to run the LLM on multiple prompts, use " "`generate` instead." ) return ( self.generate([prompt], stop=stop, callbacks=callbacks) .generations[0][0] .text ) async def _call_async( self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None ) -> str: """Check Cache and run the LLM on the given prompt and input.""" result = await self.agenerate([prompt], stop=stop, callbacks=callbacks) return result.generations[0][0].text def predict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str: if stop is None: _stop = None else: _stop = list(stop) return self(text, stop=_stop) def predict_messages( self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None ) -> BaseMessage: text = get_buffer_string(messages) if stop is None: _stop = None else: _stop = list(stop) content = self(text, stop=_stop) return AIMessage(content=content) async def apredict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str: if stop is None: _stop = None else: _stop = list(stop) return await self._call_async(text, stop=_stop) async def apredict_messages( self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None ) -> BaseMessage: text = get_buffer_string(messages) if stop is None: _stop = None else: _stop = list(stop) content = await self._call_async(text, stop=_stop) return AIMessage(content=content) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {} def __str__(self) -> str: """Get a string representation of the object for printing.""" cls_name = f"\033[1m{self.__class__.__name__}\033[0m" return f"{cls_name}\nParams: {self._identifying_params}" @property @abstractmethod def _llm_type(self) -> str: """Return type of llm.""" def dict(self, **kwargs: Any) -> Dict: """Return a dictionary of the LLM.""" starter_dict = dict(self._identifying_params) 
starter_dict["_type"] = self._llm_type return starter_dict def save(self, file_path: Union[Path, str]) -> None: """Save the LLM. Args: file_path: Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path="path/llm.yaml") """ # Convert file to Path object. if isinstance(file_path, str): save_path = Path(file_path) else: save_path = file_path directory_path = save_path.parent directory_path.mkdir(parents=True, exist_ok=True) # Fetch dictionary to save prompt_dict = self.dict() if save_path.suffix == ".json": with open(file_path, "w") as f: json.dump(prompt_dict, f, indent=4) elif save_path.suffix == ".yaml": with open(file_path, "w") as f: yaml.dump(prompt_dict, f, default_flow_style=False) else: raise ValueError(f"{save_path} must be json or yaml") class LLM(BaseLLM): """LLM class that expect subclasses to implement a simpler call method. The purpose of this class is to expose a simpler interface for working with LLMs, rather than expect the user to implement the full _generate method. """ @abstractmethod def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Run the LLM on the given prompt and input.""" async def _acall( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> str: """Run the LLM on the given prompt and input.""" raise NotImplementedError("Async generation not implemented for this LLM.") def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> LLMResult: """Run the LLM on the given prompt and input.""" # TODO: add caching here. generations = [] new_arg_supported = inspect.signature(self._call).parameters.get("run_manager") for prompt in prompts: text = ( self._call(prompt, stop=stop, run_manager=run_manager) if new_arg_supported else self._call(prompt, stop=stop) ) generations.append([Generation(text=text)]) return LLMResult(generations=generations) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> LLMResult: """Run the LLM on the given prompt and input.""" generations = [] new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager") for prompt in prompts: text = ( await self._acall(prompt, stop=stop, run_manager=run_manager) if new_arg_supported else await self._acall(prompt, stop=stop) ) generations.append([Generation(text=text)]) return LLMResult(generations=generations)
[ "langchain.callbacks.manager.AsyncCallbackManager.configure", "langchain.schema.Generation", "langchain.schema.get_buffer_string", "langchain.callbacks.manager.CallbackManager.configure", "langchain.schema.RunInfo", "langchain.schema.AIMessage", "langchain.llm_cache.lookup", "langchain.llm_cache.update", "langchain.schema.LLMResult" ]
[((2315, 2352), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (2320, 2352), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2426, 2459), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (2431, 2459), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2514, 2547), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (2519, 2547), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2696, 2712), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (2710, 2712), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((3148, 3191), 'pydantic.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (3157, 3191), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((5520, 5586), 'langchain.callbacks.manager.CallbackManager.configure', 'CallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (5545, 5586), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((7818, 7889), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output', 'run': 'run_info'}), '(generations=generations, llm_output=llm_output, run=run_info)\n', (7827, 7889), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((8435, 8506), 'langchain.callbacks.manager.AsyncCallbackManager.configure', 'AsyncCallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (8465, 8506), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((10893, 10964), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output', 'run': 'run_info'}), '(generations=generations, llm_output=llm_output, run=run_info)\n', (10902, 10964), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12285, 12312), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (12302, 12312), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12464, 12490), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (12473, 12490), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12886, 12913), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (12903, 12913), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((13083, 13109), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (13092, 13109), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((16290, 
16324), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (16299, 16324), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((17006, 17040), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (17015, 17040), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((1248, 1294), 'langchain.llm_cache.lookup', 'langchain.llm_cache.lookup', (['prompt', 'llm_string'], {}), '(prompt, llm_string)\n', (1274, 1294), False, 'import langchain\n'), ((2036, 2090), 'langchain.llm_cache.update', 'langchain.llm_cache.update', (['prompt', 'llm_string', 'result'], {}), '(prompt, llm_string, result)\n', (2062, 2090), False, 'import langchain\n'), ((2903, 3005), 'warnings.warn', 'warnings.warn', (['"""callback_manager is deprecated. Please use callbacks instead."""', 'DeprecationWarning'], {}), "('callback_manager is deprecated. Please use callbacks instead.',\n DeprecationWarning)\n", (2916, 3005), False, 'import warnings\n'), ((14159, 14174), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (14163, 14174), False, 'from pathlib import Path\n'), ((6670, 6704), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (6677, 6704), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((7625, 7659), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (7632, 7659), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((9667, 9701), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (9674, 9701), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((10700, 10734), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (10707, 10734), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((14495, 14530), 'json.dump', 'json.dump', (['prompt_dict', 'f'], {'indent': '(4)'}), '(prompt_dict, f, indent=4)\n', (14504, 14530), False, 'import json\n'), ((5637, 5670), 'inspect.signature', 'inspect.signature', (['self._generate'], {}), '(self._generate)\n', (5654, 5670), False, 'import inspect\n'), ((8557, 8591), 'inspect.signature', 'inspect.signature', (['self._agenerate'], {}), '(self._agenerate)\n', (8574, 8591), False, 'import inspect\n'), ((14633, 14684), 'yaml.dump', 'yaml.dump', (['prompt_dict', 'f'], {'default_flow_style': '(False)'}), '(prompt_dict, f, default_flow_style=False)\n', (14642, 14684), False, 'import yaml\n'), ((15934, 15963), 'inspect.signature', 'inspect.signature', (['self._call'], {}), '(self._call)\n', (15951, 15963), False, 'import inspect\n'), ((16251, 16272), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (16261, 16272), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((16635, 16665), 'inspect.signature', 'inspect.signature', (['self._acall'], {}), '(self._acall)\n', 
(16652, 16665), False, 'import inspect\n'), ((16967, 16988), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (16977, 16988), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n')]
"""Base interface for large language models to expose.""" import inspect import json import warnings from abc import ABC, abstractmethod from pathlib import Path from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union import yaml from pydantic import Extra, Field, root_validator, validator import langchain from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import ( AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks, ) from langchain.schema import ( AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string, ) def _get_verbosity() -> bool: return langchain.verbose def get_prompts( params: Dict[str, Any], prompts: List[str] ) -> Tuple[Dict[int, List], str, List[int], List[str]]: """Get prompts that are already cached.""" llm_string = str(sorted([(k, v) for k, v in params.items()])) missing_prompts = [] missing_prompt_idxs = [] existing_prompts = {} for i, prompt in enumerate(prompts): if langchain.llm_cache is not None: cache_val = langchain.llm_cache.lookup(prompt, llm_string) if isinstance(cache_val, list): existing_prompts[i] = cache_val else: missing_prompts.append(prompt) missing_prompt_idxs.append(i) return existing_prompts, llm_string, missing_prompt_idxs, missing_prompts def update_cache( existing_prompts: Dict[int, List], llm_string: str, missing_prompt_idxs: List[int], new_results: LLMResult, prompts: List[str], ) -> Optional[dict]: """Update the cache and get the LLM output.""" for i, result in enumerate(new_results.generations): existing_prompts[missing_prompt_idxs[i]] = result prompt = prompts[missing_prompt_idxs[i]] if langchain.llm_cache is not None: langchain.llm_cache.update(prompt, llm_string, result) llm_output = new_results.llm_output return llm_output class BaseLLM(BaseLanguageModel, ABC): """LLM wrapper should take in a prompt and return a string.""" cache: Optional[bool] = None verbose: bool = Field(default_factory=_get_verbosity) """Whether to print out response text.""" callbacks: Callbacks = Field(default=None, exclude=True) callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator() def raise_deprecation(cls, values: Dict) -> Dict: """Raise deprecation warning if callback_manager is used.""" if values.get("callback_manager") is not None: warnings.warn( "callback_manager is deprecated. Please use callbacks instead.", DeprecationWarning, ) values["callbacks"] = values.pop("callback_manager", None) return values @validator("verbose", pre=True, always=True) def set_verbose(cls, verbose: Optional[bool]) -> bool: """If verbose is None, set it. This allows users to pass in None as verbose to access the global setting. 
""" if verbose is None: return _get_verbosity() else: return verbose @abstractmethod def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> LLMResult: """Run the LLM on the given prompts.""" @abstractmethod async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> LLMResult: """Run the LLM on the given prompts.""" def generate_prompt( self, prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Callbacks = None, ) -> LLMResult: prompt_strings = [p.to_string() for p in prompts] return self.generate(prompt_strings, stop=stop, callbacks=callbacks) async def agenerate_prompt( self, prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Callbacks = None, ) -> LLMResult: prompt_strings = [p.to_string() for p in prompts] return await self.agenerate(prompt_strings, stop=stop, callbacks=callbacks) def generate( self, prompts: List[str], stop: Optional[List[str]] = None, callbacks: Callbacks = None, ) -> LLMResult: """Run the LLM on the given prompt and input.""" # If string is passed in directly no errors will be raised but outputs will # not make sense. if not isinstance(prompts, list): raise ValueError( "Argument 'prompts' is expected to be of type List[str], received" f" argument of type {type(prompts)}." ) params = self.dict() params["stop"] = stop ( existing_prompts, llm_string, missing_prompt_idxs, missing_prompts, ) = get_prompts(params, prompts) disregard_cache = self.cache is not None and not self.cache callback_manager = CallbackManager.configure( callbacks, self.callbacks, self.verbose ) new_arg_supported = inspect.signature(self._generate).parameters.get( "run_manager" ) if langchain.llm_cache is None or disregard_cache: # This happens when langchain.cache is None, but self.cache is True if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but no cache found at `langchain.cache`." 
) run_manager = callback_manager.on_llm_start( {"name": self.__class__.__name__}, prompts, invocation_params=params ) try: output = ( self._generate(prompts, stop=stop, run_manager=run_manager) if new_arg_supported else self._generate(prompts, stop=stop) ) except (KeyboardInterrupt, Exception) as e: run_manager.on_llm_error(e) raise e run_manager.on_llm_end(output) if run_manager: output.run = RunInfo(run_id=run_manager.run_id) return output if len(missing_prompts) > 0: run_manager = callback_manager.on_llm_start( {"name": self.__class__.__name__}, missing_prompts, invocation_params=params, ) try: new_results = ( self._generate(missing_prompts, stop=stop, run_manager=run_manager) if new_arg_supported else self._generate(missing_prompts, stop=stop) ) except (KeyboardInterrupt, Exception) as e: run_manager.on_llm_error(e) raise e run_manager.on_llm_end(new_results) llm_output = update_cache( existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts ) run_info = None if run_manager: run_info = RunInfo(run_id=run_manager.run_id) else: llm_output = {} run_info = None generations = [existing_prompts[i] for i in range(len(prompts))] return LLMResult(generations=generations, llm_output=llm_output, run=run_info) async def agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, callbacks: Callbacks = None, ) -> LLMResult: """Run the LLM on the given prompt and input.""" params = self.dict() params["stop"] = stop ( existing_prompts, llm_string, missing_prompt_idxs, missing_prompts, ) = get_prompts(params, prompts) disregard_cache = self.cache is not None and not self.cache callback_manager = AsyncCallbackManager.configure( callbacks, self.callbacks, self.verbose ) new_arg_supported = inspect.signature(self._agenerate).parameters.get( "run_manager" ) if langchain.llm_cache is None or disregard_cache: # This happens when langchain.cache is None, but self.cache is True if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but no cache found at `langchain.cache`." 
) run_manager = await callback_manager.on_llm_start( {"name": self.__class__.__name__}, prompts, invocation_params=params ) try: output = ( await self._agenerate(prompts, stop=stop, run_manager=run_manager) if new_arg_supported else await self._agenerate(prompts, stop=stop) ) except (KeyboardInterrupt, Exception) as e: await run_manager.on_llm_error(e, verbose=self.verbose) raise e await run_manager.on_llm_end(output, verbose=self.verbose) if run_manager: output.run = RunInfo(run_id=run_manager.run_id) return output if len(missing_prompts) > 0: run_manager = await callback_manager.on_llm_start( {"name": self.__class__.__name__}, missing_prompts, invocation_params=params, ) try: new_results = ( await self._agenerate( missing_prompts, stop=stop, run_manager=run_manager ) if new_arg_supported else await self._agenerate(missing_prompts, stop=stop) ) except (KeyboardInterrupt, Exception) as e: await run_manager.on_llm_error(e) raise e await run_manager.on_llm_end(new_results) llm_output = update_cache( existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts ) run_info = None if run_manager: run_info = RunInfo(run_id=run_manager.run_id) else: llm_output = {} run_info = None generations = [existing_prompts[i] for i in range(len(prompts))] return LLMResult(generations=generations, llm_output=llm_output, run=run_info) def __call__( self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None ) -> str: """Check Cache and run the LLM on the given prompt and input.""" if not isinstance(prompt, str): raise ValueError( "Argument `prompt` is expected to be a string. Instead found " f"{type(prompt)}. If you want to run the LLM on multiple prompts, use " "`generate` instead." ) return ( self.generate([prompt], stop=stop, callbacks=callbacks) .generations[0][0] .text ) async def _call_async( self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None ) -> str: """Check Cache and run the LLM on the given prompt and input.""" result = await self.agenerate([prompt], stop=stop, callbacks=callbacks) return result.generations[0][0].text def predict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str: if stop is None: _stop = None else: _stop = list(stop) return self(text, stop=_stop) def predict_messages( self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None ) -> BaseMessage: text = get_buffer_string(messages) if stop is None: _stop = None else: _stop = list(stop) content = self(text, stop=_stop) return AIMessage(content=content) async def apredict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str: if stop is None: _stop = None else: _stop = list(stop) return await self._call_async(text, stop=_stop) async def apredict_messages( self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None ) -> BaseMessage: text = get_buffer_string(messages) if stop is None: _stop = None else: _stop = list(stop) content = await self._call_async(text, stop=_stop) return AIMessage(content=content) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {} def __str__(self) -> str: """Get a string representation of the object for printing.""" cls_name = f"\033[1m{self.__class__.__name__}\033[0m" return f"{cls_name}\nParams: {self._identifying_params}" @property @abstractmethod def _llm_type(self) -> str: """Return type of llm.""" def dict(self, **kwargs: Any) -> Dict: """Return a dictionary of the LLM.""" starter_dict = dict(self._identifying_params) 
starter_dict["_type"] = self._llm_type return starter_dict def save(self, file_path: Union[Path, str]) -> None: """Save the LLM. Args: file_path: Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path="path/llm.yaml") """ # Convert file to Path object. if isinstance(file_path, str): save_path = Path(file_path) else: save_path = file_path directory_path = save_path.parent directory_path.mkdir(parents=True, exist_ok=True) # Fetch dictionary to save prompt_dict = self.dict() if save_path.suffix == ".json": with open(file_path, "w") as f: json.dump(prompt_dict, f, indent=4) elif save_path.suffix == ".yaml": with open(file_path, "w") as f: yaml.dump(prompt_dict, f, default_flow_style=False) else: raise ValueError(f"{save_path} must be json or yaml") class LLM(BaseLLM): """LLM class that expect subclasses to implement a simpler call method. The purpose of this class is to expose a simpler interface for working with LLMs, rather than expect the user to implement the full _generate method. """ @abstractmethod def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Run the LLM on the given prompt and input.""" async def _acall( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> str: """Run the LLM on the given prompt and input.""" raise NotImplementedError("Async generation not implemented for this LLM.") def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> LLMResult: """Run the LLM on the given prompt and input.""" # TODO: add caching here. generations = [] new_arg_supported = inspect.signature(self._call).parameters.get("run_manager") for prompt in prompts: text = ( self._call(prompt, stop=stop, run_manager=run_manager) if new_arg_supported else self._call(prompt, stop=stop) ) generations.append([Generation(text=text)]) return LLMResult(generations=generations) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> LLMResult: """Run the LLM on the given prompt and input.""" generations = [] new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager") for prompt in prompts: text = ( await self._acall(prompt, stop=stop, run_manager=run_manager) if new_arg_supported else await self._acall(prompt, stop=stop) ) generations.append([Generation(text=text)]) return LLMResult(generations=generations)
[ "langchain.callbacks.manager.AsyncCallbackManager.configure", "langchain.schema.Generation", "langchain.schema.get_buffer_string", "langchain.callbacks.manager.CallbackManager.configure", "langchain.schema.RunInfo", "langchain.schema.AIMessage", "langchain.llm_cache.lookup", "langchain.llm_cache.update", "langchain.schema.LLMResult" ]
[((2315, 2352), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (2320, 2352), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2426, 2459), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (2431, 2459), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2514, 2547), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (2519, 2547), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2696, 2712), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (2710, 2712), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((3148, 3191), 'pydantic.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (3157, 3191), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((5520, 5586), 'langchain.callbacks.manager.CallbackManager.configure', 'CallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (5545, 5586), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((7818, 7889), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output', 'run': 'run_info'}), '(generations=generations, llm_output=llm_output, run=run_info)\n', (7827, 7889), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((8435, 8506), 'langchain.callbacks.manager.AsyncCallbackManager.configure', 'AsyncCallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (8465, 8506), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((10893, 10964), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output', 'run': 'run_info'}), '(generations=generations, llm_output=llm_output, run=run_info)\n', (10902, 10964), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12285, 12312), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (12302, 12312), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12464, 12490), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (12473, 12490), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12886, 12913), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (12903, 12913), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((13083, 13109), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (13092, 13109), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((16290, 
16324), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (16299, 16324), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((17006, 17040), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (17015, 17040), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((1248, 1294), 'langchain.llm_cache.lookup', 'langchain.llm_cache.lookup', (['prompt', 'llm_string'], {}), '(prompt, llm_string)\n', (1274, 1294), False, 'import langchain\n'), ((2036, 2090), 'langchain.llm_cache.update', 'langchain.llm_cache.update', (['prompt', 'llm_string', 'result'], {}), '(prompt, llm_string, result)\n', (2062, 2090), False, 'import langchain\n'), ((2903, 3005), 'warnings.warn', 'warnings.warn', (['"""callback_manager is deprecated. Please use callbacks instead."""', 'DeprecationWarning'], {}), "('callback_manager is deprecated. Please use callbacks instead.',\n DeprecationWarning)\n", (2916, 3005), False, 'import warnings\n'), ((14159, 14174), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (14163, 14174), False, 'from pathlib import Path\n'), ((6670, 6704), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (6677, 6704), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((7625, 7659), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (7632, 7659), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((9667, 9701), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (9674, 9701), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((10700, 10734), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (10707, 10734), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((14495, 14530), 'json.dump', 'json.dump', (['prompt_dict', 'f'], {'indent': '(4)'}), '(prompt_dict, f, indent=4)\n', (14504, 14530), False, 'import json\n'), ((5637, 5670), 'inspect.signature', 'inspect.signature', (['self._generate'], {}), '(self._generate)\n', (5654, 5670), False, 'import inspect\n'), ((8557, 8591), 'inspect.signature', 'inspect.signature', (['self._agenerate'], {}), '(self._agenerate)\n', (8574, 8591), False, 'import inspect\n'), ((14633, 14684), 'yaml.dump', 'yaml.dump', (['prompt_dict', 'f'], {'default_flow_style': '(False)'}), '(prompt_dict, f, default_flow_style=False)\n', (14642, 14684), False, 'import yaml\n'), ((15934, 15963), 'inspect.signature', 'inspect.signature', (['self._call'], {}), '(self._call)\n', (15951, 15963), False, 'import inspect\n'), ((16251, 16272), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (16261, 16272), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((16635, 16665), 'inspect.signature', 'inspect.signature', (['self._acall'], {}), '(self._acall)\n', 
(16652, 16665), False, 'import inspect\n'), ((16967, 16988), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (16977, 16988), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n')]
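The extract_api entries above pair each recorded call with character offsets into the row's code field: the arithmetic suggests the first tuple spans the full call expression (e.g. offsets 2315–2352 cover the 37-character Field(default_factory=_get_verbosity)) and the later tuple spans just the argument list. Below is a minimal sketch of how a row could be sanity-checked under that reading; the column names code and extract_api come from this dataset, but the offset interpretation and the helper itself are assumptions, not documented behaviour.

from typing import Any, Dict


def verify_row(row: Dict[str, Any]) -> int:
    """Check that every recorded call really sits at its claimed offsets.

    Assumed entry shape: ((start, end), qualified_name, local_name, parsed_args,
    arg_text, (args_start, args_end), flag, import_stmt).
    """
    code: str = row["code"]
    checked = 0
    for entry in row["extract_api"]:
        (start, end), local_name = entry[0], entry[2]
        snippet = code[start:end]
        # The slice should begin with the bare call name, e.g. "Field(" or "LLMResult(".
        if not snippet.startswith(local_name):
            raise ValueError(f"offset mismatch for {local_name!r}: {snippet[:40]!r}")
        checked += 1
    return checked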
"""Base interface for large language models to expose.""" import inspect import json import warnings from abc import ABC, abstractmethod from pathlib import Path from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union import yaml from pydantic import Extra, Field, root_validator, validator import langchain from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import ( AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks, ) from langchain.schema import ( AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string, ) def _get_verbosity() -> bool: return langchain.verbose def get_prompts( params: Dict[str, Any], prompts: List[str] ) -> Tuple[Dict[int, List], str, List[int], List[str]]: """Get prompts that are already cached.""" llm_string = str(sorted([(k, v) for k, v in params.items()])) missing_prompts = [] missing_prompt_idxs = [] existing_prompts = {} for i, prompt in enumerate(prompts): if langchain.llm_cache is not None: cache_val = langchain.llm_cache.lookup(prompt, llm_string) if isinstance(cache_val, list): existing_prompts[i] = cache_val else: missing_prompts.append(prompt) missing_prompt_idxs.append(i) return existing_prompts, llm_string, missing_prompt_idxs, missing_prompts def update_cache( existing_prompts: Dict[int, List], llm_string: str, missing_prompt_idxs: List[int], new_results: LLMResult, prompts: List[str], ) -> Optional[dict]: """Update the cache and get the LLM output.""" for i, result in enumerate(new_results.generations): existing_prompts[missing_prompt_idxs[i]] = result prompt = prompts[missing_prompt_idxs[i]] if langchain.llm_cache is not None: langchain.llm_cache.update(prompt, llm_string, result) llm_output = new_results.llm_output return llm_output class BaseLLM(BaseLanguageModel, ABC): """LLM wrapper should take in a prompt and return a string.""" cache: Optional[bool] = None verbose: bool = Field(default_factory=_get_verbosity) """Whether to print out response text.""" callbacks: Callbacks = Field(default=None, exclude=True) callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator() def raise_deprecation(cls, values: Dict) -> Dict: """Raise deprecation warning if callback_manager is used.""" if values.get("callback_manager") is not None: warnings.warn( "callback_manager is deprecated. Please use callbacks instead.", DeprecationWarning, ) values["callbacks"] = values.pop("callback_manager", None) return values @validator("verbose", pre=True, always=True) def set_verbose(cls, verbose: Optional[bool]) -> bool: """If verbose is None, set it. This allows users to pass in None as verbose to access the global setting. 
""" if verbose is None: return _get_verbosity() else: return verbose @abstractmethod def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> LLMResult: """Run the LLM on the given prompts.""" @abstractmethod async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> LLMResult: """Run the LLM on the given prompts.""" def generate_prompt( self, prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Callbacks = None, ) -> LLMResult: prompt_strings = [p.to_string() for p in prompts] return self.generate(prompt_strings, stop=stop, callbacks=callbacks) async def agenerate_prompt( self, prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Callbacks = None, ) -> LLMResult: prompt_strings = [p.to_string() for p in prompts] return await self.agenerate(prompt_strings, stop=stop, callbacks=callbacks) def generate( self, prompts: List[str], stop: Optional[List[str]] = None, callbacks: Callbacks = None, ) -> LLMResult: """Run the LLM on the given prompt and input.""" # If string is passed in directly no errors will be raised but outputs will # not make sense. if not isinstance(prompts, list): raise ValueError( "Argument 'prompts' is expected to be of type List[str], received" f" argument of type {type(prompts)}." ) params = self.dict() params["stop"] = stop ( existing_prompts, llm_string, missing_prompt_idxs, missing_prompts, ) = get_prompts(params, prompts) disregard_cache = self.cache is not None and not self.cache callback_manager = CallbackManager.configure( callbacks, self.callbacks, self.verbose ) new_arg_supported = inspect.signature(self._generate).parameters.get( "run_manager" ) if langchain.llm_cache is None or disregard_cache: # This happens when langchain.cache is None, but self.cache is True if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but no cache found at `langchain.cache`." 
) run_manager = callback_manager.on_llm_start( {"name": self.__class__.__name__}, prompts, invocation_params=params ) try: output = ( self._generate(prompts, stop=stop, run_manager=run_manager) if new_arg_supported else self._generate(prompts, stop=stop) ) except (KeyboardInterrupt, Exception) as e: run_manager.on_llm_error(e) raise e run_manager.on_llm_end(output) if run_manager: output.run = RunInfo(run_id=run_manager.run_id) return output if len(missing_prompts) > 0: run_manager = callback_manager.on_llm_start( {"name": self.__class__.__name__}, missing_prompts, invocation_params=params, ) try: new_results = ( self._generate(missing_prompts, stop=stop, run_manager=run_manager) if new_arg_supported else self._generate(missing_prompts, stop=stop) ) except (KeyboardInterrupt, Exception) as e: run_manager.on_llm_error(e) raise e run_manager.on_llm_end(new_results) llm_output = update_cache( existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts ) run_info = None if run_manager: run_info = RunInfo(run_id=run_manager.run_id) else: llm_output = {} run_info = None generations = [existing_prompts[i] for i in range(len(prompts))] return LLMResult(generations=generations, llm_output=llm_output, run=run_info) async def agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, callbacks: Callbacks = None, ) -> LLMResult: """Run the LLM on the given prompt and input.""" params = self.dict() params["stop"] = stop ( existing_prompts, llm_string, missing_prompt_idxs, missing_prompts, ) = get_prompts(params, prompts) disregard_cache = self.cache is not None and not self.cache callback_manager = AsyncCallbackManager.configure( callbacks, self.callbacks, self.verbose ) new_arg_supported = inspect.signature(self._agenerate).parameters.get( "run_manager" ) if langchain.llm_cache is None or disregard_cache: # This happens when langchain.cache is None, but self.cache is True if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but no cache found at `langchain.cache`." 
) run_manager = await callback_manager.on_llm_start( {"name": self.__class__.__name__}, prompts, invocation_params=params ) try: output = ( await self._agenerate(prompts, stop=stop, run_manager=run_manager) if new_arg_supported else await self._agenerate(prompts, stop=stop) ) except (KeyboardInterrupt, Exception) as e: await run_manager.on_llm_error(e, verbose=self.verbose) raise e await run_manager.on_llm_end(output, verbose=self.verbose) if run_manager: output.run = RunInfo(run_id=run_manager.run_id) return output if len(missing_prompts) > 0: run_manager = await callback_manager.on_llm_start( {"name": self.__class__.__name__}, missing_prompts, invocation_params=params, ) try: new_results = ( await self._agenerate( missing_prompts, stop=stop, run_manager=run_manager ) if new_arg_supported else await self._agenerate(missing_prompts, stop=stop) ) except (KeyboardInterrupt, Exception) as e: await run_manager.on_llm_error(e) raise e await run_manager.on_llm_end(new_results) llm_output = update_cache( existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts ) run_info = None if run_manager: run_info = RunInfo(run_id=run_manager.run_id) else: llm_output = {} run_info = None generations = [existing_prompts[i] for i in range(len(prompts))] return LLMResult(generations=generations, llm_output=llm_output, run=run_info) def __call__( self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None ) -> str: """Check Cache and run the LLM on the given prompt and input.""" if not isinstance(prompt, str): raise ValueError( "Argument `prompt` is expected to be a string. Instead found " f"{type(prompt)}. If you want to run the LLM on multiple prompts, use " "`generate` instead." ) return ( self.generate([prompt], stop=stop, callbacks=callbacks) .generations[0][0] .text ) async def _call_async( self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None ) -> str: """Check Cache and run the LLM on the given prompt and input.""" result = await self.agenerate([prompt], stop=stop, callbacks=callbacks) return result.generations[0][0].text def predict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str: if stop is None: _stop = None else: _stop = list(stop) return self(text, stop=_stop) def predict_messages( self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None ) -> BaseMessage: text = get_buffer_string(messages) if stop is None: _stop = None else: _stop = list(stop) content = self(text, stop=_stop) return AIMessage(content=content) async def apredict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str: if stop is None: _stop = None else: _stop = list(stop) return await self._call_async(text, stop=_stop) async def apredict_messages( self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None ) -> BaseMessage: text = get_buffer_string(messages) if stop is None: _stop = None else: _stop = list(stop) content = await self._call_async(text, stop=_stop) return AIMessage(content=content) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {} def __str__(self) -> str: """Get a string representation of the object for printing.""" cls_name = f"\033[1m{self.__class__.__name__}\033[0m" return f"{cls_name}\nParams: {self._identifying_params}" @property @abstractmethod def _llm_type(self) -> str: """Return type of llm.""" def dict(self, **kwargs: Any) -> Dict: """Return a dictionary of the LLM.""" starter_dict = dict(self._identifying_params) 
starter_dict["_type"] = self._llm_type return starter_dict def save(self, file_path: Union[Path, str]) -> None: """Save the LLM. Args: file_path: Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path="path/llm.yaml") """ # Convert file to Path object. if isinstance(file_path, str): save_path = Path(file_path) else: save_path = file_path directory_path = save_path.parent directory_path.mkdir(parents=True, exist_ok=True) # Fetch dictionary to save prompt_dict = self.dict() if save_path.suffix == ".json": with open(file_path, "w") as f: json.dump(prompt_dict, f, indent=4) elif save_path.suffix == ".yaml": with open(file_path, "w") as f: yaml.dump(prompt_dict, f, default_flow_style=False) else: raise ValueError(f"{save_path} must be json or yaml") class LLM(BaseLLM): """LLM class that expect subclasses to implement a simpler call method. The purpose of this class is to expose a simpler interface for working with LLMs, rather than expect the user to implement the full _generate method. """ @abstractmethod def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Run the LLM on the given prompt and input.""" async def _acall( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> str: """Run the LLM on the given prompt and input.""" raise NotImplementedError("Async generation not implemented for this LLM.") def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> LLMResult: """Run the LLM on the given prompt and input.""" # TODO: add caching here. generations = [] new_arg_supported = inspect.signature(self._call).parameters.get("run_manager") for prompt in prompts: text = ( self._call(prompt, stop=stop, run_manager=run_manager) if new_arg_supported else self._call(prompt, stop=stop) ) generations.append([Generation(text=text)]) return LLMResult(generations=generations) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, ) -> LLMResult: """Run the LLM on the given prompt and input.""" generations = [] new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager") for prompt in prompts: text = ( await self._acall(prompt, stop=stop, run_manager=run_manager) if new_arg_supported else await self._acall(prompt, stop=stop) ) generations.append([Generation(text=text)]) return LLMResult(generations=generations)
[ "langchain.callbacks.manager.AsyncCallbackManager.configure", "langchain.schema.Generation", "langchain.schema.get_buffer_string", "langchain.callbacks.manager.CallbackManager.configure", "langchain.schema.RunInfo", "langchain.schema.AIMessage", "langchain.llm_cache.lookup", "langchain.llm_cache.update", "langchain.schema.LLMResult" ]
[((2315, 2352), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (2320, 2352), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2426, 2459), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (2431, 2459), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2514, 2547), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (2519, 2547), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2696, 2712), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (2710, 2712), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((3148, 3191), 'pydantic.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (3157, 3191), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((5520, 5586), 'langchain.callbacks.manager.CallbackManager.configure', 'CallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (5545, 5586), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((7818, 7889), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output', 'run': 'run_info'}), '(generations=generations, llm_output=llm_output, run=run_info)\n', (7827, 7889), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((8435, 8506), 'langchain.callbacks.manager.AsyncCallbackManager.configure', 'AsyncCallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (8465, 8506), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((10893, 10964), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output', 'run': 'run_info'}), '(generations=generations, llm_output=llm_output, run=run_info)\n', (10902, 10964), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12285, 12312), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (12302, 12312), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12464, 12490), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (12473, 12490), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12886, 12913), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (12903, 12913), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((13083, 13109), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (13092, 13109), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((16290, 
16324), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (16299, 16324), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((17006, 17040), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (17015, 17040), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((1248, 1294), 'langchain.llm_cache.lookup', 'langchain.llm_cache.lookup', (['prompt', 'llm_string'], {}), '(prompt, llm_string)\n', (1274, 1294), False, 'import langchain\n'), ((2036, 2090), 'langchain.llm_cache.update', 'langchain.llm_cache.update', (['prompt', 'llm_string', 'result'], {}), '(prompt, llm_string, result)\n', (2062, 2090), False, 'import langchain\n'), ((2903, 3005), 'warnings.warn', 'warnings.warn', (['"""callback_manager is deprecated. Please use callbacks instead."""', 'DeprecationWarning'], {}), "('callback_manager is deprecated. Please use callbacks instead.',\n DeprecationWarning)\n", (2916, 3005), False, 'import warnings\n'), ((14159, 14174), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (14163, 14174), False, 'from pathlib import Path\n'), ((6670, 6704), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (6677, 6704), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((7625, 7659), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (7632, 7659), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((9667, 9701), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (9674, 9701), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((10700, 10734), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (10707, 10734), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((14495, 14530), 'json.dump', 'json.dump', (['prompt_dict', 'f'], {'indent': '(4)'}), '(prompt_dict, f, indent=4)\n', (14504, 14530), False, 'import json\n'), ((5637, 5670), 'inspect.signature', 'inspect.signature', (['self._generate'], {}), '(self._generate)\n', (5654, 5670), False, 'import inspect\n'), ((8557, 8591), 'inspect.signature', 'inspect.signature', (['self._agenerate'], {}), '(self._agenerate)\n', (8574, 8591), False, 'import inspect\n'), ((14633, 14684), 'yaml.dump', 'yaml.dump', (['prompt_dict', 'f'], {'default_flow_style': '(False)'}), '(prompt_dict, f, default_flow_style=False)\n', (14642, 14684), False, 'import yaml\n'), ((15934, 15963), 'inspect.signature', 'inspect.signature', (['self._call'], {}), '(self._call)\n', (15951, 15963), False, 'import inspect\n'), ((16251, 16272), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (16261, 16272), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((16635, 16665), 'inspect.signature', 'inspect.signature', (['self._acall'], {}), '(self._acall)\n', 
(16652, 16665), False, 'import inspect\n'), ((16967, 16988), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (16977, 16988), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n')]
"""Base interface for large language models to expose.""" import json from abc import ABC, abstractmethod from pathlib import Path from typing import Any, Dict, List, Mapping, Optional, Tuple, Union import yaml from pydantic import BaseModel, Extra, Field, validator import langchain from langchain.callbacks import get_callback_manager from langchain.callbacks.base import BaseCallbackManager from langchain.schema import Generation, LLMResult def _get_verbosity() -> bool: return langchain.verbose def get_prompts( params: Dict[str, Any], prompts: List[str] ) -> Tuple[Dict[int, List], str, List[int], List[str]]: """Get prompts that are already cached.""" llm_string = str(sorted([(k, v) for k, v in params.items()])) missing_prompts = [] missing_prompt_idxs = [] existing_prompts = {} for i, prompt in enumerate(prompts): if langchain.llm_cache is not None: cache_val = langchain.llm_cache.lookup(prompt, llm_string) if isinstance(cache_val, list): existing_prompts[i] = cache_val else: missing_prompts.append(prompt) missing_prompt_idxs.append(i) return existing_prompts, llm_string, missing_prompt_idxs, missing_prompts def update_cache( existing_prompts: Dict[int, List], llm_string: str, missing_prompt_idxs: List[int], new_results: LLMResult, prompts: List[str], ) -> Optional[dict]: """Update the cache and get the LLM output.""" for i, result in enumerate(new_results.generations): existing_prompts[missing_prompt_idxs[i]] = result prompt = prompts[missing_prompt_idxs[i]] if langchain.llm_cache is not None: langchain.llm_cache.update(prompt, llm_string, result) llm_output = new_results.llm_output return llm_output class BaseLLM(BaseModel, ABC): """LLM wrapper should take in a prompt and return a string.""" cache: Optional[bool] = None verbose: bool = Field(default_factory=_get_verbosity) """Whether to print out response text.""" callback_manager: BaseCallbackManager = Field(default_factory=get_callback_manager) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @validator("callback_manager", pre=True, always=True) def set_callback_manager( cls, callback_manager: Optional[BaseCallbackManager] ) -> BaseCallbackManager: """If callback manager is None, set it. This allows users to pass in None as callback manager, which is a nice UX. """ return callback_manager or get_callback_manager() @validator("verbose", pre=True, always=True) def set_verbose(cls, verbose: Optional[bool]) -> bool: """If verbose is None, set it. This allows users to pass in None as verbose to access the global setting. """ if verbose is None: return _get_verbosity() else: return verbose @abstractmethod def _generate( self, prompts: List[str], stop: Optional[List[str]] = None ) -> LLMResult: """Run the LLM on the given prompts.""" @abstractmethod async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None ) -> LLMResult: """Run the LLM on the given prompts.""" def generate( self, prompts: List[str], stop: Optional[List[str]] = None ) -> LLMResult: """Run the LLM on the given prompt and input.""" # If string is passed in directly no errors will be raised but outputs will # not make sense. if not isinstance(prompts, list): raise ValueError( "Argument 'prompts' is expected to be of type List[str], received" f" argument of type {type(prompts)}." 
) disregard_cache = self.cache is not None and not self.cache if langchain.llm_cache is None or disregard_cache: # This happens when langchain.cache is None, but self.cache is True if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but no cache found at `langchain.cache`." ) self.callback_manager.on_llm_start( {"name": self.__class__.__name__}, prompts, verbose=self.verbose ) try: output = self._generate(prompts, stop=stop) except (KeyboardInterrupt, Exception) as e: self.callback_manager.on_llm_error(e, verbose=self.verbose) raise e self.callback_manager.on_llm_end(output, verbose=self.verbose) return output params = self.dict() params["stop"] = stop ( existing_prompts, llm_string, missing_prompt_idxs, missing_prompts, ) = get_prompts(params, prompts) if len(missing_prompts) > 0: self.callback_manager.on_llm_start( {"name": self.__class__.__name__}, missing_prompts, verbose=self.verbose ) try: new_results = self._generate(missing_prompts, stop=stop) except (KeyboardInterrupt, Exception) as e: self.callback_manager.on_llm_error(e, verbose=self.verbose) raise e self.callback_manager.on_llm_end(new_results, verbose=self.verbose) llm_output = update_cache( existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts ) else: llm_output = {} generations = [existing_prompts[i] for i in range(len(prompts))] return LLMResult(generations=generations, llm_output=llm_output) async def agenerate( self, prompts: List[str], stop: Optional[List[str]] = None ) -> LLMResult: """Run the LLM on the given prompt and input.""" disregard_cache = self.cache is not None and not self.cache if langchain.llm_cache is None or disregard_cache: # This happens when langchain.cache is None, but self.cache is True if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but no cache found at `langchain.cache`." 
) if self.callback_manager.is_async: await self.callback_manager.on_llm_start( {"name": self.__class__.__name__}, prompts, verbose=self.verbose ) else: self.callback_manager.on_llm_start( {"name": self.__class__.__name__}, prompts, verbose=self.verbose ) try: output = await self._agenerate(prompts, stop=stop) except (KeyboardInterrupt, Exception) as e: if self.callback_manager.is_async: await self.callback_manager.on_llm_error(e, verbose=self.verbose) else: self.callback_manager.on_llm_error(e, verbose=self.verbose) raise e if self.callback_manager.is_async: await self.callback_manager.on_llm_end(output, verbose=self.verbose) else: self.callback_manager.on_llm_end(output, verbose=self.verbose) return output params = self.dict() params["stop"] = stop ( existing_prompts, llm_string, missing_prompt_idxs, missing_prompts, ) = get_prompts(params, prompts) if len(missing_prompts) > 0: if self.callback_manager.is_async: await self.callback_manager.on_llm_start( {"name": self.__class__.__name__}, missing_prompts, verbose=self.verbose, ) else: self.callback_manager.on_llm_start( {"name": self.__class__.__name__}, missing_prompts, verbose=self.verbose, ) try: new_results = await self._agenerate(missing_prompts, stop=stop) except (KeyboardInterrupt, Exception) as e: if self.callback_manager.is_async: await self.callback_manager.on_llm_error(e, verbose=self.verbose) else: self.callback_manager.on_llm_error(e, verbose=self.verbose) raise e if self.callback_manager.is_async: await self.callback_manager.on_llm_end( new_results, verbose=self.verbose ) else: self.callback_manager.on_llm_end(new_results, verbose=self.verbose) llm_output = update_cache( existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts ) else: llm_output = {} generations = [existing_prompts[i] for i in range(len(prompts))] return LLMResult(generations=generations, llm_output=llm_output) def get_num_tokens(self, text: str) -> int: """Get the number of tokens present in the text.""" # TODO: this method may not be exact. # TODO: this method may differ based on model (eg codex). try: from transformers import GPT2TokenizerFast except ImportError: raise ValueError( "Could not import transformers python package. " "This is needed in order to calculate get_num_tokens. " "Please it install it with `pip install transformers`." ) # create a GPT-3 tokenizer instance tokenizer = GPT2TokenizerFast.from_pretrained("gpt2") # tokenize the text using the GPT-3 tokenizer tokenized_text = tokenizer.tokenize(text) # calculate the number of tokens in the tokenized text return len(tokenized_text) def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Check Cache and run the LLM on the given prompt and input.""" return self.generate([prompt], stop=stop).generations[0][0].text @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {} def __str__(self) -> str: """Get a string representation of the object for printing.""" cls_name = f"\033[1m{self.__class__.__name__}\033[0m" return f"{cls_name}\nParams: {self._identifying_params}" @property @abstractmethod def _llm_type(self) -> str: """Return type of llm.""" def dict(self, **kwargs: Any) -> Dict: """Return a dictionary of the LLM.""" starter_dict = dict(self._identifying_params) starter_dict["_type"] = self._llm_type return starter_dict def save(self, file_path: Union[Path, str]) -> None: """Save the LLM. Args: file_path: Path to file to save the LLM to. Example: .. 
code-block:: python llm.save(file_path="path/llm.yaml") """ # Convert file to Path object. if isinstance(file_path, str): save_path = Path(file_path) else: save_path = file_path directory_path = save_path.parent directory_path.mkdir(parents=True, exist_ok=True) # Fetch dictionary to save prompt_dict = self.dict() if save_path.suffix == ".json": with open(file_path, "w") as f: json.dump(prompt_dict, f, indent=4) elif save_path.suffix == ".yaml": with open(file_path, "w") as f: yaml.dump(prompt_dict, f, default_flow_style=False) else: raise ValueError(f"{save_path} must be json or yaml") class LLM(BaseLLM): """LLM class that expect subclasses to implement a simpler call method. The purpose of this class is to expose a simpler interface for working with LLMs, rather than expect the user to implement the full _generate method. """ @abstractmethod def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Run the LLM on the given prompt and input.""" def _generate( self, prompts: List[str], stop: Optional[List[str]] = None ) -> LLMResult: """Run the LLM on the given prompt and input.""" # TODO: add caching here. generations = [] for prompt in prompts: text = self._call(prompt, stop=stop) generations.append([Generation(text=text)]) return LLMResult(generations=generations) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None ) -> LLMResult: """Run the LLM on the given prompt and input.""" raise NotImplementedError("Async generation not implemented for this LLM.")
[ "langchain.schema.Generation", "langchain.llm_cache.update", "langchain.llm_cache.lookup", "langchain.schema.LLMResult", "langchain.callbacks.get_callback_manager" ]
[((1991, 2028), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (1996, 2028), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((2119, 2162), 'pydantic.Field', 'Field', ([], {'default_factory': 'get_callback_manager'}), '(default_factory=get_callback_manager)\n', (2124, 2162), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((2311, 2363), 'pydantic.validator', 'validator', (['"""callback_manager"""'], {'pre': '(True)', 'always': '(True)'}), "('callback_manager', pre=True, always=True)\n", (2320, 2363), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((2693, 2736), 'pydantic.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (2702, 2736), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((5769, 5826), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output'}), '(generations=generations, llm_output=llm_output)\n', (5778, 5826), False, 'from langchain.schema import Generation, LLMResult\n'), ((9134, 9191), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output'}), '(generations=generations, llm_output=llm_output)\n', (9143, 9191), False, 'from langchain.schema import Generation, LLMResult\n'), ((9826, 9867), 'transformers.GPT2TokenizerFast.from_pretrained', 'GPT2TokenizerFast.from_pretrained', (['"""gpt2"""'], {}), "('gpt2')\n", (9859, 9867), False, 'from transformers import GPT2TokenizerFast\n'), ((12744, 12778), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (12753, 12778), False, 'from langchain.schema import Generation, LLMResult\n'), ((932, 978), 'langchain.llm_cache.lookup', 'langchain.llm_cache.lookup', (['prompt', 'llm_string'], {}), '(prompt, llm_string)\n', (958, 978), False, 'import langchain\n'), ((1720, 1774), 'langchain.llm_cache.update', 'langchain.llm_cache.update', (['prompt', 'llm_string', 'result'], {}), '(prompt, llm_string, result)\n', (1746, 1774), False, 'import langchain\n'), ((2664, 2686), 'langchain.callbacks.get_callback_manager', 'get_callback_manager', ([], {}), '()\n', (2684, 2686), False, 'from langchain.callbacks import get_callback_manager\n'), ((11346, 11361), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (11350, 11361), False, 'from pathlib import Path\n'), ((11682, 11717), 'json.dump', 'json.dump', (['prompt_dict', 'f'], {'indent': '(4)'}), '(prompt_dict, f, indent=4)\n', (11691, 11717), False, 'import json\n'), ((11820, 11871), 'yaml.dump', 'yaml.dump', (['prompt_dict', 'f'], {'default_flow_style': '(False)'}), '(prompt_dict, f, default_flow_style=False)\n', (11829, 11871), False, 'import yaml\n'), ((12705, 12726), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (12715, 12726), False, 'from langchain.schema import Generation, LLMResult\n')]
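The LLM convenience class in the row above only asks subclasses for _call (plus the _llm_type property); caching, callbacks, and generate all come from BaseLLM. As a quick illustration, here is a toy subclass — the class name and echo behaviour are invented, and the import path assumes the file shown is the usual langchain.llms.base module.

from typing import List, Optional

from langchain.llms.base import LLM  # assumed module path for the code above


class EchoLLM(LLM):
    """Toy LLM that returns the prompt unchanged, truncated at any stop sequence."""

    @property
    def _llm_type(self) -> str:
        return "echo"

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        text = prompt
        if stop:
            for token in stop:
                # Mimic real models: cut the completion at the first stop sequence.
                text = text.split(token)[0]
        return text


llm = EchoLLM()
print(llm("Hello, world", stop=[","]))  # -> "Hello"

Calling the instance goes through BaseLLM.generate, so the cache lookup and callback plumbing shown above run even for this toy model.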
"""Base interface for large language models to expose.""" import json from abc import ABC, abstractmethod from pathlib import Path from typing import Any, Dict, List, Mapping, Optional, Tuple, Union import yaml from pydantic import BaseModel, Extra, Field, validator import langchain from langchain.callbacks import get_callback_manager from langchain.callbacks.base import BaseCallbackManager from langchain.schema import Generation, LLMResult def _get_verbosity() -> bool: return langchain.verbose def get_prompts( params: Dict[str, Any], prompts: List[str] ) -> Tuple[Dict[int, List], str, List[int], List[str]]: """Get prompts that are already cached.""" llm_string = str(sorted([(k, v) for k, v in params.items()])) missing_prompts = [] missing_prompt_idxs = [] existing_prompts = {} for i, prompt in enumerate(prompts): if langchain.llm_cache is not None: cache_val = langchain.llm_cache.lookup(prompt, llm_string) if isinstance(cache_val, list): existing_prompts[i] = cache_val else: missing_prompts.append(prompt) missing_prompt_idxs.append(i) return existing_prompts, llm_string, missing_prompt_idxs, missing_prompts def update_cache( existing_prompts: Dict[int, List], llm_string: str, missing_prompt_idxs: List[int], new_results: LLMResult, prompts: List[str], ) -> Optional[dict]: """Update the cache and get the LLM output.""" for i, result in enumerate(new_results.generations): existing_prompts[missing_prompt_idxs[i]] = result prompt = prompts[missing_prompt_idxs[i]] if langchain.llm_cache is not None: langchain.llm_cache.update(prompt, llm_string, result) llm_output = new_results.llm_output return llm_output class BaseLLM(BaseModel, ABC): """LLM wrapper should take in a prompt and return a string.""" cache: Optional[bool] = None verbose: bool = Field(default_factory=_get_verbosity) """Whether to print out response text.""" callback_manager: BaseCallbackManager = Field(default_factory=get_callback_manager) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @validator("callback_manager", pre=True, always=True) def set_callback_manager( cls, callback_manager: Optional[BaseCallbackManager] ) -> BaseCallbackManager: """If callback manager is None, set it. This allows users to pass in None as callback manager, which is a nice UX. """ return callback_manager or get_callback_manager() @validator("verbose", pre=True, always=True) def set_verbose(cls, verbose: Optional[bool]) -> bool: """If verbose is None, set it. This allows users to pass in None as verbose to access the global setting. """ if verbose is None: return _get_verbosity() else: return verbose @abstractmethod def _generate( self, prompts: List[str], stop: Optional[List[str]] = None ) -> LLMResult: """Run the LLM on the given prompts.""" @abstractmethod async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None ) -> LLMResult: """Run the LLM on the given prompts.""" def generate( self, prompts: List[str], stop: Optional[List[str]] = None ) -> LLMResult: """Run the LLM on the given prompt and input.""" # If string is passed in directly no errors will be raised but outputs will # not make sense. if not isinstance(prompts, list): raise ValueError( "Argument 'prompts' is expected to be of type List[str], received" f" argument of type {type(prompts)}." 
) disregard_cache = self.cache is not None and not self.cache if langchain.llm_cache is None or disregard_cache: # This happens when langchain.cache is None, but self.cache is True if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but no cache found at `langchain.cache`." ) self.callback_manager.on_llm_start( {"name": self.__class__.__name__}, prompts, verbose=self.verbose ) try: output = self._generate(prompts, stop=stop) except (KeyboardInterrupt, Exception) as e: self.callback_manager.on_llm_error(e, verbose=self.verbose) raise e self.callback_manager.on_llm_end(output, verbose=self.verbose) return output params = self.dict() params["stop"] = stop ( existing_prompts, llm_string, missing_prompt_idxs, missing_prompts, ) = get_prompts(params, prompts) if len(missing_prompts) > 0: self.callback_manager.on_llm_start( {"name": self.__class__.__name__}, missing_prompts, verbose=self.verbose ) try: new_results = self._generate(missing_prompts, stop=stop) except (KeyboardInterrupt, Exception) as e: self.callback_manager.on_llm_error(e, verbose=self.verbose) raise e self.callback_manager.on_llm_end(new_results, verbose=self.verbose) llm_output = update_cache( existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts ) else: llm_output = {} generations = [existing_prompts[i] for i in range(len(prompts))] return LLMResult(generations=generations, llm_output=llm_output) async def agenerate( self, prompts: List[str], stop: Optional[List[str]] = None ) -> LLMResult: """Run the LLM on the given prompt and input.""" disregard_cache = self.cache is not None and not self.cache if langchain.llm_cache is None or disregard_cache: # This happens when langchain.cache is None, but self.cache is True if self.cache is not None and self.cache: raise ValueError( "Asked to cache, but no cache found at `langchain.cache`." 
) if self.callback_manager.is_async: await self.callback_manager.on_llm_start( {"name": self.__class__.__name__}, prompts, verbose=self.verbose ) else: self.callback_manager.on_llm_start( {"name": self.__class__.__name__}, prompts, verbose=self.verbose ) try: output = await self._agenerate(prompts, stop=stop) except (KeyboardInterrupt, Exception) as e: if self.callback_manager.is_async: await self.callback_manager.on_llm_error(e, verbose=self.verbose) else: self.callback_manager.on_llm_error(e, verbose=self.verbose) raise e if self.callback_manager.is_async: await self.callback_manager.on_llm_end(output, verbose=self.verbose) else: self.callback_manager.on_llm_end(output, verbose=self.verbose) return output params = self.dict() params["stop"] = stop ( existing_prompts, llm_string, missing_prompt_idxs, missing_prompts, ) = get_prompts(params, prompts) if len(missing_prompts) > 0: if self.callback_manager.is_async: await self.callback_manager.on_llm_start( {"name": self.__class__.__name__}, missing_prompts, verbose=self.verbose, ) else: self.callback_manager.on_llm_start( {"name": self.__class__.__name__}, missing_prompts, verbose=self.verbose, ) try: new_results = await self._agenerate(missing_prompts, stop=stop) except (KeyboardInterrupt, Exception) as e: if self.callback_manager.is_async: await self.callback_manager.on_llm_error(e, verbose=self.verbose) else: self.callback_manager.on_llm_error(e, verbose=self.verbose) raise e if self.callback_manager.is_async: await self.callback_manager.on_llm_end( new_results, verbose=self.verbose ) else: self.callback_manager.on_llm_end(new_results, verbose=self.verbose) llm_output = update_cache( existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts ) else: llm_output = {} generations = [existing_prompts[i] for i in range(len(prompts))] return LLMResult(generations=generations, llm_output=llm_output) def get_num_tokens(self, text: str) -> int: """Get the number of tokens present in the text.""" # TODO: this method may not be exact. # TODO: this method may differ based on model (eg codex). try: from transformers import GPT2TokenizerFast except ImportError: raise ValueError( "Could not import transformers python package. " "This is needed in order to calculate get_num_tokens. " "Please it install it with `pip install transformers`." ) # create a GPT-3 tokenizer instance tokenizer = GPT2TokenizerFast.from_pretrained("gpt2") # tokenize the text using the GPT-3 tokenizer tokenized_text = tokenizer.tokenize(text) # calculate the number of tokens in the tokenized text return len(tokenized_text) def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Check Cache and run the LLM on the given prompt and input.""" return self.generate([prompt], stop=stop).generations[0][0].text @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {} def __str__(self) -> str: """Get a string representation of the object for printing.""" cls_name = f"\033[1m{self.__class__.__name__}\033[0m" return f"{cls_name}\nParams: {self._identifying_params}" @property @abstractmethod def _llm_type(self) -> str: """Return type of llm.""" def dict(self, **kwargs: Any) -> Dict: """Return a dictionary of the LLM.""" starter_dict = dict(self._identifying_params) starter_dict["_type"] = self._llm_type return starter_dict def save(self, file_path: Union[Path, str]) -> None: """Save the LLM. Args: file_path: Path to file to save the LLM to. Example: .. 
code-block:: python llm.save(file_path="path/llm.yaml") """ # Convert file to Path object. if isinstance(file_path, str): save_path = Path(file_path) else: save_path = file_path directory_path = save_path.parent directory_path.mkdir(parents=True, exist_ok=True) # Fetch dictionary to save prompt_dict = self.dict() if save_path.suffix == ".json": with open(file_path, "w") as f: json.dump(prompt_dict, f, indent=4) elif save_path.suffix == ".yaml": with open(file_path, "w") as f: yaml.dump(prompt_dict, f, default_flow_style=False) else: raise ValueError(f"{save_path} must be json or yaml") class LLM(BaseLLM): """LLM class that expect subclasses to implement a simpler call method. The purpose of this class is to expose a simpler interface for working with LLMs, rather than expect the user to implement the full _generate method. """ @abstractmethod def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Run the LLM on the given prompt and input.""" def _generate( self, prompts: List[str], stop: Optional[List[str]] = None ) -> LLMResult: """Run the LLM on the given prompt and input.""" # TODO: add caching here. generations = [] for prompt in prompts: text = self._call(prompt, stop=stop) generations.append([Generation(text=text)]) return LLMResult(generations=generations) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None ) -> LLMResult: """Run the LLM on the given prompt and input.""" raise NotImplementedError("Async generation not implemented for this LLM.")
[ "langchain.schema.Generation", "langchain.llm_cache.update", "langchain.llm_cache.lookup", "langchain.schema.LLMResult", "langchain.callbacks.get_callback_manager" ]
[((1991, 2028), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (1996, 2028), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((2119, 2162), 'pydantic.Field', 'Field', ([], {'default_factory': 'get_callback_manager'}), '(default_factory=get_callback_manager)\n', (2124, 2162), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((2311, 2363), 'pydantic.validator', 'validator', (['"""callback_manager"""'], {'pre': '(True)', 'always': '(True)'}), "('callback_manager', pre=True, always=True)\n", (2320, 2363), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((2693, 2736), 'pydantic.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (2702, 2736), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((5769, 5826), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output'}), '(generations=generations, llm_output=llm_output)\n', (5778, 5826), False, 'from langchain.schema import Generation, LLMResult\n'), ((9134, 9191), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output'}), '(generations=generations, llm_output=llm_output)\n', (9143, 9191), False, 'from langchain.schema import Generation, LLMResult\n'), ((9826, 9867), 'transformers.GPT2TokenizerFast.from_pretrained', 'GPT2TokenizerFast.from_pretrained', (['"""gpt2"""'], {}), "('gpt2')\n", (9859, 9867), False, 'from transformers import GPT2TokenizerFast\n'), ((12744, 12778), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (12753, 12778), False, 'from langchain.schema import Generation, LLMResult\n'), ((932, 978), 'langchain.llm_cache.lookup', 'langchain.llm_cache.lookup', (['prompt', 'llm_string'], {}), '(prompt, llm_string)\n', (958, 978), False, 'import langchain\n'), ((1720, 1774), 'langchain.llm_cache.update', 'langchain.llm_cache.update', (['prompt', 'llm_string', 'result'], {}), '(prompt, llm_string, result)\n', (1746, 1774), False, 'import langchain\n'), ((2664, 2686), 'langchain.callbacks.get_callback_manager', 'get_callback_manager', ([], {}), '()\n', (2684, 2686), False, 'from langchain.callbacks import get_callback_manager\n'), ((11346, 11361), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (11350, 11361), False, 'from pathlib import Path\n'), ((11682, 11717), 'json.dump', 'json.dump', (['prompt_dict', 'f'], {'indent': '(4)'}), '(prompt_dict, f, indent=4)\n', (11691, 11717), False, 'import json\n'), ((11820, 11871), 'yaml.dump', 'yaml.dump', (['prompt_dict', 'f'], {'default_flow_style': '(False)'}), '(prompt_dict, f, default_flow_style=False)\n', (11829, 11871), False, 'import yaml\n'), ((12705, 12726), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (12715, 12726), False, 'from langchain.schema import Generation, LLMResult\n')]
import discord from discord import app_commands from discord.ext import commands import langchain from langchain.document_loaders import YoutubeLoader from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.chains.summarize import load_summarize_chain import torch class YoutubeSummaryCog(commands.Cog): def __init__(self, bot): self.bot = bot self.llm = self.bot.llm @app_commands.command(name="youtubesummary", description="Summarize a YouTube video given its URL") async def summarize(self, interaction: discord.Interaction, url: str): await interaction.response.defer() # Notifies the user that the bot is processing their command. await interaction.followup.send( embed=discord.Embed( title=f"{interaction.user.display_name} used Youtube Summary 📺", description=f"Summarizing {url} \nGenerating response\nPlease wait..", color=0x9C84EF ) ) try: # Load transcript loader = YoutubeLoader.from_youtube_url(url) transcript = loader.load() # Split text text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=50) texts = text_splitter.split_documents(transcript) # Create and configure chain chain = load_summarize_chain(llm=self.llm, chain_type="map_reduce", verbose=True) # chain.llm_chain.prompt.template = \ # """### Instruction: # Write a 1-3 paragraph summary the following: # "{text}" # ### Response: # 1-3 PARAGRAPH SUMMARY:""" # Run the chain and get summary summary = chain.run(texts) await interaction.followup.send(f'Summary:\n{summary}') except Exception as e: await interaction.channel.send(f'Sorry, an error occurred: {str(e)}') async def setup(bot): await bot.add_cog(YoutubeSummaryCog(bot))
[ "langchain.chains.summarize.load_summarize_chain", "langchain.document_loaders.YoutubeLoader.from_youtube_url", "langchain.text_splitter.RecursiveCharacterTextSplitter" ]
[((425, 528), 'discord.app_commands.command', 'app_commands.command', ([], {'name': '"""youtubesummary"""', 'description': '"""Summarize a YouTube video given its URL"""'}), "(name='youtubesummary', description=\n 'Summarize a YouTube video given its URL')\n", (445, 528), False, 'from discord import app_commands\n'), ((1074, 1109), 'langchain.document_loaders.YoutubeLoader.from_youtube_url', 'YoutubeLoader.from_youtube_url', (['url'], {}), '(url)\n', (1104, 1109), False, 'from langchain.document_loaders import YoutubeLoader\n'), ((1203, 1268), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(2000)', 'chunk_overlap': '(50)'}), '(chunk_size=2000, chunk_overlap=50)\n', (1233, 1268), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1393, 1466), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', ([], {'llm': 'self.llm', 'chain_type': '"""map_reduce"""', 'verbose': '(True)'}), "(llm=self.llm, chain_type='map_reduce', verbose=True)\n", (1413, 1466), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((772, 954), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""{interaction.user.display_name} used Youtube Summary 📺"""', 'description': 'f"""Summarizing {url} \nGenerating response\nPlease wait.."""', 'color': '(10257647)'}), '(title=\n f\'{interaction.user.display_name} used Youtube Summary 📺\', description=\n f"""Summarizing {url} \nGenerating response\nPlease wait..""", color=10257647\n )\n', (785, 954), False, 'import discord\n')]
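The same load, split, summarize pipeline the cog wires into Discord can be run on its own; a hedged sketch follows. It assumes an OpenAI API key in the environment plus the transcript dependencies that YoutubeLoader needs, and the video URL is a placeholder.

from langchain.chat_models import ChatOpenAI
from langchain.chains.summarize import load_summarize_chain
from langchain.document_loaders import YoutubeLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter

llm = ChatOpenAI(temperature=0)

# Load the transcript as Document objects
transcript = YoutubeLoader.from_youtube_url("https://www.youtube.com/watch?v=<VIDEO_ID>").load()

# Split into chunks small enough for the map step
texts = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=50).split_documents(transcript)

# Map-reduce summarization over the chunks, as in the cog
chain = load_summarize_chain(llm=llm, chain_type="map_reduce")
print(chain.run(texts))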
"""Base interface that all chains should implement.""" import json from abc import ABC, abstractmethod from pathlib import Path from typing import Any, Dict, List, Optional, Union import yaml from pydantic import BaseModel, Extra, Field, validator import langchain from langchain.callbacks import get_callback_manager from langchain.callbacks.base import BaseCallbackManager class Memory(BaseModel, ABC): """Base interface for memory in chains.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property @abstractmethod def memory_variables(self) -> List[str]: """Input keys this memory class will load dynamically.""" @abstractmethod def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]: """Return key-value pairs given the text input to the chain.""" @abstractmethod def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save the context of this model run to memory.""" @abstractmethod def clear(self) -> None: """Clear memory contents.""" def _get_verbosity() -> bool: return langchain.verbose class Chain(BaseModel, ABC): """Base interface that all chains should implement.""" memory: Optional[Memory] = None callback_manager: BaseCallbackManager = Field( default_factory=get_callback_manager, exclude=True ) verbose: bool = Field( default_factory=_get_verbosity ) # Whether to print the response text class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True @property def _chain_type(self) -> str: raise NotImplementedError("Saving not supported for this chain type.") @validator("callback_manager", pre=True, always=True) def set_callback_manager( cls, callback_manager: Optional[BaseCallbackManager] ) -> BaseCallbackManager: """If callback manager is None, set it. This allows users to pass in None as callback manager, which is a nice UX. """ return callback_manager or get_callback_manager() @validator("verbose", pre=True, always=True) def set_verbose(cls, verbose: Optional[bool]) -> bool: """If verbose is None, set it. This allows users to pass in None as verbose to access the global setting. """ if verbose is None: return _get_verbosity() else: return verbose @property @abstractmethod def input_keys(self) -> List[str]: """Input keys this chain expects.""" @property @abstractmethod def output_keys(self) -> List[str]: """Output keys this chain expects.""" def _validate_inputs(self, inputs: Dict[str, str]) -> None: """Check that all inputs are present.""" missing_keys = set(self.input_keys).difference(inputs) if missing_keys: raise ValueError(f"Missing some input keys: {missing_keys}") def _validate_outputs(self, outputs: Dict[str, str]) -> None: if set(outputs) != set(self.output_keys): raise ValueError( f"Did not get output keys that were expected. " f"Got: {set(outputs)}. Expected: {set(self.output_keys)}." ) @abstractmethod def _call(self, inputs: Dict[str, str]) -> Dict[str, str]: """Run the logic of this chain and return the output.""" async def _acall(self, inputs: Dict[str, str]) -> Dict[str, str]: """Run the logic of this chain and return the output.""" raise NotImplementedError("Async call not supported for this chain type.") def __call__( self, inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False ) -> Dict[str, Any]: """Run the logic of this chain and add to output if desired. Args: inputs: Dictionary of inputs, or single input if chain expects only one param. 
return_only_outputs: boolean for whether to return only outputs in the response. If True, only new keys generated by this chain will be returned. If False, both input keys and new keys generated by this chain will be returned. Defaults to False. """ inputs = self.prep_inputs(inputs) self.callback_manager.on_chain_start( {"name": self.__class__.__name__}, inputs, verbose=self.verbose, ) try: outputs = self._call(inputs) except (KeyboardInterrupt, Exception) as e: self.callback_manager.on_chain_error(e, verbose=self.verbose) raise e self.callback_manager.on_chain_end(outputs, verbose=self.verbose) return self.prep_outputs(inputs, outputs, return_only_outputs) async def acall( self, inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False ) -> Dict[str, Any]: """Run the logic of this chain and add to output if desired. Args: inputs: Dictionary of inputs, or single input if chain expects only one param. return_only_outputs: boolean for whether to return only outputs in the response. If True, only new keys generated by this chain will be returned. If False, both input keys and new keys generated by this chain will be returned. Defaults to False. """ inputs = self.prep_inputs(inputs) if self.callback_manager.is_async: await self.callback_manager.on_chain_start( {"name": self.__class__.__name__}, inputs, verbose=self.verbose, ) else: self.callback_manager.on_chain_start( {"name": self.__class__.__name__}, inputs, verbose=self.verbose, ) try: outputs = await self._acall(inputs) except (KeyboardInterrupt, Exception) as e: if self.callback_manager.is_async: await self.callback_manager.on_chain_error(e, verbose=self.verbose) else: self.callback_manager.on_chain_error(e, verbose=self.verbose) raise e if self.callback_manager.is_async: await self.callback_manager.on_chain_end(outputs, verbose=self.verbose) else: self.callback_manager.on_chain_end(outputs, verbose=self.verbose) return self.prep_outputs(inputs, outputs, return_only_outputs) def prep_outputs( self, inputs: Dict[str, str], outputs: Dict[str, str], return_only_outputs: bool = False, ) -> Dict[str, str]: """Validate and prep outputs.""" self._validate_outputs(outputs) if self.memory is not None: self.memory.save_context(inputs, outputs) if return_only_outputs: return outputs else: return {**inputs, **outputs} def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]: """Validate and prep inputs.""" if not isinstance(inputs, dict): _input_keys = set(self.input_keys) if self.memory is not None: # If there are multiple input keys, but some get set by memory so that # only one is not set, we can still figure out which key it is. _input_keys = _input_keys.difference(self.memory.memory_variables) if len(_input_keys) != 1: raise ValueError( f"A single string input was passed in, but this chain expects " f"multiple inputs ({_input_keys}). 
When a chain expects " f"multiple inputs, please call it by passing in a dictionary, " "eg `chain({'foo': 1, 'bar': 2})`" ) inputs = {list(_input_keys)[0]: inputs} if self.memory is not None: external_context = self.memory.load_memory_variables(inputs) inputs = dict(inputs, **external_context) self._validate_inputs(inputs) return inputs def apply(self, input_list: List[Dict[str, Any]]) -> List[Dict[str, str]]: """Call the chain on all inputs in the list.""" return [self(inputs) for inputs in input_list] def conversation(self, *args: str, **kwargs: str) -> List[str]: """Run the chain as text in, text out or multiple variables, text out.""" if len(self.output_keys) == 2: assert "output" in self.output_keys and "intermediate_steps" in self.output_keys keep_short = False if "keep_short" in kwargs: keep_short = kwargs.pop("keep_short") outputs = {} if args and not kwargs: if len(args) != 1: raise ValueError("`run` supports only one positional argument.") outputs = self(args[0]) if kwargs and not args: outputs = self(kwargs) intermediate = outputs.get("intermediate_steps") or [] conversation = [] for action, action_output in intermediate: action: str = action.log.strip() if not action.startswith(f"AI:"): action = f"AI: {action}" if keep_short: # Hide the internal conversation lines = action.split("\n") new_lines = [] for l in lines: for term in ["Assistant,"]: idx = l.lower().find(term.lower()) if idx >= 0: l = l[:idx] if l.lower().strip() == "ai:": l = "" if not l: continue new_lines.append(l) action = "\n".join(new_lines) conversation.append(action) if not keep_short or action_output.lstrip().startswith("Here is the edited image"): if not action_output.startswith("Assistant:"): action_output = f"Assistant: {action_output}" conversation.append(action_output) conversation.append("AI: " + outputs["output"]) return conversation if len(self.output_keys) != 1: raise ValueError( f"`run` not supported when there is not exactly " f"one output key. Got {self.output_keys}." ) if args and not kwargs: if len(args) != 1: raise ValueError("`run` supports only one positional argument.") return ["AI: " + self(args[0])[self.output_keys[0]]] if kwargs and not args: return ["AI: " + self(kwargs)[self.output_keys[0]]] raise ValueError( f"`run` supported with either positional arguments or keyword arguments" f" but not both. Got args: {args} and kwargs: {kwargs}." ) def run(self, *args: str, **kwargs: str) -> str: """Run the chain as text in, text out or multiple variables, text out.""" if len(self.output_keys) == 2: assert "output" in self.output_keys and "intermediate_steps" in self.output_keys outputs = {} if args and not kwargs: if len(args) != 1: raise ValueError("`run` supports only one positional argument.") outputs = self(args[0]) if kwargs and not args: outputs = self(kwargs) intermediate = outputs.get("intermediate_steps") or [] assistant = "" for action, action_output in intermediate: action: str = action.log.strip() if not action.startswith(f"AI:"): action = f"AI: {action}" if not action_output.startswith("Assistant:"): action_output = f"Assistant: {action_output}" assistant += "\n" + action + "\n" + action_output return assistant + "\n" + "AI: " + outputs["output"] if len(self.output_keys) != 1: raise ValueError( f"`run` not supported when there is not exactly " f"one output key. Got {self.output_keys}." 
) if args and not kwargs: if len(args) != 1: raise ValueError("`run` supports only one positional argument.") return self(args[0])[self.output_keys[0]] if kwargs and not args: return self(kwargs)[self.output_keys[0]] raise ValueError( f"`run` supported with either positional arguments or keyword arguments" f" but not both. Got args: {args} and kwargs: {kwargs}." ) async def arun(self, *args: str, **kwargs: str) -> str: """Run the chain as text in, text out or multiple variables, text out.""" if len(self.output_keys) != 1: raise ValueError( f"`run` not supported when there is not exactly " f"one output key. Got {self.output_keys}." ) if args and not kwargs: if len(args) != 1: raise ValueError("`run` supports only one positional argument.") return (await self.acall(args[0]))[self.output_keys[0]] if kwargs and not args: return (await self.acall(kwargs))[self.output_keys[0]] raise ValueError( f"`run` supported with either positional arguments or keyword arguments" f" but not both. Got args: {args} and kwargs: {kwargs}." ) def dict(self, **kwargs: Any) -> Dict: """Return dictionary representation of chain.""" if self.memory is not None: raise ValueError("Saving of memory is not yet supported.") _dict = super().dict() _dict["_type"] = self._chain_type return _dict def save(self, file_path: Union[Path, str]) -> None: """Save the chain. Args: file_path: Path to file to save the chain to. Example: .. code-block:: python chain.save(file_path="path/chain.yaml") """ # Convert file to Path object. if isinstance(file_path, str): save_path = Path(file_path) else: save_path = file_path directory_path = save_path.parent directory_path.mkdir(parents=True, exist_ok=True) # Fetch dictionary to save chain_dict = self.dict() if save_path.suffix == ".json": with open(file_path, "w") as f: json.dump(chain_dict, f, indent=4) elif save_path.suffix == ".yaml": with open(file_path, "w") as f: yaml.dump(chain_dict, f, default_flow_style=False) else: raise ValueError(f"{save_path} must be json or yaml")
[ "langchain.callbacks.get_callback_manager" ]
[((1401, 1458), 'pydantic.Field', 'Field', ([], {'default_factory': 'get_callback_manager', 'exclude': '(True)'}), '(default_factory=get_callback_manager, exclude=True)\n', (1406, 1458), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((1493, 1530), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (1498, 1530), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((1830, 1882), 'pydantic.validator', 'validator', (['"""callback_manager"""'], {'pre': '(True)', 'always': '(True)'}), "('callback_manager', pre=True, always=True)\n", (1839, 1882), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((2212, 2255), 'pydantic.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (2221, 2255), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((2183, 2205), 'langchain.callbacks.get_callback_manager', 'get_callback_manager', ([], {}), '()\n', (2203, 2205), False, 'from langchain.callbacks import get_callback_manager\n'), ((14634, 14649), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (14638, 14649), False, 'from pathlib import Path\n'), ((14969, 15003), 'json.dump', 'json.dump', (['chain_dict', 'f'], {'indent': '(4)'}), '(chain_dict, f, indent=4)\n', (14978, 15003), False, 'import json\n'), ((15106, 15156), 'yaml.dump', 'yaml.dump', (['chain_dict', 'f'], {'default_flow_style': '(False)'}), '(chain_dict, f, default_flow_style=False)\n', (15115, 15156), False, 'import yaml\n')]
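The abstract interface above (input_keys, output_keys, _call) is what a custom chain implements. Below is a minimal sketch written against the stock langchain.chains.base.Chain import path; GreetingChain and its keys are made-up names for illustration only.

from typing import Dict, List

from langchain.chains.base import Chain


class GreetingChain(Chain):
    """Toy chain: one `name` input, one `greeting` output (hypothetical example)."""

    @property
    def input_keys(self) -> List[str]:
        return ["name"]

    @property
    def output_keys(self) -> List[str]:
        return ["greeting"]

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        return {"greeting": f"Hello, {inputs['name']}!"}


chain = GreetingChain()
print(chain.run(name="Ada"))   # single output key, so run() returns just the string
print(chain({"name": "Ada"}))  # __call__ returns the merged input/output dict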
#!/Users/mark/dev/ml/langchain/read_github/langchain_github/env/bin/python # change above to the location of your local Python venv installation import sys, os, shutil parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) sys.path.append(parent_dir) import pathlib from langchain.docstore.document import Document import langchain.text_splitter as text_splitter from langchain.chat_models import ChatOpenAI from my_llm import standards as my_llm from my_llm.langchain_class import PubSubChatMessageHistory from langchain import PromptTemplate from langchain.document_loaders.unstructured import UnstructuredFileLoader import logging chat = ChatOpenAI(temperature=0) CODE_EXTENSIONS = [".py", ".js", ".java", ".c", ".cpp", ".cc", ".cxx", ".hpp", ".h", ".cs", ".m", ".swift", ".go", ".rs", ".rb", ".php", ".pl", ".kt", ".kts", ".ts", ".scala", ".hs", ".lua", ".sh", ".bash", ".r", ".m", ".sql", ".html", ".css", ".xml", ".json", ".yaml", ".yml"] # Get Markdown documents from a repository def get_repo_docs(repo_path, extension, memory, ignore=None, resummarise=False, verbose=False): repo = pathlib.Path(repo_path) ignore_path = "" if ignore is not None: ignore_path = repo / ignore if not ignore_path.is_dir(): print("WARNING: --ignore must be a directory") print('Ignoring %s' % ignore_path) exts = extension.split(",") for ext in exts: the_glob = f"**/*{ext}" matched_files = list(repo.glob(the_glob)) num_matched_files = len(matched_files) print(f"Number of matched {ext} files: {num_matched_files}") # Generate summary md files if ext!=".md": k = 0 for non_md_file in repo.glob(the_glob): k += 1 if str(non_md_file).startswith(str(ignore_path)): continue generate_summary(non_md_file, memory, resummarise=resummarise, verbose=verbose) if verbose: print(f"Generated summary for a {ext} file: {k} of {num_matched_files} done.") # Iterate over all files in the repo (including subdirectories) print(f"Reading {ext} files") i = 0 j = 0 for md_file in repo.glob(the_glob): if str(md_file).startswith(str(ignore_path)): j += 1 continue i += 1 # Read the content of the file yield read_file_to_document(md_file) if verbose: print(f"Read {i} files so far and ignored {j}: total: {num_matched_files}") print(f"Read {i} and ignored {j} {ext} files.") print("Read all files") def read_file_to_document(md_file, split=False, metadata: dict = None): try: loader = UnstructuredFileLoader(md_file) if split: # only supported for some file types docs = loader.load_and_split() else: docs = loader.load() except ValueError as e: if "file type is not supported in partition" in str(e): # Convert the file to .txt and try again txt_file = convert_to_txt(md_file) loader = UnstructuredFileLoader(txt_file) if split: docs = loader.load_and_split() else: docs = loader.load() os.remove(txt_file) # Remove the temporary .txt file after processing else: raise e for doc in docs: if metadata is not None: doc.metadata.update(metadata) return docs def convert_to_txt(file_path): file_dir, file_name = os.path.split(file_path) file_base, file_ext = os.path.splitext(file_name) txt_file = os.path.join(file_dir, f"{file_base}.txt") shutil.copyfile(file_path, txt_file) return txt_file def code_prompt(): # create prompt to pass in to LLM template = """ Summarise what the code does below. 
Use Markdown in your output with the following template: # a title summary of script purpose ## keywords Comma seperated list of 3-4 keywords suitable for this code ## classes A description of each class ## functions/methods How the functions or methods of a class work including listing the Inputs and outputs for each function ## code examples of use The code to summarise is here: {txt} """ return PromptTemplate( input_variables=["txt"], template=template, ) def text_prompt(): # create prompt to pass in to LLM template = """ Summarise the text below, and add some keywords at the bottom to describe the overall purpose of the text. The text to summarise is here: {txt} """ return PromptTemplate( input_variables=["txt"], template=template, ) # Function to summarise code from the OpenAI API def generate_summary(a_file: pathlib.Path, memory, resummarise: bool=False, verbose: bool=False): if a_file.is_dir(): raise ValueError(f"a_file must not be a directory: {a_file}") new_file_name = a_file.with_suffix('.md') if os.path.isfile(new_file_name) and not resummarise: if verbose: print(f"Skipping generating summary as found existing code summary file: {new_file_name}") return try: with open(a_file, "r") as file: file_text = file.read() except Exception as e: print(f"Error generating summary: {str(e)}") return if len(file_text) < 10: if verbose: print(f"Skipping generation as not enough information. Got: {file_text}") return document = Document(page_content=file_text, metadata = {"source": os.path.abspath(a_file)}) source_chunks = chunk_doc_to_docs([document], a_file.suffix) code = True if str(a_file.suffix).lower() in CODE_EXTENSIONS else False if code: print("================================================") print(f"Requesting code summary for {a_file} ") print("================================================") prompt = code_prompt() else: print("================================================") print(f"Requesting text summary for {a_file} ") print("================================================") prompt = text_prompt() num_chunks = len(source_chunks) i=0 for chunk in source_chunks: logging.info(f"Summarising chunk {i} of {num_chunks} of {a_file}") i += 1 summary = my_llm.request_llm( prompt.format(txt=chunk.page_content), chat, memory, metadata={'task':'summarise_chunk'}) my_llm.save_to_file(new_file_name, summary + '\n\n', type = "a") return pathlib.Path(new_file_name) # Get source chunks from a repository def get_source_docs(repo_path, extension, memory, ignore, resummarise, verbose): source_chunks = [] for source in get_repo_docs(repo_path, extension=extension, memory=memory, ignore=ignore, resummarise=resummarise, verbose=verbose): splitter = choose_splitter(extension) for chunk in splitter.split_text(source.page_content): source_chunks.append(Document(page_content=chunk, metadata=source.metadata)) return source_chunks def choose_splitter(extension: str, chunk_size: int=1024, chunk_overlap:int=0): if extension == ".py": return text_splitter.PythonCodeTextSplitter() elif extension == ".md": return text_splitter.MarkdownTextSplitter() return text_splitter.RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap) def setup_memory(config): memory = PubSubChatMessageHistory("qna_documents") if config.get('bucket_name', None) is not None: memory.set_bucket(config.get('bucket_name')) memory.load_vectorstore_memory() if config['reindex']: # Create a new Chroma DB exts = '.md,.py' if config['ext']: exts = config['ext'] source_chunks = get_source_docs(config['repo'], 
extension=exts, memory=memory, ignore=config['ignore'], resummarise=config['resummarise'], verbose=config['verbose']) memory.save_vectorstore_memory(source_chunks, verbose=config['verbose']) return memory def document_to_dict(document): return { 'page_content': document.page_content, 'metadata': document.metadata, } def process_input(user_input: str, verbose: bool =True, bucket_name: str = None, chat_history = None): # more only needed if you need to recreate the vectorstore which we wont with web app config = { 'reindex': False, 'bucket_name': bucket_name } if verbose: print(f"user_input: {user_input}") print(f"process_input config: {config}") logging.info(f"user_input: {user_input}") logging.info(f"process_input config: {config}") memory = setup_memory(config) answer = memory.question_memory(user_input, llm=chat, verbose=verbose, chat_history = chat_history) response = {'result': 'No answer found'} if answer is not None: response = {'result': answer['result']} if answer.get('source_documents') is not None: source_documents = [document_to_dict(doc) for doc in answer['source_documents']] response['source_documents'] = source_documents else: logging.info('No source documents found') return response def add_single_file(filename: str, bucket_name, verbose=False): config = { 'reindex': False, # as we will trigger file summary directly 'bucket_name': bucket_name } filename = pathlib.Path(filename) if not filename.is_file(): raise ValueError(f"Filename was not a valid file path: {filename}") docs = read_file_to_document(filename) chunks = chunk_doc_to_docs(docs, filename.suffix) memory = setup_memory(config) docs_output = [] chunk_length = len(chunks) i = 0 for chunk in chunks: logging.info(f"Uploading chunk {i} of size {chunk_length} for {filename.name}") i+=1 memory.add_user_message(chunk.page_content, metadata={"task": "singlefile load original", "source": filename.name}) docs_output.append(chunk.page_content) return docs_output def summarise_single_file(filename: str, bucket_name, verbose=False): config = { 'reindex': False, # as we will trigger file summary directly 'bucket_name': bucket_name } filename = pathlib.Path(filename) if not filename.is_file(): raise ValueError(f"Filename was not a valid file path: {filename}") memory = setup_memory(config) summary_filename = generate_summary(filename, memory, resummarise=True, verbose=verbose) if not summary_filename: return f"No summary generated for {str(filename)}" documents = read_file_to_document(summary_filename) chunks = chunk_doc_to_docs(documents, filename.suffix) output_content = "" for chunk in chunks: memory.add_user_message(chunk.page_content, metadata={"task": "singlefile load summary", "source": filename.name}) output_content += chunk.page_content + "\n\n" return output_content def chunk_doc_to_docs(documents: list, extension: str = ".md"): """Turns a Document object into a list of many Document chunks""" for document in documents: source_chunks = [] splitter = choose_splitter(extension) for chunk in splitter.split_text(document.page_content): source_chunks.append(Document(page_content=chunk, metadata=document.metadata)) return source_chunks def main(config): memory = setup_memory(config) while True: print('\n\033[31m' + '=Ask a question. 
CTRL + C to quit.') print ("=If I don't know, tell me the right answer so I can learn and answer more accurately next time" + '\033[m') user_input = input() print('\033[31m') answer = memory.question_memory(user_input, llm=chat, verbose=config['verbose']) if answer is not None: if answer.get('source_documents') is not None: print('\n== Document sources:') i = 0 for doc in answer.get('source_documents'): i += 1 print(f'-- Source {i}') print(f' - page_content:\n {doc.page_content}') if config['verbose']: print(f' - metadata: \n{doc.metadata}') print('\n================================') print('== Answer:\n\n' + answer['result']) else: print('Sorry') print('\033[m') if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description="Chat with a GitHub repository", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("repo", help="The GitHub repository on local disk") parser.add_argument("--reindex", action="store_true", help="Whether to re-index the doc database that supply context to the Q&A") parser.add_argument("--ext", help="Comma separated list of file extensions to include. Defaults to '.md,.py'") parser.add_argument("--ignore", help="Directory to ignore file imports from. Defaults to 'env/'") parser.add_argument("--resummarise", action="store_true", help="Recreate the code.md files describing the code") parser.add_argument("--verbose", action="store_true", help="Include metadata such as sources in replies") parser.add_argument("--bucket", help="A Google Cloud Storage bucket name e.g. ga://your-bucket-name") args = parser.parse_args() config = vars(args) try: main(config) except KeyboardInterrupt: print(' - User exit.') sys.exit(1)
[ "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.docstore.document.Document", "langchain.text_splitter.MarkdownTextSplitter", "langchain.chat_models.ChatOpenAI", "langchain.document_loaders.unstructured.UnstructuredFileLoader", "langchain.text_splitter.PythonCodeTextSplitter", "langchain.PromptTemplate" ]
[((245, 272), 'sys.path.append', 'sys.path.append', (['parent_dir'], {}), '(parent_dir)\n', (260, 272), False, 'import sys, os, shutil\n'), ((667, 692), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (677, 692), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1204, 1227), 'pathlib.Path', 'pathlib.Path', (['repo_path'], {}), '(repo_path)\n', (1216, 1227), False, 'import pathlib\n'), ((3797, 3821), 'os.path.split', 'os.path.split', (['file_path'], {}), '(file_path)\n', (3810, 3821), False, 'import sys, os, shutil\n'), ((3848, 3875), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (3864, 3875), False, 'import sys, os, shutil\n'), ((3891, 3933), 'os.path.join', 'os.path.join', (['file_dir', 'f"""{file_base}.txt"""'], {}), "(file_dir, f'{file_base}.txt')\n", (3903, 3933), False, 'import sys, os, shutil\n'), ((3938, 3974), 'shutil.copyfile', 'shutil.copyfile', (['file_path', 'txt_file'], {}), '(file_path, txt_file)\n', (3953, 3974), False, 'import sys, os, shutil\n'), ((4521, 4579), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['txt']", 'template': 'template'}), "(input_variables=['txt'], template=template)\n", (4535, 4579), False, 'from langchain import PromptTemplate\n'), ((4840, 4898), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['txt']", 'template': 'template'}), "(input_variables=['txt'], template=template)\n", (4854, 4898), False, 'from langchain import PromptTemplate\n'), ((6905, 6932), 'pathlib.Path', 'pathlib.Path', (['new_file_name'], {}), '(new_file_name)\n', (6917, 6932), False, 'import pathlib\n'), ((7874, 7974), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'text_splitter.RecursiveCharacterTextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size,\n chunk_overlap=chunk_overlap)\n', (7918, 7974), True, 'import langchain.text_splitter as text_splitter\n'), ((8013, 8054), 'my_llm.langchain_class.PubSubChatMessageHistory', 'PubSubChatMessageHistory', (['"""qna_documents"""'], {}), "('qna_documents')\n", (8037, 8054), False, 'from my_llm.langchain_class import PubSubChatMessageHistory\n'), ((9411, 9452), 'logging.info', 'logging.info', (['f"""user_input: {user_input}"""'], {}), "(f'user_input: {user_input}')\n", (9423, 9452), False, 'import logging\n'), ((9457, 9504), 'logging.info', 'logging.info', (['f"""process_input config: {config}"""'], {}), "(f'process_input config: {config}')\n", (9469, 9504), False, 'import logging\n'), ((10381, 10403), 'pathlib.Path', 'pathlib.Path', (['filename'], {}), '(filename)\n', (10393, 10403), False, 'import pathlib\n'), ((11324, 11346), 'pathlib.Path', 'pathlib.Path', (['filename'], {}), '(filename)\n', (11336, 11346), False, 'import pathlib\n'), ((13800, 13928), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Chat with a GitHub repository"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Chat with a GitHub repository',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (13823, 13928), False, 'import argparse\n'), ((211, 236), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (226, 236), False, 'import sys, os, shutil\n'), ((2949, 2980), 'langchain.document_loaders.unstructured.UnstructuredFileLoader', 'UnstructuredFileLoader', (['md_file'], {}), '(md_file)\n', (2971, 2980), False, 'from langchain.document_loaders.unstructured import 
UnstructuredFileLoader\n'), ((5232, 5261), 'os.path.isfile', 'os.path.isfile', (['new_file_name'], {}), '(new_file_name)\n', (5246, 5261), False, 'import sys, os, shutil\n'), ((6551, 6617), 'logging.info', 'logging.info', (['f"""Summarising chunk {i} of {num_chunks} of {a_file}"""'], {}), "(f'Summarising chunk {i} of {num_chunks} of {a_file}')\n", (6563, 6617), False, 'import logging\n'), ((6824, 6886), 'my_llm.standards.save_to_file', 'my_llm.save_to_file', (['new_file_name', "(summary + '\\n\\n')"], {'type': '"""a"""'}), "(new_file_name, summary + '\\n\\n', type='a')\n", (6843, 6886), True, 'from my_llm import standards as my_llm\n'), ((7738, 7776), 'langchain.text_splitter.PythonCodeTextSplitter', 'text_splitter.PythonCodeTextSplitter', ([], {}), '()\n', (7774, 7776), True, 'import langchain.text_splitter as text_splitter\n'), ((10744, 10823), 'logging.info', 'logging.info', (['f"""Uploading chunk {i} of size {chunk_length} for {filename.name}"""'], {}), "(f'Uploading chunk {i} of size {chunk_length} for {filename.name}')\n", (10756, 10823), False, 'import logging\n'), ((7821, 7857), 'langchain.text_splitter.MarkdownTextSplitter', 'text_splitter.MarkdownTextSplitter', ([], {}), '()\n', (7855, 7857), True, 'import langchain.text_splitter as text_splitter\n'), ((10113, 10154), 'logging.info', 'logging.info', (['"""No source documents found"""'], {}), "('No source documents found')\n", (10125, 10154), False, 'import logging\n'), ((14903, 14914), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (14911, 14914), False, 'import sys, os, shutil\n'), ((3351, 3383), 'langchain.document_loaders.unstructured.UnstructuredFileLoader', 'UnstructuredFileLoader', (['txt_file'], {}), '(txt_file)\n', (3373, 3383), False, 'from langchain.document_loaders.unstructured import UnstructuredFileLoader\n'), ((3520, 3539), 'os.remove', 'os.remove', (['txt_file'], {}), '(txt_file)\n', (3529, 3539), False, 'import sys, os, shutil\n'), ((5830, 5853), 'os.path.abspath', 'os.path.abspath', (['a_file'], {}), '(a_file)\n', (5845, 5853), False, 'import sys, os, shutil\n'), ((7533, 7587), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'chunk', 'metadata': 'source.metadata'}), '(page_content=chunk, metadata=source.metadata)\n', (7541, 7587), False, 'from langchain.docstore.document import Document\n'), ((12585, 12641), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'chunk', 'metadata': 'document.metadata'}), '(page_content=chunk, metadata=document.metadata)\n', (12593, 12641), False, 'from langchain.docstore.document import Document\n')]
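A hedged sketch of the chunking path used above, in isolation: load one file with UnstructuredFileLoader, then split it the way choose_splitter() would for Markdown. It assumes the unstructured package is installed, and README.md is a placeholder path.

import langchain.text_splitter as text_splitter
from langchain.document_loaders.unstructured import UnstructuredFileLoader

# Load one file into Document objects
docs = UnstructuredFileLoader("README.md").load()

# Markdown gets the Markdown-aware splitter, mirroring choose_splitter() above
splitter = text_splitter.MarkdownTextSplitter()
chunks = [chunk for doc in docs for chunk in splitter.split_text(doc.page_content)]
print(f"Split into {len(chunks)} chunks")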
import os import json from typing import List from dotenv import load_dotenv from pydantic import BaseModel, Field from supabase.client import Client, create_client from langchain.chat_models import ChatOpenAI from langchain.embeddings.openai import OpenAIEmbeddings from langchain.tools import StructuredTool from langchain.chains.openai_functions import create_structured_output_chain from langchain.prompts import ( ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, ) import langchain load_dotenv() # Set debug to True to see A LOT of details of langchain's inner workings # langchain.debug = True # The name of the table in Supabase, where the vectors are stored matchVectorFunctionName = "match_embeddings" # Create the supabase client SUPABASE_URL = os.getenv("SUPABASE_URL") SUPABASE_KEY = os.getenv("SUPABASE_KEY") supabase: Client = create_client(SUPABASE_URL, SUPABASE_KEY) class ToolInputSchema(BaseModel): question: str = Field(..., description="A fully formed question.") class KnowledgeAnswer(BaseModel): answer: str = Field(..., description="The answer to the question.") sources: List[str] = Field( ..., description="The sources which contributed to the answer.", ) llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k", temperature=0.3) prompt_msgs = [ SystemMessagePromptTemplate.from_template( """You're an elite algorithm, answering queries based solely on given context. If the context lacks the answer, state ignorance. If you are not 100% sure tell the user. Context: {context}""" ), HumanMessagePromptTemplate.from_template("{question}"), ] prompt = ChatPromptTemplate.from_messages(prompt_msgs) chain = create_structured_output_chain(KnowledgeAnswer, llm, prompt) def get_answer(question: str) -> str: try: vectors = OpenAIEmbeddings().embed_documents([question]) embeddings = supabase.rpc( matchVectorFunctionName, dict(query_embedding=vectors[0], match_count=7) ).execute() print(f"⚡ Retrieved {len(embeddings.data)} vectors from Supabase:") for entry in embeddings.data: print("🔖 Title:", entry["metadata"]["title"]) print("🌐 Source:", entry["metadata"]["source"]) print("📊 Similarity:", entry["similarity"]) print("📄 Content:", entry["content"].replace("\n", " ")[:100] + "...") print("-" * 50) result = chain.run(context=json.dumps(embeddings.data), question=question) print("📝 Result of knowledge extraction chain:", result) return f"""Answer: {result.answer} Sources: {json.dumps(result.sources)} """ except Exception as e: print(e) return "The wiki knowledgebase is currently not available. We are working on it. Tell the user to use the wiki directly. https://www.defichainwiki.com/" description = """Use this if you need to answer any question about DeFiChain which does not require live-data. Make sure to include the source of the answer in your response.""" wikiTool = StructuredTool( name="defichain_wiki_knowledge", description=description, func=get_answer, args_schema=ToolInputSchema, ) if __name__ == "__main__": while True: question = input( "Ask something, that can be answered using information from DeFiChainWiki: " ) print("✅", get_answer(question))
[ "langchain.chains.openai_functions.create_structured_output_chain", "langchain.tools.StructuredTool", "langchain.prompts.HumanMessagePromptTemplate.from_template", "langchain.chat_models.ChatOpenAI", "langchain.prompts.ChatPromptTemplate.from_messages", "langchain.prompts.SystemMessagePromptTemplate.from_template", "langchain.embeddings.openai.OpenAIEmbeddings" ]
[((528, 541), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (539, 541), False, 'from dotenv import load_dotenv\n'), ((799, 824), 'os.getenv', 'os.getenv', (['"""SUPABASE_URL"""'], {}), "('SUPABASE_URL')\n", (808, 824), False, 'import os\n'), ((840, 865), 'os.getenv', 'os.getenv', (['"""SUPABASE_KEY"""'], {}), "('SUPABASE_KEY')\n", (849, 865), False, 'import os\n'), ((885, 926), 'supabase.client.create_client', 'create_client', (['SUPABASE_URL', 'SUPABASE_KEY'], {}), '(SUPABASE_URL, SUPABASE_KEY)\n', (898, 926), False, 'from supabase.client import Client, create_client\n'), ((1269, 1328), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo-16k"""', 'temperature': '(0.3)'}), "(model_name='gpt-3.5-turbo-16k', temperature=0.3)\n", (1279, 1328), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1687, 1732), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['prompt_msgs'], {}), '(prompt_msgs)\n', (1719, 1732), False, 'from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate\n'), ((1742, 1802), 'langchain.chains.openai_functions.create_structured_output_chain', 'create_structured_output_chain', (['KnowledgeAnswer', 'llm', 'prompt'], {}), '(KnowledgeAnswer, llm, prompt)\n', (1772, 1802), False, 'from langchain.chains.openai_functions import create_structured_output_chain\n'), ((3106, 3228), 'langchain.tools.StructuredTool', 'StructuredTool', ([], {'name': '"""defichain_wiki_knowledge"""', 'description': 'description', 'func': 'get_answer', 'args_schema': 'ToolInputSchema'}), "(name='defichain_wiki_knowledge', description=description,\n func=get_answer, args_schema=ToolInputSchema)\n", (3120, 3228), False, 'from langchain.tools import StructuredTool\n'), ((983, 1033), 'pydantic.Field', 'Field', (['...'], {'description': '"""A fully formed question."""'}), "(..., description='A fully formed question.')\n", (988, 1033), False, 'from pydantic import BaseModel, Field\n'), ((1088, 1141), 'pydantic.Field', 'Field', (['...'], {'description': '"""The answer to the question."""'}), "(..., description='The answer to the question.')\n", (1093, 1141), False, 'from pydantic import BaseModel, Field\n'), ((1167, 1237), 'pydantic.Field', 'Field', (['...'], {'description': '"""The sources which contributed to the answer."""'}), "(..., description='The sources which contributed to the answer.')\n", (1172, 1237), False, 'from pydantic import BaseModel, Field\n'), ((1350, 1610), 'langchain.prompts.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['"""You\'re an elite algorithm, answering queries based solely on given context. If the context lacks the answer, state ignorance. If you are not 100% sure tell the user.\n\n Context:\n {context}"""'], {}), '(\n """You\'re an elite algorithm, answering queries based solely on given context. If the context lacks the answer, state ignorance. 
If you are not 100% sure tell the user.\n\n Context:\n {context}"""\n )\n', (1391, 1610), False, 'from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate\n'), ((1620, 1674), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{question}"""'], {}), "('{question}')\n", (1660, 1674), False, 'from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate\n'), ((1870, 1888), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1886, 1888), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((2493, 2520), 'json.dumps', 'json.dumps', (['embeddings.data'], {}), '(embeddings.data)\n', (2503, 2520), False, 'import json\n'), ((2684, 2710), 'json.dumps', 'json.dumps', (['result.sources'], {}), '(result.sources)\n', (2694, 2710), False, 'import json\n')]
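The StructuredTool above is meant to be handed to an agent rather than called directly. The following is a minimal sketch of that wiring; it assumes the classic initialize_agent / AgentType API from the same LangChain generation as the script, and reuses the llm and wikiTool objects defined above.

from langchain.agents import AgentType, initialize_agent

# Illustrative wiring: give the agent the wiki tool and let it decide when to call it.
agent = initialize_agent(
    tools=[wikiTool],
    llm=llm,
    agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)

agent.run("What is DeFiChain and where is that documented?")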
import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts.prompt import PromptTemplate
from langchain.callbacks import get_openai_callback

# fix Error: module 'langchain' has no attribute 'verbose'
import langchain

langchain.verbose = False


class Chatbot:
    def __init__(self, model_name, temperature, vectors):
        self.model_name = model_name
        self.temperature = temperature
        self.vectors = vectors

    qa_template = """
        You are a helpful AI assistant named Robby. The user gives you a file its content is represented by the following pieces of context, use them to answer the question at the end.
        If you don't know the answer, just say you don't know. Do NOT try to make up an answer.
        If the question is not related to the context, politely respond that you are tuned to only answer questions that are related to the context.
        Use as much detail as possible when responding.

        context: {context}
        =========
        question: {question}
        ======
        """

    QA_PROMPT = PromptTemplate(template=qa_template, input_variables=["context", "question"])

    def conversational_chat(self, query):
        """
        Start a conversational chat with a model via Langchain
        """
        llm = ChatOpenAI(model_name=self.model_name, temperature=self.temperature)

        retriever = self.vectors.as_retriever()

        chain = ConversationalRetrievalChain.from_llm(
            llm=llm,
            retriever=retriever,
            verbose=True,
            return_source_documents=True,
            max_tokens_limit=4097,
            combine_docs_chain_kwargs={"prompt": self.QA_PROMPT},
        )

        chain_input = {"question": query, "chat_history": st.session_state["history"]}
        result = chain(chain_input)

        st.session_state["history"].append((query, result["answer"]))
        # count_tokens_chain(chain, chain_input)
        return result["answer"]


def count_tokens_chain(chain, query):
    with get_openai_callback() as cb:
        result = chain.run(query)
        st.write(f"###### Tokens used in this conversation : {cb.total_tokens} tokens")
    return result
[ "langchain.chains.ConversationalRetrievalChain.from_llm", "langchain.prompts.prompt.PromptTemplate", "langchain.callbacks.get_openai_callback", "langchain.chat_models.ChatOpenAI" ]
[((1142, 1219), 'langchain.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'template': 'qa_template', 'input_variables': "['context', 'question']"}), "(template=qa_template, input_variables=['context', 'question'])\n", (1156, 1219), False, 'from langchain.prompts.prompt import PromptTemplate\n'), ((1364, 1432), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'self.model_name', 'temperature': 'self.temperature'}), '(model_name=self.model_name, temperature=self.temperature)\n', (1374, 1432), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1500, 1697), 'langchain.chains.ConversationalRetrievalChain.from_llm', 'ConversationalRetrievalChain.from_llm', ([], {'llm': 'llm', 'retriever': 'retriever', 'verbose': '(True)', 'return_source_documents': '(True)', 'max_tokens_limit': '(4097)', 'combine_docs_chain_kwargs': "{'prompt': self.QA_PROMPT}"}), "(llm=llm, retriever=retriever, verbose\n =True, return_source_documents=True, max_tokens_limit=4097,\n combine_docs_chain_kwargs={'prompt': self.QA_PROMPT})\n", (1537, 1697), False, 'from langchain.chains import ConversationalRetrievalChain\n'), ((2025, 2046), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (2044, 2046), False, 'from langchain.callbacks import get_openai_callback\n'), ((2096, 2175), 'streamlit.write', 'st.write', (['f"""###### Tokens used in this conversation : {cb.total_tokens} tokens"""'], {}), "(f'###### Tokens used in this conversation : {cb.total_tokens} tokens')\n", (2104, 2175), True, 'import streamlit as st\n')]
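Chatbot only assumes that st.session_state["history"] exists and that vectors exposes an as_retriever() method, so any LangChain vector store can drive it. Below is a minimal usage sketch, assuming a small FAISS index built on the fly and a Streamlit page that initialises the history list; the sample text and model name are placeholders.

import streamlit as st
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS

if "history" not in st.session_state:
    st.session_state["history"] = []

# Illustrative index: any vector store with as_retriever() works here.
vectors = FAISS.from_texts(["Robby is a document chatbot."], OpenAIEmbeddings())
bot = Chatbot(model_name="gpt-3.5-turbo", temperature=0.0, vectors=vectors)
st.write(bot.conversational_chat("Who is Robby?"))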
""" A simple CUI application to visualize and query a customer database using the `textual` package. """ from dataclasses import dataclass import langchain from langchain.cache import SQLiteCache from langchain.llms import OpenAI from textual.app import App, ComposeResult from textual.containers import Horizontal from textual.widgets import Button, DataTable, Footer, Header, Input from llm_strategy import llm_strategy langchain.llm_cache = SQLiteCache() base_llm = OpenAI(max_tokens=1024) @llm_strategy(base_llm) @dataclass class Customer: key: str first_name: str last_name: str birthdate: str address: str @property def age(self: "Customer") -> int: """Return the current age of the customer. This is a computed property based on `birthdate` and the current year (2022). """ raise NotImplementedError() @dataclass class CustomerDatabase: customers: list[Customer] def find_customer_key(self: "CustomerDatabase", query: str) -> list[str]: """Find the keys of the customers that match a natural language query best (sorted by closeness to the match). We support semantic queries instead of SQL, so we can search for things like "the customer that was born in 1990". Args: query: Natural language query Returns: The index of the best matching customer in the database. """ raise NotImplementedError() def load(self: "CustomerDatabase"): """Load the customer database from a file.""" raise NotImplementedError() def store(self: "CustomerDatabase"): """Store the customer database to a file.""" raise NotImplementedError() @llm_strategy(base_llm) @dataclass class MockCustomerDatabase(CustomerDatabase): def load(self): self.customers = self.create_mock_customers(10) def store(self): pass @staticmethod def create_mock_customers(num_customers: int = 1) -> list[Customer]: """ Create mock customers with believable data (our customers are world citizens). """ raise NotImplementedError() class CustomerDatabaseApp(App): """A simple textual application to visualize and query a customer database. We show all the customers in a table and allow the user to query the database using natural language in a search box at the bottom of the screen. 
""" PRIORITY_BINDINGS = False BINDINGS = [("q", "quit", "Quit the application"), ("s", "screenshot", "Take a screenshot")] database: CustomerDatabase = MockCustomerDatabase([]) data_table = DataTable(id="customer_table") search_box = Input(id="search_box", placeholder="Search for a customer (use any kind of query") footer_bar = Horizontal(search_box) def on_mount(self) -> None: self.database.load() self.data_table.add_columns("First Name", "Last Name", "Birthdate", "Address", "Age") self.search("") def compose(self) -> ComposeResult: self.footer_bar.styles.dock = "bottom" self.footer_bar.styles.width = "100%" self.footer_bar.styles.height = 4 self.data_table.styles.height = "auto" self.data_table.styles.width = "100%" self.screen.styles.height = "100%" self.search_box.styles.width = "100%" yield Header() yield self.footer_bar yield Footer() yield self.data_table def search(self, query: str): """Search the customer database using a natural language query.""" self.data_table.clear() if not query: for customer in self.database.customers: self.data_table.add_row( # customer.key, customer.first_name, customer.last_name, customer.birthdate, customer.address, str(customer.age), ) else: keys = self.database.find_customer_key(query) for key in keys: customers_for_key = [customer for customer in self.database.customers if customer.key == key] assert len(customers_for_key) == 1 customer = customers_for_key[0] self.data_table.add_row( # customer.key, customer.first_name, customer.last_name, customer.birthdate, customer.address, str(customer.age), ) def on_button_pressed(self, event: Button.Pressed) -> None: if event.button is self.exit_button: self.exit() def on_input_submitted(self, event: Input.Submitted) -> None: if event.input is self.search_box: self.search(event.value) if __name__ == "__main__": app = CustomerDatabaseApp() app.run()
[ "langchain.llms.OpenAI", "langchain.cache.SQLiteCache" ]
[((447, 460), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {}), '()\n', (458, 460), False, 'from langchain.cache import SQLiteCache\n'), ((472, 495), 'langchain.llms.OpenAI', 'OpenAI', ([], {'max_tokens': '(1024)'}), '(max_tokens=1024)\n', (478, 495), False, 'from langchain.llms import OpenAI\n'), ((499, 521), 'llm_strategy.llm_strategy', 'llm_strategy', (['base_llm'], {}), '(base_llm)\n', (511, 521), False, 'from llm_strategy import llm_strategy\n'), ((1731, 1753), 'llm_strategy.llm_strategy', 'llm_strategy', (['base_llm'], {}), '(base_llm)\n', (1743, 1753), False, 'from llm_strategy import llm_strategy\n'), ((2643, 2673), 'textual.widgets.DataTable', 'DataTable', ([], {'id': '"""customer_table"""'}), "(id='customer_table')\n", (2652, 2673), False, 'from textual.widgets import Button, DataTable, Footer, Header, Input\n'), ((2691, 2778), 'textual.widgets.Input', 'Input', ([], {'id': '"""search_box"""', 'placeholder': '"""Search for a customer (use any kind of query"""'}), "(id='search_box', placeholder=\n 'Search for a customer (use any kind of query')\n", (2696, 2778), False, 'from textual.widgets import Button, DataTable, Footer, Header, Input\n'), ((2791, 2813), 'textual.containers.Horizontal', 'Horizontal', (['search_box'], {}), '(search_box)\n', (2801, 2813), False, 'from textual.containers import Horizontal\n'), ((3369, 3377), 'textual.widgets.Header', 'Header', ([], {}), '()\n', (3375, 3377), False, 'from textual.widgets import Button, DataTable, Footer, Header, Input\n'), ((3422, 3430), 'textual.widgets.Footer', 'Footer', ([], {}), '()\n', (3428, 3430), False, 'from textual.widgets import Button, DataTable, Footer, Header, Input\n')]
import os

import cassio
import langchain
from langchain.cache import CassandraCache
from langchain_community.chat_models import ChatOpenAI
from langchain_core.messages import BaseMessage
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda

use_cassandra = int(os.environ.get("USE_CASSANDRA_CLUSTER", "0"))
if use_cassandra:
    from .cassandra_cluster_init import get_cassandra_connection

    session, keyspace = get_cassandra_connection()
    cassio.init(
        session=session,
        keyspace=keyspace,
    )
else:
    cassio.init(
        token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
        database_id=os.environ["ASTRA_DB_ID"],
        keyspace=os.environ.get("ASTRA_DB_KEYSPACE"),
    )

# inits
langchain.llm_cache = CassandraCache(session=None, keyspace=None)
llm = ChatOpenAI()


# custom runnables
def msg_splitter(msg: BaseMessage):
    return [w.strip() for w in msg.content.split(",") if w.strip()]


# synonym-route preparation
synonym_prompt = ChatPromptTemplate.from_template(
    "List up to five comma-separated synonyms of this word: {word}"
)

chain = synonym_prompt | llm | RunnableLambda(msg_splitter)
[ "langchain_core.prompts.ChatPromptTemplate.from_template", "langchain_community.chat_models.ChatOpenAI", "langchain_core.runnables.RunnableLambda", "langchain.cache.CassandraCache" ]
[((788, 831), 'langchain.cache.CassandraCache', 'CassandraCache', ([], {'session': 'None', 'keyspace': 'None'}), '(session=None, keyspace=None)\n', (802, 831), False, 'from langchain.cache import CassandraCache\n'), ((838, 850), 'langchain_community.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (848, 850), False, 'from langchain_community.chat_models import ChatOpenAI\n'), ((1023, 1125), 'langchain_core.prompts.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', (['"""List up to five comma-separated synonyms of this word: {word}"""'], {}), "(\n 'List up to five comma-separated synonyms of this word: {word}')\n", (1055, 1125), False, 'from langchain_core.prompts import ChatPromptTemplate\n'), ((315, 359), 'os.environ.get', 'os.environ.get', (['"""USE_CASSANDRA_CLUSTER"""', '"""0"""'], {}), "('USE_CASSANDRA_CLUSTER', '0')\n", (329, 359), False, 'import os\n'), ((500, 547), 'cassio.init', 'cassio.init', ([], {'session': 'session', 'keyspace': 'keyspace'}), '(session=session, keyspace=keyspace)\n', (511, 547), False, 'import cassio\n'), ((1159, 1187), 'langchain_core.runnables.RunnableLambda', 'RunnableLambda', (['msg_splitter'], {}), '(msg_splitter)\n', (1173, 1187), False, 'from langchain_core.runnables import RunnableLambda\n'), ((714, 749), 'os.environ.get', 'os.environ.get', (['"""ASTRA_DB_KEYSPACE"""'], {}), "('ASTRA_DB_KEYSPACE')\n", (728, 749), False, 'import os\n')]
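Because langchain.llm_cache is backed by Cassandra/Astra here, a repeated invocation of the synonym chain with the same word is typically served from the cache instead of hitting the model again. A minimal usage sketch, assuming the setup above has run and the required credentials are in place:

# First call goes to the model; an identical second call is normally answered
# from the Cassandra-backed LLM cache configured above.
print(chain.invoke({"word": "quick"}))
print(chain.invoke({"word": "quick"}))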
import streamlit as st
import langchain_helper as lch
import textwrap
import sys
import os

st.set_page_config(page_icon="🌈", page_title="Youtube Assistant", layout="centered")

# Make the OpenAI key from Streamlit secrets available to langchain_helper.
os.environ["OPENAI_API_KEY"] = st.secrets["OPENAI_API_KEY"]

st.header("Youtube Assistant 🔥")

with st.form(key='my_form'):
    video_url = st.text_input(label='Enter a Youtube video URL', max_chars=50)
    query = st.text_input(label='Ask a question about the video', max_chars=50)
    submitted = st.form_submit_button(label='Submit')

if query and video_url:
    try:
        db = lch.create_db_from_youtube_video_url(video_url=video_url)
    except ValueError:
        st.error("Invalid Youtube URL")
    try:
        response, docs = lch.get_response_from_query(db=db, query=query)
        st.header("Answer:")
        st.text(textwrap.fill(response, width=45))
    except NameError:
        st.error("Invalid Query")
[ "langchain_helper.create_db_from_youtube_video_url", "langchain_helper.get_response_from_query" ]
[((93, 182), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_icon': '"""🌈"""', 'page_title': '"""Youtube Assistant"""', 'layout': '"""centered"""'}), "(page_icon='🌈', page_title='Youtube Assistant', layout=\n 'centered')\n", (111, 182), True, 'import streamlit as st\n'), ((238, 270), 'streamlit.header', 'st.header', (['"""Youtube Assistant 🔥"""'], {}), "('Youtube Assistant 🔥')\n", (247, 270), True, 'import streamlit as st\n'), ((276, 298), 'streamlit.form', 'st.form', ([], {'key': '"""my_form"""'}), "(key='my_form')\n", (283, 298), True, 'import streamlit as st\n'), ((316, 378), 'streamlit.text_input', 'st.text_input', ([], {'label': '"""Enter a Youtube video URL"""', 'max_chars': '(50)'}), "(label='Enter a Youtube video URL', max_chars=50)\n", (329, 378), True, 'import streamlit as st\n'), ((390, 457), 'streamlit.text_input', 'st.text_input', ([], {'label': '"""Ask a question about the video"""', 'max_chars': '(50)'}), "(label='Ask a question about the video', max_chars=50)\n", (403, 457), True, 'import streamlit as st\n'), ((473, 510), 'streamlit.form_submit_button', 'st.form_submit_button', ([], {'label': '"""Submit"""'}), "(label='Submit')\n", (494, 510), True, 'import streamlit as st\n'), ((558, 615), 'langchain_helper.create_db_from_youtube_video_url', 'lch.create_db_from_youtube_video_url', ([], {'video_url': 'video_url'}), '(video_url=video_url)\n', (594, 615), True, 'import langchain_helper as lch\n'), ((713, 760), 'langchain_helper.get_response_from_query', 'lch.get_response_from_query', ([], {'db': 'db', 'query': 'query'}), '(db=db, query=query)\n', (740, 760), True, 'import langchain_helper as lch\n'), ((768, 788), 'streamlit.header', 'st.header', (['"""Answer:"""'], {}), "('Answer:')\n", (777, 788), True, 'import streamlit as st\n'), ((647, 678), 'streamlit.error', 'st.error', (['"""Invalid Youtube URL"""'], {}), "('Invalid Youtube URL')\n", (655, 678), True, 'import streamlit as st\n'), ((806, 839), 'textwrap.fill', 'textwrap.fill', (['response'], {'width': '(45)'}), '(response, width=45)\n', (819, 839), False, 'import textwrap\n'), ((870, 895), 'streamlit.error', 'st.error', (['"""Invalid Query"""'], {}), "('Invalid Query')\n", (878, 895), True, 'import streamlit as st\n')]
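The same two helper calls can be exercised outside Streamlit, which is handy for quick checks from a terminal. A minimal sketch, assuming langchain_helper exposes exactly the two functions used above (create_db_from_youtube_video_url, and get_response_from_query returning a (response, docs) pair) and that the video URL is a placeholder:

import textwrap
import langchain_helper as lch

# Illustrative command-line use of the same helper functions.
db = lch.create_db_from_youtube_video_url(video_url="https://www.youtube.com/watch?v=<video-id>")
response, docs = lch.get_response_from_query(db=db, query="What is the video about?")
print(textwrap.fill(response, width=80))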
import numpy as np
from langchain.prompts import PromptTemplate
from langchain.schema import StrOutputParser, BaseRetriever
from langchain.schema.runnable import RunnablePassthrough
from langchain_google_genai import ChatGoogleGenerativeAI
from trulens_eval.feedback.provider.langchain import Langchain
from trulens_eval import Tru, Feedback, TruChain
from trulens_eval.feedback import Groundedness
from trulens_eval.schema import Select
from trulens_eval.utils.serial import all_queries
from trulens_eval.utils.json import jsonify

from src.embeddings import build_base_embeddings
from src.vectordb import load_chroma
from src.reranker import build_reranker
from src.retrieval_qa import build_rerank_retriever
from src.llms import build_llm

# Setup RAG
embedding_function = build_base_embeddings()
vectordb = load_chroma(embedding_function)
reranker = build_reranker()
retriever = build_rerank_retriever(vectordb, reranker)
llm = build_llm()

QA_TEMPLATE = """You are an assistant for question-answering tasks. \
Use the following pieces of retrieved context to answer the question. \
If you don't know the answer, just say you don't know.

Question: {question}
Context: {context}
Answer:"""
prompt = PromptTemplate.from_template(QA_TEMPLATE)


def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)


rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)

# Evaluate with trulens-eval
# Define provider and database
_llm = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0)
provider = Langchain(chain=_llm)

database_url = "sqlite:///data/trulens.db"
tru = Tru(database_url=database_url, database_redact_keys=True)
# tru.reset_database()

# Using TruChain
app_json = jsonify(rag_chain)
retrievers = []
for lens in all_queries(app_json):
    try:
        comp = lens.get_sole_item(rag_chain)
        if isinstance(comp, BaseRetriever):
            retrievers.append((lens, comp))
    except Exception:
        pass
context = (
    (Select.RecordCalls + retrievers[0][0]).get_relevant_documents.rets[:].page_content
)

f_qa_relevance = Feedback(
    provider.relevance_with_cot_reasons, name="Answer Relevance"
).on_input_output()
f_context_relevance = (
    Feedback(provider.qs_relevance_with_cot_reasons, name="Context Relevance")
    .on_input()
    .on(context)
    .aggregate(np.mean)
)
grounded = Groundedness(groundedness_provider=provider)
f_groundedness = (
    Feedback(grounded.groundedness_measure_with_cot_reasons, name="Groundedness")
    .on(context.collect())
    .on_output()
    .aggregate(grounded.grounded_statements_aggregator)
)

app_id = "Chain1"
tru_recorder = TruChain(
    rag_chain,
    app_id=app_id,
    feedbacks=[
        f_qa_relevance,
        f_context_relevance,
        f_groundedness,
    ],
)

qns = ...
for qn in qns:
    with tru_recorder as recording:
        res = rag_chain.invoke(qn)

# Results
# dashboard
tru.run_dashboard(port=8601)

# # dataframe
# records_df, feedback = tru.get_records_and_feedback(app_ids=[app_id])
# records_df.head()
[ "langchain.prompts.PromptTemplate.from_template", "langchain_google_genai.ChatGoogleGenerativeAI" ]
[((778, 801), 'src.embeddings.build_base_embeddings', 'build_base_embeddings', ([], {}), '()\n', (799, 801), False, 'from src.embeddings import build_base_embeddings\n'), ((813, 844), 'src.vectordb.load_chroma', 'load_chroma', (['embedding_function'], {}), '(embedding_function)\n', (824, 844), False, 'from src.vectordb import load_chroma\n'), ((856, 872), 'src.reranker.build_reranker', 'build_reranker', ([], {}), '()\n', (870, 872), False, 'from src.reranker import build_reranker\n'), ((885, 927), 'src.retrieval_qa.build_rerank_retriever', 'build_rerank_retriever', (['vectordb', 'reranker'], {}), '(vectordb, reranker)\n', (907, 927), False, 'from src.retrieval_qa import build_rerank_retriever\n'), ((934, 945), 'src.llms.build_llm', 'build_llm', ([], {}), '()\n', (943, 945), False, 'from src.llms import build_llm\n'), ((1205, 1246), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['QA_TEMPLATE'], {}), '(QA_TEMPLATE)\n', (1233, 1246), False, 'from langchain.prompts import PromptTemplate\n'), ((1536, 1593), 'langchain_google_genai.ChatGoogleGenerativeAI', 'ChatGoogleGenerativeAI', ([], {'model': '"""gemini-pro"""', 'temperature': '(0)'}), "(model='gemini-pro', temperature=0)\n", (1558, 1593), False, 'from langchain_google_genai import ChatGoogleGenerativeAI\n'), ((1605, 1626), 'trulens_eval.feedback.provider.langchain.Langchain', 'Langchain', ([], {'chain': '_llm'}), '(chain=_llm)\n', (1614, 1626), False, 'from trulens_eval.feedback.provider.langchain import Langchain\n'), ((1677, 1734), 'trulens_eval.Tru', 'Tru', ([], {'database_url': 'database_url', 'database_redact_keys': '(True)'}), '(database_url=database_url, database_redact_keys=True)\n', (1680, 1734), False, 'from trulens_eval import Tru, Feedback, TruChain\n'), ((1787, 1805), 'trulens_eval.utils.json.jsonify', 'jsonify', (['rag_chain'], {}), '(rag_chain)\n', (1794, 1805), False, 'from trulens_eval.utils.json import jsonify\n'), ((1834, 1855), 'trulens_eval.utils.serial.all_queries', 'all_queries', (['app_json'], {}), '(app_json)\n', (1845, 1855), False, 'from trulens_eval.utils.serial import all_queries\n'), ((2426, 2470), 'trulens_eval.feedback.Groundedness', 'Groundedness', ([], {'groundedness_provider': 'provider'}), '(groundedness_provider=provider)\n', (2438, 2470), False, 'from trulens_eval.feedback import Groundedness\n'), ((2709, 2812), 'trulens_eval.TruChain', 'TruChain', (['rag_chain'], {'app_id': 'app_id', 'feedbacks': '[f_qa_relevance, f_context_relevance, f_groundedness]'}), '(rag_chain, app_id=app_id, feedbacks=[f_qa_relevance,\n f_context_relevance, f_groundedness])\n', (2717, 2812), False, 'from trulens_eval import Tru, Feedback, TruChain\n'), ((2155, 2226), 'trulens_eval.Feedback', 'Feedback', (['provider.relevance_with_cot_reasonse'], {'name': '"""Answer Relevance"""'}), "(provider.relevance_with_cot_reasonse, name='Answer Relevance')\n", (2163, 2226), False, 'from trulens_eval import Tru, Feedback, TruChain\n'), ((2280, 2354), 'trulens_eval.Feedback', 'Feedback', (['provider.qs_relevance_with_cot_reasons'], {'name': '"""Context Relevance"""'}), "(provider.qs_relevance_with_cot_reasons, name='Context Relevance')\n", (2288, 2354), False, 'from trulens_eval import Tru, Feedback, TruChain\n'), ((2494, 2571), 'trulens_eval.Feedback', 'Feedback', (['grounded.groundedness_measure_with_cot_reasons'], {'name': '"""Groundedness"""'}), "(grounded.groundedness_measure_with_cot_reasons, name='Groundedness')\n", (2502, 2571), False, 'from trulens_eval import Tru, Feedback, TruChain\n')]
import langchain.vectorstores.opensearch_vector_search as ovs
from opensearchpy import OpenSearch, RequestsHttpConnection, AWSV4SignerAuth, helpers

from langchain.vectorstores import OpenSearchVectorSearch


def create_ovs_client(
    collection_id,
    index_name,
    region,
    boto3_session,
    bedrock_embeddings,
) -> OpenSearchVectorSearch:
    service = "aoss"
    host = f"{collection_id}.{region}.aoss.amazonaws.com"
    credentials = boto3_session.get_credentials()
    http_auth = AWSV4SignerAuth(credentials, region, service)

    aoss_runtime_client = OpenSearch(
        hosts=[{"host": host, "port": 443}],
        http_auth=http_auth,
        use_ssl=True,
        verify_certs=True,
        connection_class=RequestsHttpConnection,
        timeout=300,
        pool_maxsize=20,
    )
    patch_langchain(ovs, aoss_runtime_client)

    db = OpenSearchVectorSearch(
        opensearch_url=host,
        http_auth=http_auth,
        index_name=index_name,
        engine="nmslib",
        space_type="cosinesimil",
        embedding_function=bedrock_embeddings,
    )
    return db


def patch_langchain(ovs, aoss_runtime_client):
    def get_opensearch_client(opensearch_url: str, **kwargs):
        return aoss_runtime_client

    ovs._get_opensearch_client = get_opensearch_client
[ "langchain.vectorstores.OpenSearchVectorSearch" ]
[((496, 541), 'opensearchpy.AWSV4SignerAuth', 'AWSV4SignerAuth', (['credentials', 'region', 'service'], {}), '(credentials, region, service)\n', (511, 541), False, 'from opensearchpy import OpenSearch, RequestsHttpConnection, AWSV4SignerAuth, helpers\n'), ((569, 750), 'opensearchpy.OpenSearch', 'OpenSearch', ([], {'hosts': "[{'host': host, 'port': 443}]", 'http_auth': 'http_auth', 'use_ssl': '(True)', 'verify_certs': '(True)', 'connection_class': 'RequestsHttpConnection', 'timeout': '(300)', 'pool_maxsize': '(20)'}), "(hosts=[{'host': host, 'port': 443}], http_auth=http_auth,\n use_ssl=True, verify_certs=True, connection_class=\n RequestsHttpConnection, timeout=300, pool_maxsize=20)\n", (579, 750), False, 'from opensearchpy import OpenSearch, RequestsHttpConnection, AWSV4SignerAuth, helpers\n'), ((862, 1040), 'langchain.vectorstores.OpenSearchVectorSearch', 'OpenSearchVectorSearch', ([], {'opensearch_url': 'host', 'http_auth': 'http_auth', 'index_name': 'index_name', 'engine': '"""nmslib"""', 'space_type': '"""cosinesimil"""', 'embedding_function': 'bedrock_embeddings'}), "(opensearch_url=host, http_auth=http_auth, index_name\n =index_name, engine='nmslib', space_type='cosinesimil',\n embedding_function=bedrock_embeddings)\n", (884, 1040), False, 'from langchain.vectorstores import OpenSearchVectorSearch\n')]
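create_ovs_client only needs a boto3 session and an embeddings object to hand to OpenSearchVectorSearch. Below is a minimal calling sketch; the Bedrock embeddings class, region, collection id, and index name are assumptions chosen for illustration, not values from the original code.

import boto3
from langchain.embeddings import BedrockEmbeddings

session = boto3.Session(region_name="us-east-1")
embeddings = BedrockEmbeddings(client=session.client("bedrock-runtime"))

# Placeholder collection id and index name.
db = create_ovs_client(
    collection_id="abc123example",
    index_name="documents",
    region="us-east-1",
    boto3_session=session,
    bedrock_embeddings=embeddings,
)
docs = db.similarity_search("what does the handbook say about onboarding?", k=4)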
# import environment variables from data.env_variables import AZURE_OPENAI_DEPLOYMENT_NAME, AZURE_OPENAI_MODEL_NAME, \ AZURE_OPENAI_API_ENDPOINT, OPENAI_API_VERSION, AZURE_OPENAI_API_KEY, \ HUGGINGFACE_API_TOKEN, LLAMA2_API_TOKEN, OPENAI_API_KEY, NVIDIANGC_API_KEY from dotenv import load_dotenv # import software general purpose libs import os import psutil import logging as log # import langchain debug mode from langchain.globals import set_debug # import langchain document loader from langchain_community.document_loaders import PyPDFLoader from langchain_community.document_loaders import Docx2txtLoader # import message handlers from streamlit_chat import message from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler # import embedding processing objects from langchain.text_splitter import RecursiveCharacterTextSplitter # import vector database from langchain.vectorstores.chroma import Chroma # import data retrieval chain from langchain.chains import RetrievalQAWithSourcesChain # import langchain models from huggingface from langchain.llms.huggingface_hub import HuggingFaceHub from langchain.llms.huggingface_pipeline import HuggingFacePipeline from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint # import langchain models from langchain.llms.gpt4all import GPT4All from langchain.chat_models import AzureChatOpenAI from langchain.chat_models import ChatOpenAI from langchain_nvidia_ai_endpoints import ChatNVIDIA from data.vectorize import get_embeddings_model # import hugging face transformers lib - only for quantized models # import transformers # from transformers import BitsAndBytesConfig, AutoTokenizer, AutoModelForCausalLM, AutoConfig, pipeline # import streamlit web framework import streamlit as st # start debugging set_debug(True) # start logging log.basicConfig(filename="logs/app.log", level=log.DEBUG) N_THREADS = psutil.cpu_count() def load_vector_database(): log.info("Initializing Vector DB") sentence_transformer_ef = get_embeddings_model() st.session_state.vectordb = Chroma(persist_directory="./documents_cache/qa_retrieval", embedding_function=sentence_transformer_ef) def get_local_gpt4all_models(): local_models = {} local_models["ggml-gpt4all-j-v1.3-groovy"] = "./model_cache/ggml-gpt4all-j-v1.3-groovy.bin" local_models["mistral-7b-openorca.Q4_0"] = "./model_cache/mistral-7b-openorca.Q4_0.gguf" # local_models["ggml-mpt-7b-instruct"] = "./model_cache/ggml-mpt-7b-instruct.bin" # local_models["ggml-gpt4all-l13b-snoozy"] = "./model_cache/ggml-gpt4all-l13b-snoozy.bin" # local_models["ggml-v3-13b-hermes-q5_1"] = "./model_cache/ggml-v3-13b-hermes-q5_1.bin" # local_models["ggml-vicuna-13b-1.1-q4_2"] = "./model_cache/ggml-vicuna-13b-1.1-q4_2.bin" return local_models def get_llm_instance(model_interface: str): if model_interface == "azure": llm_instance = AzureChatOpenAI( deployment_name=AZURE_OPENAI_DEPLOYMENT_NAME, model_name=AZURE_OPENAI_MODEL_NAME, azure_endpoint=AZURE_OPENAI_API_ENDPOINT, openai_api_version=OPENAI_API_VERSION, openai_api_key=AZURE_OPENAI_API_KEY, openai_api_type="azure" ) elif model_interface == "openai": llm_instance = ChatOpenAI( temperature=0.1, openai_api_key="" ) elif model_interface == "gpt4all": local_models = get_local_gpt4all_models() callbacks = [StreamingStdOutCallbackHandler()] llm_instance = GPT4All( # model=local_models["mistral-7b-openorca.Q4_0"], model="model_cache/zephyr-7b-beta.Q3_K_S.gguf", # allow_download=True, callbacks=callbacks, verbose=True, # device="gpu", device="nvidia", # n_threads=16, # 
n_threads=N_THREADS, ) elif model_interface == "huggingface-falcon": llm_instance = HuggingFaceHub( verbose=True, task="text-generation", repo_id="tiiuae/falcon-40b-instruct" ) elif model_interface == "huggingface-mistral-7b": llm_instance = HuggingFacePipeline.from_model_id( # model_id="mistralai/Mistral-7B-Instruct-v0.1", model_id="Open-Orca/Mistral-7B-OpenOrca", task="text-generation", pipeline_kwargs={"max_new_tokens": 10}, device=0 ) elif model_interface == "huggingface-endpoint-zephyr-7b": endpoint_url = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta" headers = {"Authorization": "Bearer "} llm_instance = HuggingFaceEndpoint( endpoint_url=endpoint_url, task="text-generation", huggingfacehub_api_token=HUGGINGFACE_API_TOKEN ) elif model_interface == "zephyr-7b-beta": llm_instance = HuggingFacePipeline.from_model_id( model_id="HuggingFaceH4/zephyr-7b-beta", task="text-generation", # pipeline_kwargs={"max_new_tokens": 10}, device=0 ) elif model_interface == "huggingface-api-llama2": llm_instance = HuggingFacePipeline.from_model_id( model_id="meta-llama/Llama-2-7b-chat-hf", task="text-generation", device="cuda", pipeline_kwargs={ "token": LLAMA2_API_TOKEN } ) elif model_interface == "nvidia-mixtral": callbacks = [StreamingStdOutCallbackHandler()] llm_instance = ChatNVIDIA( model="mixtral_8x7b", nvidia_api_key=NVIDIANGC_API_KEY, callbacks=callbacks, temperature=0.2, top_p=0.7, max_tokens=1024, seed=42 ) return llm_instance def initialize_conversation_chain(): vectordb = st.session_state.vectordb callbacks = [StreamingStdOutCallbackHandler()] local_models = get_local_gpt4all_models() retriever_instance = vectordb.as_retriever(search_kwargs={'k':4}) # llm_instance = get_llm_instance("huggingface-endpoint-zephyr-7b") llm_instance = get_llm_instance("nvidia-mixtral") # llm_instance = get_llm_instance("gpt4all") log.info("Inicializando") st.session_state.qa_chain = RetrievalQAWithSourcesChain.from_chain_type( llm=llm_instance, chain_type="stuff", retriever=retriever_instance ) def handle_user_input(user_question, response_container): if user_question is None: return qa_chain:RetrievalQAWithSourcesChain = st.session_state.qa_chain response_container.empty() # Handle user Queries with response_container.container(): with st.spinner("Gerando resposta..."): log.info(f"Gerando resposta para consulta do cliente: {user_question}") user_question += " (responda resumidamente em pt-br)" response = qa_chain({"question":user_question}, return_only_outputs=True) # st.write(response) st.write(response["answer"]) with st.expander(label="Sources", expanded=False): for source in response["sources"]: st.write(source) def process_new_uploads(uploaded_files): vectordb:Chroma = st.session_state.vectordb for doc in uploaded_files: log.info(f"Processa arquivo: {doc.name}") with open(os.path.join("tmp_documents", doc.name), "wb") as f: f.write(doc.getbuffer()) extension = doc.name.split(".")[-1] filepath = f"./tmp_documents/{doc.name}" if extension == "pdf": loader = PyPDFLoader(file_path=filepath) elif extension == "docx" or extension == "doc": loader = Docx2txtLoader(file_path=filepath) text_splitter = RecursiveCharacterTextSplitter(chunk_size=640, chunk_overlap=128) log.info("Particiona texto") text_chunks = text_splitter.split_documents(loader.load()) # log.info("Chunks: %s", text_chunks) log.info("Processa embeddings e adiciona documento ao Vector DB") vectordb.add_documents(documents=text_chunks) vectordb.persist() os.remove(f"./tmp_documents/{doc.name}") log.info(f"Arquivo processado 
com sucesso: {doc.name}") def main(): load_dotenv() st.set_page_config(page_title="Converse com seus documentos", page_icon=":books:") st.header("Converse com seus documentos :books:") if "vectordb" not in st.session_state: with st.spinner("Inicializando Vector DB..."): load_vector_database() if "qa_chain" not in st.session_state: with st.spinner("Inicializando AI Model..."): initialize_conversation_chain() user_question = st.text_input("Faça sua pergunta aqui") response_container = st.empty() if user_question: handle_user_input(user_question, response_container) user_question = None with st.sidebar: st.subheader("Seus documentos") uploaded_files = st.file_uploader( "Insira seu arquivo aqui (.pdf, .docx) e clique em 'Processar'", accept_multiple_files=True ) if st.button("Processar"): with st.spinner("Processando..."): process_new_uploads(uploaded_files) if __name__ == "__main__": main()
[ "langchain_community.document_loaders.PyPDFLoader", "langchain_community.document_loaders.Docx2txtLoader", "langchain.llms.huggingface_pipeline.HuggingFacePipeline.from_model_id", "langchain.vectorstores.chroma.Chroma", "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler", "langchain.llms.gpt4all.GPT4All", "langchain.chat_models.ChatOpenAI", "langchain.chains.RetrievalQAWithSourcesChain.from_chain_type", "langchain.chat_models.AzureChatOpenAI", "langchain.globals.set_debug", "langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint", "langchain.llms.huggingface_hub.HuggingFaceHub", "langchain_nvidia_ai_endpoints.ChatNVIDIA" ]
[((1820, 1835), 'langchain.globals.set_debug', 'set_debug', (['(True)'], {}), '(True)\n', (1829, 1835), False, 'from langchain.globals import set_debug\n'), ((1853, 1910), 'logging.basicConfig', 'log.basicConfig', ([], {'filename': '"""logs/app.log"""', 'level': 'log.DEBUG'}), "(filename='logs/app.log', level=log.DEBUG)\n", (1868, 1910), True, 'import logging as log\n'), ((1925, 1943), 'psutil.cpu_count', 'psutil.cpu_count', ([], {}), '()\n', (1941, 1943), False, 'import psutil\n'), ((1978, 2012), 'logging.info', 'log.info', (['"""Initializing Vector DB"""'], {}), "('Initializing Vector DB')\n", (1986, 2012), True, 'import logging as log\n'), ((2043, 2065), 'data.vectorize.get_embeddings_model', 'get_embeddings_model', ([], {}), '()\n', (2063, 2065), False, 'from data.vectorize import get_embeddings_model\n'), ((2103, 2209), 'langchain.vectorstores.chroma.Chroma', 'Chroma', ([], {'persist_directory': '"""./documents_cache/qa_retrieval"""', 'embedding_function': 'sentence_transformer_ef'}), "(persist_directory='./documents_cache/qa_retrieval',\n embedding_function=sentence_transformer_ef)\n", (2109, 2209), False, 'from langchain.vectorstores.chroma import Chroma\n'), ((6313, 6338), 'logging.info', 'log.info', (['"""Inicializando"""'], {}), "('Inicializando')\n", (6321, 6338), True, 'import logging as log\n'), ((6371, 6487), 'langchain.chains.RetrievalQAWithSourcesChain.from_chain_type', 'RetrievalQAWithSourcesChain.from_chain_type', ([], {'llm': 'llm_instance', 'chain_type': '"""stuff"""', 'retriever': 'retriever_instance'}), "(llm=llm_instance, chain_type=\n 'stuff', retriever=retriever_instance)\n", (6414, 6487), False, 'from langchain.chains import RetrievalQAWithSourcesChain\n'), ((8476, 8489), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (8487, 8489), False, 'from dotenv import load_dotenv\n'), ((8494, 8581), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Converse com seus documentos"""', 'page_icon': '""":books:"""'}), "(page_title='Converse com seus documentos', page_icon=\n ':books:')\n", (8512, 8581), True, 'import streamlit as st\n'), ((8586, 8635), 'streamlit.header', 'st.header', (['"""Converse com seus documentos :books:"""'], {}), "('Converse com seus documentos :books:')\n", (8595, 8635), True, 'import streamlit as st\n'), ((8945, 8984), 'streamlit.text_input', 'st.text_input', (['"""Faça sua pergunta aqui"""'], {}), "('Faça sua pergunta aqui')\n", (8958, 8984), True, 'import streamlit as st\n'), ((9015, 9025), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (9023, 9025), True, 'import streamlit as st\n'), ((2949, 3203), 'langchain.chat_models.AzureChatOpenAI', 'AzureChatOpenAI', ([], {'deployment_name': 'AZURE_OPENAI_DEPLOYMENT_NAME', 'model_name': 'AZURE_OPENAI_MODEL_NAME', 'azure_endpoint': 'AZURE_OPENAI_API_ENDPOINT', 'openai_api_version': 'OPENAI_API_VERSION', 'openai_api_key': 'AZURE_OPENAI_API_KEY', 'openai_api_type': '"""azure"""'}), "(deployment_name=AZURE_OPENAI_DEPLOYMENT_NAME, model_name=\n AZURE_OPENAI_MODEL_NAME, azure_endpoint=AZURE_OPENAI_API_ENDPOINT,\n openai_api_version=OPENAI_API_VERSION, openai_api_key=\n AZURE_OPENAI_API_KEY, openai_api_type='azure')\n", (2964, 3203), False, 'from langchain.chat_models import AzureChatOpenAI\n'), ((5977, 6009), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (6007, 6009), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n'), ((7460, 7501), 'logging.info', 'log.info', 
(['f"""Processa arquivo: {doc.name}"""'], {}), "(f'Processa arquivo: {doc.name}')\n", (7468, 7501), True, 'import logging as log\n'), ((7951, 8016), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(640)', 'chunk_overlap': '(128)'}), '(chunk_size=640, chunk_overlap=128)\n', (7981, 8016), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((8034, 8062), 'logging.info', 'log.info', (['"""Particiona texto"""'], {}), "('Particiona texto')\n", (8042, 8062), True, 'import logging as log\n'), ((8193, 8258), 'logging.info', 'log.info', (['"""Processa embeddings e adiciona documento ao Vector DB"""'], {}), "('Processa embeddings e adiciona documento ao Vector DB')\n", (8201, 8258), True, 'import logging as log\n'), ((8349, 8389), 'os.remove', 'os.remove', (['f"""./tmp_documents/{doc.name}"""'], {}), "(f'./tmp_documents/{doc.name}')\n", (8358, 8389), False, 'import os\n'), ((8398, 8453), 'logging.info', 'log.info', (['f"""Arquivo processado com sucesso: {doc.name}"""'], {}), "(f'Arquivo processado com sucesso: {doc.name}')\n", (8406, 8453), True, 'import logging as log\n'), ((9173, 9204), 'streamlit.subheader', 'st.subheader', (['"""Seus documentos"""'], {}), "('Seus documentos')\n", (9185, 9204), True, 'import streamlit as st\n'), ((9230, 9348), 'streamlit.file_uploader', 'st.file_uploader', (['"""Insira seu arquivo aqui (.pdf, .docx) e clique em \'Processar\'"""'], {'accept_multiple_files': '(True)'}), '(\n "Insira seu arquivo aqui (.pdf, .docx) e clique em \'Processar\'",\n accept_multiple_files=True)\n', (9246, 9348), True, 'import streamlit as st\n'), ((9385, 9407), 'streamlit.button', 'st.button', (['"""Processar"""'], {}), "('Processar')\n", (9394, 9407), True, 'import streamlit as st\n'), ((3333, 3379), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.1)', 'openai_api_key': '""""""'}), "(temperature=0.1, openai_api_key='')\n", (3343, 3379), False, 'from langchain.chat_models import ChatOpenAI\n'), ((6808, 6841), 'streamlit.spinner', 'st.spinner', (['"""Gerando resposta..."""'], {}), "('Gerando resposta...')\n", (6818, 6841), True, 'import streamlit as st\n'), ((6855, 6926), 'logging.info', 'log.info', (['f"""Gerando resposta para consulta do cliente: {user_question}"""'], {}), "(f'Gerando resposta para consulta do cliente: {user_question}')\n", (6863, 6926), True, 'import logging as log\n'), ((7137, 7165), 'streamlit.write', 'st.write', (["response['answer']"], {}), "(response['answer'])\n", (7145, 7165), True, 'import streamlit as st\n'), ((7774, 7805), 'langchain_community.document_loaders.PyPDFLoader', 'PyPDFLoader', ([], {'file_path': 'filepath'}), '(file_path=filepath)\n', (7785, 7805), False, 'from langchain_community.document_loaders import PyPDFLoader\n'), ((8697, 8737), 'streamlit.spinner', 'st.spinner', (['"""Inicializando Vector DB..."""'], {}), "('Inicializando Vector DB...')\n", (8707, 8737), True, 'import streamlit as st\n'), ((8835, 8874), 'streamlit.spinner', 'st.spinner', (['"""Inicializando AI Model..."""'], {}), "('Inicializando AI Model...')\n", (8845, 8874), True, 'import streamlit as st\n'), ((3582, 3693), 'langchain.llms.gpt4all.GPT4All', 'GPT4All', ([], {'model': '"""model_cache/zephyr-7b-beta.Q3_K_S.gguf"""', 'callbacks': 'callbacks', 'verbose': '(True)', 'device': '"""nvidia"""'}), "(model='model_cache/zephyr-7b-beta.Q3_K_S.gguf', callbacks=callbacks,\n verbose=True, device='nvidia')\n", (3589, 3693), False, 'from langchain.llms.gpt4all import 
GPT4All\n'), ((7196, 7240), 'streamlit.expander', 'st.expander', ([], {'label': '"""Sources"""', 'expanded': '(False)'}), "(label='Sources', expanded=False)\n", (7207, 7240), True, 'import streamlit as st\n'), ((7529, 7568), 'os.path.join', 'os.path.join', (['"""tmp_documents"""', 'doc.name'], {}), "('tmp_documents', doc.name)\n", (7541, 7568), False, 'import os\n'), ((7883, 7917), 'langchain_community.document_loaders.Docx2txtLoader', 'Docx2txtLoader', ([], {'file_path': 'filepath'}), '(file_path=filepath)\n', (7897, 7917), False, 'from langchain_community.document_loaders import Docx2txtLoader\n'), ((9426, 9454), 'streamlit.spinner', 'st.spinner', (['"""Processando..."""'], {}), "('Processando...')\n", (9436, 9454), True, 'import streamlit as st\n'), ((3524, 3556), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (3554, 3556), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n'), ((4010, 4105), 'langchain.llms.huggingface_hub.HuggingFaceHub', 'HuggingFaceHub', ([], {'verbose': '(True)', 'task': '"""text-generation"""', 'repo_id': '"""tiiuae/falcon-40b-instruct"""'}), "(verbose=True, task='text-generation', repo_id=\n 'tiiuae/falcon-40b-instruct')\n", (4024, 4105), False, 'from langchain.llms.huggingface_hub import HuggingFaceHub\n'), ((7313, 7329), 'streamlit.write', 'st.write', (['source'], {}), '(source)\n', (7321, 7329), True, 'import streamlit as st\n'), ((4224, 4377), 'langchain.llms.huggingface_pipeline.HuggingFacePipeline.from_model_id', 'HuggingFacePipeline.from_model_id', ([], {'model_id': '"""Open-Orca/Mistral-7B-OpenOrca"""', 'task': '"""text-generation"""', 'pipeline_kwargs': "{'max_new_tokens': 10}", 'device': '(0)'}), "(model_id='Open-Orca/Mistral-7B-OpenOrca',\n task='text-generation', pipeline_kwargs={'max_new_tokens': 10}, device=0)\n", (4257, 4377), False, 'from langchain.llms.huggingface_pipeline import HuggingFacePipeline\n'), ((4723, 4845), 'langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint', 'HuggingFaceEndpoint', ([], {'endpoint_url': 'endpoint_url', 'task': '"""text-generation"""', 'huggingfacehub_api_token': 'HUGGINGFACE_API_TOKEN'}), "(endpoint_url=endpoint_url, task='text-generation',\n huggingfacehub_api_token=HUGGINGFACE_API_TOKEN)\n", (4742, 4845), False, 'from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint\n'), ((4957, 5069), 'langchain.llms.huggingface_pipeline.HuggingFacePipeline.from_model_id', 'HuggingFacePipeline.from_model_id', ([], {'model_id': '"""HuggingFaceH4/zephyr-7b-beta"""', 'task': '"""text-generation"""', 'device': '(0)'}), "(model_id='HuggingFaceH4/zephyr-7b-beta',\n task='text-generation', device=0)\n", (4990, 5069), False, 'from langchain.llms.huggingface_pipeline import HuggingFacePipeline\n'), ((5243, 5410), 'langchain.llms.huggingface_pipeline.HuggingFacePipeline.from_model_id', 'HuggingFacePipeline.from_model_id', ([], {'model_id': '"""meta-llama/Llama-2-7b-chat-hf"""', 'task': '"""text-generation"""', 'device': '"""cuda"""', 'pipeline_kwargs': "{'token': LLAMA2_API_TOKEN}"}), "(model_id='meta-llama/Llama-2-7b-chat-hf',\n task='text-generation', device='cuda', pipeline_kwargs={'token':\n LLAMA2_API_TOKEN})\n", (5276, 5410), False, 'from langchain.llms.huggingface_pipeline import HuggingFacePipeline\n'), ((5615, 5760), 'langchain_nvidia_ai_endpoints.ChatNVIDIA', 'ChatNVIDIA', ([], {'model': '"""mixtral_8x7b"""', 'nvidia_api_key': 'NVIDIANGC_API_KEY', 'callbacks': 'callbacks', 
'temperature': '(0.2)', 'top_p': '(0.7)', 'max_tokens': '(1024)', 'seed': '(42)'}), "(model='mixtral_8x7b', nvidia_api_key=NVIDIANGC_API_KEY,\n callbacks=callbacks, temperature=0.2, top_p=0.7, max_tokens=1024, seed=42)\n", (5625, 5760), False, 'from langchain_nvidia_ai_endpoints import ChatNVIDIA\n'), ((5558, 5590), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (5588, 5590), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n')]
# NOTE: this snippet is illustrative pseudocode. The hubspot, langchain and openai
# packages do not expose helpers such as langchain.analyze or
# openai.generate_reminder_message; these calls stand in for application-level wrappers.
import langchain
import openai
import streamlit
import hubspot

# Retrieve customer preferences and previous interactions from Hubspot
customer_preferences = hubspot.get_customer_preferences()
previous_interactions = hubspot.get_previous_interactions()

# Generate personalized reminders using Langchain analysis and OpenAI
latest_interaction = previous_interactions[-1]
analysis_result = langchain.analyze(latest_interaction)
reminder_message = openai.generate_reminder_message(analysis_result, customer_preferences)

# Send reminder message to customer via email or text
reminder_type = customer_preferences.get("reminder_type")
if reminder_type == "email":
    hubspot.send_email_reminder(reminder_message)
elif reminder_type == "text":
    hubspot.send_text_reminder(reminder_message)
[ "langchain.analyze" ]
[((164, 198), 'hubspot.get_customer_preferences', 'hubspot.get_customer_preferences', ([], {}), '()\n', (196, 198), False, 'import hubspot\n'), ((224, 259), 'hubspot.get_previous_interactions', 'hubspot.get_previous_interactions', ([], {}), '()\n', (257, 259), False, 'import hubspot\n'), ((400, 437), 'langchain.analyze', 'langchain.analyze', (['latest_interaction'], {}), '(latest_interaction)\n', (417, 437), False, 'import langchain\n'), ((458, 529), 'openai.generate_reminder_message', 'openai.generate_reminder_message', (['analysis_result', 'customer_preferences'], {}), '(analysis_result, customer_preferences)\n', (490, 529), False, 'import openai\n'), ((681, 726), 'hubspot.send_email_reminder', 'hubspot.send_email_reminder', (['reminder_message'], {}), '(reminder_message)\n', (708, 726), False, 'import hubspot\n'), ((763, 807), 'hubspot.send_text_reminder', 'hubspot.send_text_reminder', (['reminder_message'], {}), '(reminder_message)\n', (789, 807), False, 'import hubspot\n')]
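The reminder snippet above is schematic: `langchain.analyze`, `openai.generate_reminder_message`, and the `hubspot` helpers are placeholder calls rather than published APIs. A hedged sketch of how the analysis-plus-drafting step could be expressed with real LangChain primitives, reusing `latest_interaction` and `customer_preferences` from the snippet and treating the prompt wording as an assumption:

# Hypothetical stand-in for langchain.analyze / openai.generate_reminder_message above.
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate

reminder_prompt = PromptTemplate(
    input_variables=["interaction", "preferences"],
    template=(
        "Summarize this customer interaction and draft a short, friendly reminder.\n"
        "Interaction: {interaction}\n"
        "Preferences: {preferences}\n"
        "Reminder:"
    ),
)
reminder_chain = LLMChain(llm=ChatOpenAI(temperature=0), prompt=reminder_prompt)
reminder_message = reminder_chain.run(
    interaction=latest_interaction,
    preferences=str(customer_preferences),
)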
import logging
import re
from typing import Any, List, Optional

import langchain
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI

from init_openai import init_openai

logger = logging.getLogger("SoCloverAI")
init_openai()
model_name = "gpt-4-1106-preview"


def set_trial(trial: int) -> None:
    langchain.llm_cache.inner_cache.set_trial(trial)


def dump_cache_stats_since_last_call() -> None:
    logger.info(langchain.llm_cache.get_cache_stats_summary())
    langchain.llm_cache.clear_cache_stats()


def create_llm_model(temperature: float, model_name: str) -> ChatOpenAI:
    # mypy seems confused about the model_name parameter:
    # Unexpected keyword argument "model_name" for "ChatOpenAI"
    result = ChatOpenAI(temperature=temperature, model_name=model_name)  # type: ignore
    return result


async def predict(temperature: float, template: str, **kwargs: Any) -> List[str]:
    prompt = PromptTemplate(
        template=template.strip(), input_variables=["word0", "word1"]
    )
    llm = create_llm_model(temperature, model_name)
    chain = LLMChain(llm=llm, prompt=prompt, verbose=False)
    output = await chain.apredict(**kwargs)
    logger.debug(output)

    predictions = parse_candidates(output)
    best = parse_best(output)
    if best:
        predictions = [best] + predictions
    strip_chars = ' \t"'
    predictions = [prediction.strip(strip_chars) for prediction in predictions]
    predictions = [prediction for prediction in predictions if prediction]

    # remove duplicates while preserving order
    seen = set()
    unique_predictions = list()
    for prediction in predictions:
        if prediction not in seen:
            unique_predictions.append(prediction)
            seen.add(prediction)
    predictions = unique_predictions
    return predictions


def parse_candidates(output: str) -> List[str]:
    result = []
    for line in output.splitlines():
        if not line.startswith("Candidates:"):
            continue
        candidates_str = line[len("Candidates: ") :]
        candidates = candidates_str.split(",")
        candidates = [candidate.strip() for candidate in candidates]
        result += candidates
    return result


pattern = re.compile(r"Best: (.*)")


def parse_best(output: str) -> Optional[str]:
    match = pattern.search(output)
    if match:
        return match.group(1)
    split_output = output.split()
    if len(split_output) == 1:
        logger.info(f"Invalid output format: {output}")
        return split_output[0]
    logger.info(f"Invalid output: {output}")
    return None
[ "langchain.llm_cache.get_cache_stats_summary", "langchain_openai.ChatOpenAI", "langchain.llm_cache.inner_cache.set_trial", "langchain.llm_cache.clear_cache_stats", "langchain.chains.LLMChain" ]
[((252, 283), 'logging.getLogger', 'logging.getLogger', (['"""SoCloverAI"""'], {}), "('SoCloverAI')\n", (269, 283), False, 'import logging\n'), ((284, 297), 'init_openai.init_openai', 'init_openai', ([], {}), '()\n', (295, 297), False, 'from init_openai import init_openai\n'), ((2273, 2297), 're.compile', 're.compile', (['"""Best: (.*)"""'], {}), "('Best: (.*)')\n", (2283, 2297), False, 'import re\n'), ((373, 421), 'langchain.llm_cache.inner_cache.set_trial', 'langchain.llm_cache.inner_cache.set_trial', (['trial'], {}), '(trial)\n', (414, 421), False, 'import langchain\n'), ((539, 578), 'langchain.llm_cache.clear_cache_stats', 'langchain.llm_cache.clear_cache_stats', ([], {}), '()\n', (576, 578), False, 'import langchain\n'), ((791, 849), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': 'temperature', 'model_name': 'model_name'}), '(temperature=temperature, model_name=model_name)\n', (801, 849), False, 'from langchain_openai import ChatOpenAI\n'), ((1137, 1184), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt', 'verbose': '(False)'}), '(llm=llm, prompt=prompt, verbose=False)\n', (1145, 1184), False, 'from langchain.chains import LLMChain\n'), ((488, 533), 'langchain.llm_cache.get_cache_stats_summary', 'langchain.llm_cache.get_cache_stats_summary', ([], {}), '()\n', (531, 533), False, 'import langchain\n')]
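The `set_trial`, `get_cache_stats_summary`, and `clear_cache_stats` calls above come from a project-specific cache wrapper that `init_openai` installs into `langchain.llm_cache`; stock LangChain only provides the `llm_cache` slot itself. A minimal sketch of the standard cache such a wrapper would sit on top of (the database path is an assumption):

# Stock LLM caching; the custom wrapper above presumably decorates something like this.
import langchain
from langchain.cache import SQLiteCache

langchain.llm_cache = SQLiteCache(database_path=".langchain.db")

With a cache in place, repeated `chain.apredict` calls inside `predict` are served from SQLite instead of the API; `parse_best` and `parse_candidates` then expect the completion to contain a "Best: ..." line and optional "Candidates: ..." lines.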
from langchain.chains.openai_functions.openapi import get_openapi_chain
import langchain

langchain.verbose = True
chain = get_openapi_chain("https://api.speak.com/openapi.yaml", verbose=True)

import json

# Insertion
data = {"name": "John", "age": 30, "city": "New York"}
print(json.dumps(data, indent=4, ensure_ascii=False))

# word/phrase explanation plugin API
output = chain.run("How would you say 'Pydantic' in Chinese")
print("###### explanation:\n", output.get("explanation"))
print("\n###### extra_response_instructions:\n", output.get("extra_response_instructions"))

# chain = get_openapi_chain("https://gist.githubusercontent.com/roaldnefs/053e505b2b7a807290908fe9aa3e1f00/raw/0a212622ebfef501163f91e23803552411ed00e4/openapi.yaml", verbose=True)
# print(chain.run("What's the today's comic?"))
[ "langchain.chains.openai_functions.openapi.get_openapi_chain" ]
[((122, 191), 'langchain.chains.openai_functions.openapi.get_openapi_chain', 'get_openapi_chain', (['"""https://api.speak.com/openapi.yaml"""'], {'verbose': '(True)'}), "('https://api.speak.com/openapi.yaml', verbose=True)\n", (139, 191), False, 'from langchain.chains.openai_functions.openapi import get_openapi_chain\n'), ((279, 325), 'json.dumps', 'json.dumps', (['data'], {'indent': '(4)', 'ensure_ascii': '(False)'}), '(data, indent=4, ensure_ascii=False)\n', (289, 325), False, 'import json\n')]
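`get_openapi_chain` wires an OpenAI-functions call that selects an operation from the spec to a request chain that executes it, so `chain.run(...)` returns the parsed JSON body; that is why the snippet can read `explanation` and `extra_response_instructions` straight off the result. A sketch of the same pattern against the commented-out XKCD spec from the snippet, assuming the gist is still reachable:

# Sketch reusing the commented-out spec URL from the snippet above.
from langchain.chains.openai_functions.openapi import get_openapi_chain

xkcd_chain = get_openapi_chain(
    "https://gist.githubusercontent.com/roaldnefs/053e505b2b7a807290908fe9aa3e1f00"
    "/raw/0a212622ebfef501163f91e23803552411ed00e4/openapi.yaml",
    verbose=True,
)
print(xkcd_chain.run("What's the today's comic?"))  # parsed JSON, e.g. num/title/img/alt fields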
import asyncio import os import json import tiktoken from transcribe import file_to_json_path, get_recordings, get_all_recordings, print_json import langchain from langchain.llms import OpenAI from langchain.cache import SQLiteCache from langchain.chat_models import ChatOpenAI from langchain import PromptTemplate from langchain.prompts.chat import ( ChatPromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate, ) from langchain.schema import ( HumanMessage, ) from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler from transformers import AutoTokenizer # MAX_TRANSCRIPT_LENGTH = 1536 MAX_TRANSCRIPT_LENGTH = 1800 ANSWER_START_LENGTH = 50 NAMESPACE = 'Startup Interviews' database_path = "data/.langchain.db" langchain.llm_cache = SQLiteCache(database_path) training_tokenizer_name = "huggyllama/llama-13b" trainer_tokenizer = AutoTokenizer.from_pretrained(training_tokenizer_name) async def main(): data = get_recordings(f"data/{NAMESPACE}") # print(json.dumps(data, indent=4)) all_recordings = get_all_recordings(data) # print_json(all_recordings) # print_json(len(all_recordings)) # limit to only 2 recordings # all_recordings = all_recordings[:10] chat_items = [] for i, recording in enumerate(all_recordings): # print(f"{i}: {recording['filePath']}") # print(f"{i + 1} of {len(all_recordings)}: {recording['title']}") json_file_path = file_to_json_path(recording['filePath']) if not os.path.exists(json_file_path): print(f"\tJSON file does not exist at {json_file_path}") continue with open(json_file_path, 'r') as json_file: json_data = json.load(json_file) # print(json.dumps(json_data, indent=4)) """ "results": { "channels": [ { "alternatives": [ { "transcript": "...", "words": [ { "word": "i", "start": 0.0, "end": 0.16, "confidence": 0.99353653, "speaker": 0, "speaker_confidence": 0.8430252, "punctuated_word": "I" }, ] """ transcript = json_data['results']['channels'][0]['alternatives'][0] transcript_text = transcript['transcript'] words = transcript['words'] # print(len(words), len(transcript_text.split())) # count unique speakers num_speakers = get_num_speakers(words) # print(len(speakers)) # print(num_speakers) # if num_speakers > 5: if num_speakers != 1: continue if token_length(transcript_text) > MAX_TRANSCRIPT_LENGTH: print(f"\tSkipping \"{recording['title']}\" because it's too long: {token_length(transcript_text)}") continue # chat_item = { # 'title': recording['title'], # 'speakers': num_speakers, # 'text': transcript_text, # } # duplicate recording chat_item = recording.copy() # merge in an object with the transcript text chat_item.update({ 'speakers': num_speakers, 'text': transcript_text, }) chat_items.append(chat_item) # limit to only 2 chat items # chat_items = chat_items[:100] # return # add start_text and question to each chat item print(f"Generating {len(chat_items)} questions") count = len(chat_items) for i, chat_item in enumerate(chat_items): curr = i + 1 # print(f"{i+1} of {len(chat_items)} ({(perc)}) Generating question for {chat_item['title']}") # print(f"{curr} of {count} ({round(curr/count*100, 2)}%) Generating question for {chat_item['title']}") perc = round(curr/count*100, 2) print(f"{curr} of {count} ({perc}%): Generating question for {chat_item['title']}") start_text = get_start_text(chat_item['text']) question = get_question(chat_item['title'], start_text) print(f"\tQ: {question}") chat_item.update({ 'start_text': start_text, 'question': question, }) # print_json(chat_items) print_json(len(chat_items)) 
write_jsonl(chat_items, "train") def get_num_speakers(words): speakers = set() for word in words: speakers.add(word['speaker']) num_speakers = len(speakers) return num_speakers enc = tiktoken.get_encoding("cl100k_base") def get_tokens(contents): return enc.encode(contents) # return tokenizer(contents)['input_ids'] def decode_tokens(tokens): return enc.decode(tokens) # return tokenizer.decode(tokens) def get_start_text(contents): tokens = get_tokens(contents) # if longer than ANSWER_START_LENGTH tokens, truncate and add ... if len(tokens) > ANSWER_START_LENGTH: return decode_tokens(tokens[:ANSWER_START_LENGTH]) + '...' else: return decode_tokens(tokens) def token_length(contents): return len(get_tokens(contents)) def token_length_for_trainer(contents): return len(trainer_tokenizer(contents)['input_ids']) def get_question(title, reply): template="You are a helpful, truthful, detailed assistant writing a transcript of an interview." system_message_prompt = SystemMessagePromptTemplate.from_template(template) human_template="""Task: Write the question which is most likely to produce the following reply. Interview Title: {title} Reply: {reply} Question:""" human_message_prompt = HumanMessagePromptTemplate.from_template(human_template) chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt]) chat = ChatOpenAI(streaming=False, temperature=0) resp = chat(chat_prompt.format_prompt(title=title, reply=reply).to_messages()) return resp.content # Write chat to .json in format: # [{ "instruction": "...", "input": "...", "output": "..." }, ...] def write_jsonl(chat_items, name = 'chat'): chat_file_path = f"data/{NAMESPACE}/{name}.jsonl" # create rows print(f"Creating rows: {len(chat_items)}") rows = [] for chat_item in chat_items: row = { "instruction": chat_item['question'], "input": "", "output": chat_item['text'], "instruction_length": token_length_for_trainer(chat_item['question']), "output_length": token_length_for_trainer(chat_item['text']), "title": chat_item['title'], "start": chat_item['start_text'], } rows.append(row) # write rows to file with open(chat_file_path, 'w') as chat_file: # for chat_item in chat_items: # # start_text = get_start_text(chat_item['text']) # # question = get_question(chat_item['title'], start_text) # row = { # # "instruction": question, # "instruction": chat_item['question'], # "input": "", # "output": chat_item['text'], # "len": token_length(chat_item['text']), # "title": chat_item['title'], # # "start": start_text, # "start": chat_item['start_text'], # } for row in rows: chat_file.write(json.dumps(row, ensure_ascii=False) + '\n') print(f"Wrote {len(chat_items)} chat items to {chat_file_path}") max_instruction_len = max([row['instruction_length'] for row in rows]) max_output_len = max([row['output_length'] for row in rows]) print(f"Max instruction length: {max_instruction_len}") print(f"Max output length: {max_output_len}") if __name__ == "__main__": asyncio.run(main())
[ "langchain.prompts.chat.SystemMessagePromptTemplate.from_template", "langchain.chat_models.ChatOpenAI", "langchain.cache.SQLiteCache", "langchain.prompts.chat.HumanMessagePromptTemplate.from_template", "langchain.prompts.chat.ChatPromptTemplate.from_messages" ]
[((822, 848), 'langchain.cache.SQLiteCache', 'SQLiteCache', (['database_path'], {}), '(database_path)\n', (833, 848), False, 'from langchain.cache import SQLiteCache\n'), ((919, 973), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['training_tokenizer_name'], {}), '(training_tokenizer_name)\n', (948, 973), False, 'from transformers import AutoTokenizer\n'), ((4987, 5023), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['"""cl100k_base"""'], {}), "('cl100k_base')\n", (5008, 5023), False, 'import tiktoken\n'), ((1004, 1039), 'transcribe.get_recordings', 'get_recordings', (['f"""data/{NAMESPACE}"""'], {}), "(f'data/{NAMESPACE}')\n", (1018, 1039), False, 'from transcribe import file_to_json_path, get_recordings, get_all_recordings, print_json\n'), ((1101, 1125), 'transcribe.get_all_recordings', 'get_all_recordings', (['data'], {}), '(data)\n', (1119, 1125), False, 'from transcribe import file_to_json_path, get_recordings, get_all_recordings, print_json\n'), ((5841, 5892), 'langchain.prompts.chat.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['template'], {}), '(template)\n', (5882, 5892), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((6076, 6132), 'langchain.prompts.chat.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['human_template'], {}), '(human_template)\n', (6116, 6132), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((6151, 6230), 'langchain.prompts.chat.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['[system_message_prompt, human_message_prompt]'], {}), '([system_message_prompt, human_message_prompt])\n', (6183, 6230), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((6243, 6285), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'streaming': '(False)', 'temperature': '(0)'}), '(streaming=False, temperature=0)\n', (6253, 6285), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1497, 1537), 'transcribe.file_to_json_path', 'file_to_json_path', (["recording['filePath']"], {}), "(recording['filePath'])\n", (1514, 1537), False, 'from transcribe import file_to_json_path, get_recordings, get_all_recordings, print_json\n'), ((1553, 1583), 'os.path.exists', 'os.path.exists', (['json_file_path'], {}), '(json_file_path)\n', (1567, 1583), False, 'import os\n'), ((1753, 1773), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (1762, 1773), False, 'import json\n'), ((7833, 7868), 'json.dumps', 'json.dumps', (['row'], {'ensure_ascii': '(False)'}), '(row, ensure_ascii=False)\n', (7843, 7868), False, 'import json\n')]
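The `get_question` helper in the record above follows the standard chat-prompt recipe: build system and human message templates, combine them with `ChatPromptTemplate.from_messages`, format with the runtime values, and send the resulting messages to the chat model. Stripped of the transcript plumbing it reduces to roughly this sketch (the title and reply values are invented examples):

# Condensed sketch of the prompt flow in get_question(); inputs are made up for illustration.
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)

system = SystemMessagePromptTemplate.from_template(
    "You are a helpful, truthful, detailed assistant writing a transcript of an interview."
)
human = HumanMessagePromptTemplate.from_template(
    "Task: Write the question which is most likely to produce the following reply.\n"
    "Interview Title: {title}\nReply: {reply}\nQuestion:"
)
chat_prompt = ChatPromptTemplate.from_messages([system, human])

chat = ChatOpenAI(streaming=False, temperature=0)
messages = chat_prompt.format_prompt(
    title="Building a startup", reply="We talked to users every week..."
).to_messages()
print(chat(messages).content)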
import json import streamlit as st import streamlit_ext as ste import os import time import gc import pandas as pd from dotenv import load_dotenv from langchain.chains import LLMChain # import LangChain libraries from langchain.llms import OpenAI # import OpenAI model from langchain.chat_models import ChatOpenAI # import OpenAI chat model from langchain.callbacks import get_openai_callback # import OpenAI callbacks from langchain.prompts import PromptTemplate # import PromptTemplate from langchain.llms import HuggingFacePipeline # import HuggingFacePipeline import torch # import torch # pip install git+https://github.com/huggingface/transformers from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList def main(): load_dotenv(".env") pipe = None open_ai_key = None uploaded_file = None # import css tasks and prompts with open('prompts.json') as f: promptlib = json.load(f) hide_default_format = """ <style> #MainMenu {visibility: hidden; } footer {visibility: hidden;} </style> """ st.markdown(hide_default_format, unsafe_allow_html=True) # title st.title("Prompt Compass") st.subheader( "A Tool for Navigating LLMs and Prompts for Computational Social Science and Digital Humanities Research") # Add Link to your repo st.markdown( ''' [![Repo](https://badgen.net/badge/icon/GitHub?icon=github&label)](https://github.com/ErikBorra/PromptCompass) [![DOI](https://zenodo.org/badge/649855474.svg)](https://zenodo.org/badge/latestdoi/649855474) ''', unsafe_allow_html=True) # load available models model_with_names = [ model for model in promptlib['models'] if model['name']] # create input area for model selection input_values = {} input_values['model'] = st.selectbox('Select a model', model_with_names, format_func=lambda x: x['name']) # If there is no previous state, set the default model as the first model if not st.session_state.get('previous_model'): st.session_state['previous_model'] = model_with_names[0]['name'] st.caption(f"Model info: [{input_values['model']['name']}]({input_values['model']['resource']})" + ( f". {input_values['model']['comment']}" if 'comment' in input_values['model'] else "")) # ask for open ai key if no key is set in .env if input_values['model']['resource'] in ["https://platform.openai.com/docs/models/gpt-3-5", "https://platform.openai.com/docs/models/gpt-4"]: # Load the OpenAI API key from the environment variable if os.getenv("OPENAI_API_KEY") is None or os.getenv("OPENAI_API_KEY") == "": open_ai_key = st.text_input("Open AI API Key", "") else: open_ai_key = os.getenv("OPENAI_API_KEY") # set default values do_sample = False temperature = 0.001 top_p = -1 max_new_tokens = -1 with st.expander("Advanced settings"): if input_values['model']['resource'] not in ["https://platform.openai.com/docs/models/gpt-3-5", "https://platform.openai.com/docs/models/gpt-4"]: st.markdown( """ **Set Maximum Length**: Determines the maximum number of tokens of the **generated** text. A token is approximately four characters word, although this depends on the model. A value of -1 means the parameter will not be specified. """ ) max_new_tokens = st.number_input( 'Maximum Length', value=256, min_value=-1, step=1) st.markdown( """ **Set do_sample**: This controls how the model generates text. If do_sample=True, the model will use a probabilistic approach to generate text, where the likelihood of each word being chosen depends on its predicted probability. Use the below parameters to further control its behavior. 
If do_sample=False, the model will use a deterministic approach and always choose the most likely next word. """ ) do_sample = st.radio( 'Set do_sample', ('False', 'True') ) st.markdown( """ **Temperature**: Controls the randomness in the model's responses. Lower values (closer to 0.0) make the output more deterministic, while higher values (closer to 2.0) make it more diverse. A value of -1 means the parameter will not be specified. """ ) temperature = st.number_input( 'Set Temperature', min_value=-1.0, max_value=2.0, value=0.001, format="%.3f") st.markdown( """ **Top P**: Also known as "nucleus sampling", is an alternative to temperature that can also be used to control the randomness of the model's responses. It essentially trims the less likely options in the model's distribution of possible responses. Possible values lie between 0.0 and 1.0. A value of -1 means the parameter will not be specified. Only applies if do_sample=True. """ ) top_p = st.number_input('Set Top-P', min_value=- 1.0, max_value=1.0, value=-1.0) # Check for correct values allgood = True # set model kwargs model_kwargs = {} if input_values['model']['resource'] not in ["https://platform.openai.com/docs/models/gpt-3-5", "https://platform.openai.com/docs/models/gpt-4"]: # check if max_new_tokens is at least 1 or -1 if not (max_new_tokens > 0 or max_new_tokens == -1): st.error( 'Error: Max Tokens must be at least 1. Choose -1 if you want to use the default model value.') max_new_tokens = -1 allgood = False if max_new_tokens > 0: model_kwargs['max_new_tokens'] = max_new_tokens if do_sample not in ['True', 'False']: st.error( 'Error: do_Sample must be True or False') do_sample = False allgood = False do_sample = True if do_sample == 'True' else False if do_sample in [True, False]: model_kwargs['do_sample'] = do_sample if not (0 <= temperature <= 2 or temperature == -1): st.error( "Temperature value must be between 0 and 2. Choose -1 if you want to use the default model value.") temperature = -1 allgood = False if 0 <= temperature <= 2: model_kwargs['temperature'] = temperature if not (0 <= top_p <= 1 or top_p == -1): st.error( "Top P value must be between 0 and 1. Choose -1 if you want to use the default model value.") top_p = -1 allgood = False if 0 <= top_p <= 1: model_kwargs['top_p'] = top_p # create input area for task selection tasks_with_names = [task for task in promptlib['tasks'] if task['name']] task = st.selectbox('Select a task', tasks_with_names, format_func=lambda x: x['name'] + " - " + x['authors']) # Create input areas for prompts and user input if task: # concatenate all strings from prompt array prompt = '\n'.join(task['prompt']) # create input area for prompt input_values['prompt'] = st.text_area( "Inspect, and possibly modify, the prompt by ["+task['authors']+"]("+task['paper']+")", prompt, height=200) # allow the user to select the input type input_type = st.radio("Choose input type:", ('Text input', 'Upload a CSV'), horizontal=True) if input_type == 'Text input': # create input area for user input input_values['user'] = st.text_area( "Input to be analyzed with the prompt (one thing per line):", "this user is happy\none user is just a user\nthe other user is a lier") # if the user's input is not a list (e.g. 
a string), then split it by newlines if isinstance(input_values['user'], str): input_values['user'] = input_values['user'].split('\n') original_data = pd.DataFrame( input_values['user'], columns=['user_input']) else: # upload CSV uploaded_file = st.file_uploader("Choose a CSV file", type="csv") if uploaded_file is not None: # convert the uploaded file to a dataframe original_data = pd.read_csv(uploaded_file) # ask user to select a column column_to_extract = st.selectbox( 'Choose a column to apply the prompt on:', original_data.columns) # process the selected column from the dataframe input_values['user'] = original_data[column_to_extract].tolist() data = pd.DataFrame() # Determine the output file name filename = uploaded_file.name if uploaded_file else 'output.csv' base_filename, file_extension = os.path.splitext(filename) output_filename = f"{base_filename}_promptcompass{file_extension}" repeat_input = st.number_input( 'Enter the number of times the prompt/input combination should be repeated:', min_value=1, max_value=10, value=1, step=1) # Submit button submit_button = st.button('Submit') st.write('---') # Add a horizontal line # Process form submission if submit_button and allgood: if 'user' not in input_values or input_values['user'] is None: st.error("No user input provided") else: with st.spinner(text="In progress..."): try: start_time = time.time() st.write("Start time: " + time.strftime("%H:%M:%S", time.localtime())) if input_values['prompt'] and input_values['user']: # create prompt template # add location of user input to prompt if task['location_of_input'] == 'before': template = "{user_input}" + \ "\n\n" + input_values['prompt'] elif task['location_of_input'] == 'after': template = input_values['prompt'] + \ "\n\n" + "{user_input}" else: template = input_values['prompt'] # make sure users don't forget the user input variable if "{user_input}" not in template: template = template + "\n\n{user_input}" # fill prompt template prompt_template = PromptTemplate( input_variables=["user_input"], template=template) # loop over user values in prompt for key, user_input in enumerate(input_values['user']): for i in range(repeat_input): num_prompt_tokens = None num_completion_tokens = None cost = None user_input = str(user_input).strip() if user_input == "" or user_input == "nan": continue # set up and run the model model_id = input_values['model']['name'] if model_id in ['gpt-3.5-turbo-0125', 'gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-4-0125-preview', 'gpt-4-1106-preview', 'gpt-4', 'gpt-4-32k', 'gpt-3.5-turbo-instruct', 'babbage-002', 'davinci-002']: if open_ai_key is None or open_ai_key == "": st.error( "Please provide an Open AI API Key") exit(1) with get_openai_callback() as cb: if model_id in ['gpt-3.5-turbo-0125', 'gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-4-0125-preview', 'gpt-4-1106-preview', 'gpt-4', 'gpt-4-32k']: llm = ChatOpenAI( model=model_id, openai_api_key=open_ai_key, **model_kwargs) else: llm = OpenAI( model=model_id, openai_api_key=open_ai_key, **model_kwargs) llm_chain = LLMChain( llm=llm, prompt=prompt_template) output = llm_chain.run(user_input) st.success("Input: " + user_input + " \n\n " + "Output: " + output) st.text(cb) num_prompt_tokens = cb.prompt_tokens num_completion_tokens = cb.completion_tokens cost = cb.total_cost elif model_id in ['meta-llama/Llama-2-7b-chat-hf', 'meta-llama/Llama-2-13b-chat-hf']: if pipe == None: with st.status('Loading model %s' % model_id) as status: # to use the llama-2 models, # you first need to get access to the llama-2 models via e.g. 
https://huggingface.co/meta-llama/Llama-2-7b-chat-hf # once accepted, get a hugging face auth token https://huggingface.co/settings/tokens # and then run `huggingface-cli login` on the command line, filling in the generated token if model_id in ['meta-llama/Llama-2-7b-chat-hf', 'meta-llama/Llama-2-13b-chat-hf']: tokenizer = AutoTokenizer.from_pretrained( model_id, use_auth_token=True) else: tokenizer = AutoTokenizer.from_pretrained( model_id) if model_id == "meta-llama/Llama-2-13b-chat-hf": pipe = pipeline( "text-generation", model=model_id, tokenizer=tokenizer, # torch_dtype="auto", trust_remote_code=True, device_map="auto", num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, **model_kwargs ) else: pipe = pipeline( "text-generation", model=model_id, tokenizer=tokenizer, torch_dtype="auto", trust_remote_code=True, device_map="auto", num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, **model_kwargs ) local_llm = HuggingFacePipeline( pipeline=pipe) status.update( label='Model %s loaded' % model_id, state="complete") llm_chain = LLMChain( llm=local_llm, prompt=prompt_template) output = llm_chain.run(user_input) st.success("Input: " + user_input + " \n\n " + "Output: " + output) elif model_id in ['google/flan-t5-large', 'google/flan-t5-xl', 'tiiuae/falcon-7b-instruct', 'tiiuae/falcon-40b-instruct', 'databricks/dolly-v2-3b', 'databricks/dolly-v2-7b']: if pipe is None: with st.status('Loading model %s' % model_id) as status: tokenizer = AutoTokenizer.from_pretrained( model_id) if model_id in ['google/flan-t5-large', 'google/flan-t5-xl']: model = AutoModelForSeq2SeqLM.from_pretrained( model_id, load_in_8bit=False, device_map='auto') pipe = pipeline( "text2text-generation", model=model_id, tokenizer=tokenizer, torch_dtype="auto", trust_remote_code=True, device_map="auto", num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, **model_kwargs ) # elif model_id in ['tiiuae/falcon-7b-instruct', 'tiiuae/falcon-40b-instruct']: else: pipe = pipeline( "text-generation", model=model_id, tokenizer=tokenizer, torch_dtype="auto", trust_remote_code=True, device_map="auto", num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, **model_kwargs ) local_llm = HuggingFacePipeline( pipeline=pipe) status.update( label='Model %s loaded' % model_id, state="complete") llm_chain = LLMChain( llm=local_llm, prompt=prompt_template) output = llm_chain.run(user_input) st.success("Input: " + user_input + " \n\n " + "Output: " + output) elif model_id == "mosaicml/mpt-7b-instruct": if pipe is None: with st.status('Loading model %s' % model_id) as status: model = AutoModelForCausalLM.from_pretrained( model_id, trust_remote_code=True, torch_dtype=torch.bfloat16, max_seq_len=2048, device_map="auto" ) # MPT-7B model was trained using the EleutherAI/gpt-neox-20b tokenizer tokenizer = AutoTokenizer.from_pretrained( "EleutherAI/gpt-neox-20b") # mtp-7b is trained to add "<|endoftext|>" at the end of generations stop_token_ids = tokenizer.convert_tokens_to_ids( ["<|endoftext|>"]) # define custom stopping criteria object class StopOnTokens(StoppingCriteria): def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: for stop_id in stop_token_ids: if input_ids[0][-1] == stop_id: return True return False stopping_criteria = StoppingCriteriaList( [StopOnTokens()]) pipe = pipeline( task='text-generation', model=model, tokenizer=tokenizer, torch_dtype="auto", device_map="auto", num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, **model_kwargs, return_full_text=True, # 
langchain expects the full text stopping_criteria=stopping_criteria, # without this model will ramble repetition_penalty=1.1 # without this output begins repeating ) local_llm = HuggingFacePipeline( pipeline=pipe) status.update( label='Model %s loaded' % model_id, state="complete") llm_chain = LLMChain( llm=local_llm, prompt=prompt_template) output = llm_chain.run(user_input) st.success("Input: " + user_input + " \n\n " + "Output: " + output) elif model_id == "allenai/OLMo-7B" or model_id == "ehartford/dolphin-2.1-mistral-7b" or model_id == "lvkaokao/mistral-7b-finetuned-orca-dpo-v2" or model_id == "lmsys/vicuna-13b-v1.5" or model_id == "microsoft/Orca-2-13b": if pipe is None: with st.status('Loading model %s' % model_id) as status: model = AutoModelForCausalLM.from_pretrained( model_id, trust_remote_code=True, torch_dtype=torch.bfloat16, device_map="auto" ) if model_id == "ehartford/dolphin-2.1-mistral-7b": tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=False) else: tokenizer = AutoTokenizer.from_pretrained( model_id) pipe = pipeline( task='text-generation', model=model, tokenizer=tokenizer, torch_dtype="auto", device_map="auto", num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, **model_kwargs, return_full_text=True, # langchain expects the full text ) local_llm = HuggingFacePipeline( pipeline=pipe) status.update( label='Model %s loaded' % model_id, state="complete") llm_chain = LLMChain( llm=local_llm, prompt=prompt_template) output = llm_chain.run(user_input) st.success("Input: " + user_input + " \n\n " + "Output: " + output) else: st.error("Model %s not found" % model_id) exit(1) if not num_prompt_tokens or not num_completion_tokens: num_prompt_tokens = len(tokenizer.tokenize( prompt_template.format(user_input=user_input))) num_completion_tokens = len(tokenizer.tokenize( output)) # Prepare data as dictionary original_row = original_data.loc[key].copy() new_row = { 'user_input': user_input, 'output': output, 'llm': model_id, 'prompt name': task['name'], 'prompt authors': task['authors'], 'prompt': template, 'timestamp': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), '# prompt tokens': str(int(num_prompt_tokens)), '# completion tokens': str(int(num_completion_tokens)), 'max_new_tokens': int(model_kwargs['max_new_tokens']) if "max_new_tokens" in model_kwargs else None, 'do_sample': int(model_kwargs['do_sample']) if "do_sample" in model_kwargs else None, 'temperature': model_kwargs['temperature'] if "temperature" in model_kwargs else None, 'top_p': model_kwargs['top_p'] if "top_p" in model_kwargs else None, 'cost': cost if cost is not None else None } # Update the original row with the new data for key2, value in new_row.items(): original_row[key2] = value # Append the updated row to the DataFrame updated_row_df = pd.DataFrame([original_row]) data = pd.concat( [data, updated_row_df], ignore_index=True) st.subheader("Results") st.dataframe(data, column_config={}, hide_index=True) # make output available as csv csv = data.to_csv(index=False).encode('utf-8') ste.download_button( "Download CSV", csv, output_filename, "text/csv", ) end_time = time.time() elapsed_time = end_time - start_time st.write("End time: " + time.strftime("%H:%M:%S", time.localtime())) st.write("Elapsed time: " + str(round(elapsed_time, 2)) + " seconds") except Exception as e: st.error(e) finally: # free up variables if 'data' in locals() and data is not None: del data if 'pipe' in locals() and pipe is not None: del pipe if 'llm_chain' in locals() and llm_chain is not None: del llm_chain 
if 'llm' in locals() and llm is not None: del llm if 'local_llm' in locals() and local_llm is not None: del local_llm if 'model' in locals() and model is not None: del model if 'tokenizer' in locals() and tokenizer is not None: del tokenizer gc.collect() # garbage collection # empty cuda cache torch.cuda.empty_cache() if __name__ == "__main__": main()
[ "langchain.llms.OpenAI", "langchain.llms.HuggingFacePipeline", "langchain.chat_models.ChatOpenAI", "langchain.callbacks.get_openai_callback", "langchain.chains.LLMChain", "langchain.prompts.PromptTemplate" ]
[((813, 832), 'dotenv.load_dotenv', 'load_dotenv', (['""".env"""'], {}), "('.env')\n", (824, 832), False, 'from dotenv import load_dotenv\n'), ((1156, 1212), 'streamlit.markdown', 'st.markdown', (['hide_default_format'], {'unsafe_allow_html': '(True)'}), '(hide_default_format, unsafe_allow_html=True)\n', (1167, 1212), True, 'import streamlit as st\n'), ((1230, 1256), 'streamlit.title', 'st.title', (['"""Prompt Compass"""'], {}), "('Prompt Compass')\n", (1238, 1256), True, 'import streamlit as st\n'), ((1261, 1390), 'streamlit.subheader', 'st.subheader', (['"""A Tool for Navigating LLMs and Prompts for Computational Social Science and Digital Humanities Research"""'], {}), "(\n 'A Tool for Navigating LLMs and Prompts for Computational Social Science and Digital Humanities Research'\n )\n", (1273, 1390), True, 'import streamlit as st\n'), ((1422, 1705), 'streamlit.markdown', 'st.markdown', (['"""\n [![Repo](https://badgen.net/badge/icon/GitHub?icon=github&label)](https://github.com/ErikBorra/PromptCompass)\n [![DOI](https://zenodo.org/badge/649855474.svg)](https://zenodo.org/badge/latestdoi/649855474)\n """'], {'unsafe_allow_html': '(True)'}), '(\n """\n [![Repo](https://badgen.net/badge/icon/GitHub?icon=github&label)](https://github.com/ErikBorra/PromptCompass)\n [![DOI](https://zenodo.org/badge/649855474.svg)](https://zenodo.org/badge/latestdoi/649855474)\n """\n , unsafe_allow_html=True)\n', (1433, 1705), True, 'import streamlit as st\n'), ((1919, 2005), 'streamlit.selectbox', 'st.selectbox', (['"""Select a model"""', 'model_with_names'], {'format_func': "(lambda x: x['name'])"}), "('Select a model', model_with_names, format_func=lambda x: x[\n 'name'])\n", (1931, 2005), True, 'import streamlit as st\n'), ((2250, 2451), 'streamlit.caption', 'st.caption', (['(f"Model info: [{input_values[\'model\'][\'name\']}]({input_values[\'model\'][\'resource\']})"\n + (f". {input_values[\'model\'][\'comment\']}" if \'comment\' in\n input_values[\'model\'] else \'\'))'], {}), '(\n f"Model info: [{input_values[\'model\'][\'name\']}]({input_values[\'model\'][\'resource\']})"\n + (f". 
{input_values[\'model\'][\'comment\']}" if \'comment\' in\n input_values[\'model\'] else \'\'))\n', (2260, 2451), True, 'import streamlit as st\n'), ((7021, 7129), 'streamlit.selectbox', 'st.selectbox', (['"""Select a task"""', 'tasks_with_names'], {'format_func': "(lambda x: x['name'] + ' - ' + x['authors'])"}), "('Select a task', tasks_with_names, format_func=lambda x: x[\n 'name'] + ' - ' + x['authors'])\n", (7033, 7129), True, 'import streamlit as st\n'), ((9240, 9387), 'streamlit.number_input', 'st.number_input', (['"""Enter the number of times the prompt/input combination should be repeated:"""'], {'min_value': '(1)', 'max_value': '(10)', 'value': '(1)', 'step': '(1)'}), "(\n 'Enter the number of times the prompt/input combination should be repeated:'\n , min_value=1, max_value=10, value=1, step=1)\n", (9255, 9387), True, 'import streamlit as st\n'), ((9428, 9447), 'streamlit.button', 'st.button', (['"""Submit"""'], {}), "('Submit')\n", (9437, 9447), True, 'import streamlit as st\n'), ((9453, 9468), 'streamlit.write', 'st.write', (['"""---"""'], {}), "('---')\n", (9461, 9468), True, 'import streamlit as st\n'), ((990, 1002), 'json.load', 'json.load', (['f'], {}), '(f)\n', (999, 1002), False, 'import json\n'), ((2132, 2170), 'streamlit.session_state.get', 'st.session_state.get', (['"""previous_model"""'], {}), "('previous_model')\n", (2152, 2170), True, 'import streamlit as st\n'), ((3045, 3077), 'streamlit.expander', 'st.expander', (['"""Advanced settings"""'], {}), "('Advanced settings')\n", (3056, 3077), True, 'import streamlit as st\n'), ((4283, 4592), 'streamlit.markdown', 'st.markdown', (['"""\n **Temperature**: Controls the randomness in the model\'s responses.\n Lower values (closer to 0.0) make the output more deterministic, while higher values (closer to 2.0) make it more diverse.\n A value of -1 means the parameter will not be specified.\n """'], {}), '(\n """\n **Temperature**: Controls the randomness in the model\'s responses.\n Lower values (closer to 0.0) make the output more deterministic, while higher values (closer to 2.0) make it more diverse.\n A value of -1 means the parameter will not be specified.\n """\n )\n', (4294, 4592), True, 'import streamlit as st\n'), ((4627, 4725), 'streamlit.number_input', 'st.number_input', (['"""Set Temperature"""'], {'min_value': '(-1.0)', 'max_value': '(2.0)', 'value': '(0.001)', 'format': '"""%.3f"""'}), "('Set Temperature', min_value=-1.0, max_value=2.0, value=\n 0.001, format='%.3f')\n", (4642, 4725), True, 'import streamlit as st\n'), ((4743, 5184), 'streamlit.markdown', 'st.markdown', (['"""\n **Top P**: Also known as "nucleus sampling", is an alternative to temperature that can also be used to control the randomness of the model\'s responses.\n It essentially trims the less likely options in the model\'s distribution of possible responses. Possible values lie between 0.0 and 1.0. \n A value of -1 means the parameter will not be specified. Only applies if do_sample=True.\n """'], {}), '(\n """\n **Top P**: Also known as "nucleus sampling", is an alternative to temperature that can also be used to control the randomness of the model\'s responses.\n It essentially trims the less likely options in the model\'s distribution of possible responses. Possible values lie between 0.0 and 1.0. \n A value of -1 means the parameter will not be specified. 
Only applies if do_sample=True.\n """\n )\n', (4754, 5184), True, 'import streamlit as st\n'), ((5213, 5284), 'streamlit.number_input', 'st.number_input', (['"""Set Top-P"""'], {'min_value': '(-1.0)', 'max_value': '(1.0)', 'value': '(-1.0)'}), "('Set Top-P', min_value=-1.0, max_value=1.0, value=-1.0)\n", (5228, 5284), True, 'import streamlit as st\n'), ((6364, 6482), 'streamlit.error', 'st.error', (['"""Temperature value must be between 0 and 2. Choose -1 if you want to use the default model value."""'], {}), "(\n 'Temperature value must be between 0 and 2. Choose -1 if you want to use the default model value.'\n )\n", (6372, 6482), True, 'import streamlit as st\n'), ((6668, 6780), 'streamlit.error', 'st.error', (['"""Top P value must be between 0 and 1. Choose -1 if you want to use the default model value."""'], {}), "(\n 'Top P value must be between 0 and 1. Choose -1 if you want to use the default model value.'\n )\n", (6676, 6780), True, 'import streamlit as st\n'), ((7384, 7517), 'streamlit.text_area', 'st.text_area', (["('Inspect, and possibly modify, the prompt by [' + task['authors'] + '](' +\n task['paper'] + ')')", 'prompt'], {'height': '(200)'}), "('Inspect, and possibly modify, the prompt by [' + task[\n 'authors'] + '](' + task['paper'] + ')', prompt, height=200)\n", (7396, 7517), True, 'import streamlit as st\n'), ((7590, 7669), 'streamlit.radio', 'st.radio', (['"""Choose input type:"""', "('Text input', 'Upload a CSV')"], {'horizontal': '(True)'}), "('Choose input type:', ('Text input', 'Upload a CSV'), horizontal=True)\n", (7598, 7669), True, 'import streamlit as st\n'), ((8948, 8962), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (8960, 8962), True, 'import pandas as pd\n'), ((9118, 9144), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (9134, 9144), False, 'import os\n'), ((2820, 2856), 'streamlit.text_input', 'st.text_input', (['"""Open AI API Key"""', '""""""'], {}), "('Open AI API Key', '')\n", (2833, 2856), True, 'import streamlit as st\n'), ((2897, 2924), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (2906, 2924), False, 'import os\n'), ((3245, 3542), 'streamlit.markdown', 'st.markdown', (['"""\n **Set Maximum Length**: Determines the maximum number of tokens of the **generated** text. A token is approximately four characters word, although this depends on the model.\n A value of -1 means the parameter will not be specified.\n """'], {}), '(\n """\n **Set Maximum Length**: Determines the maximum number of tokens of the **generated** text. A token is approximately four characters word, although this depends on the model.\n A value of -1 means the parameter will not be specified.\n """\n )\n', (3256, 3542), True, 'import streamlit as st\n'), ((3592, 3658), 'streamlit.number_input', 'st.number_input', (['"""Maximum Length"""'], {'value': '(256)', 'min_value': '(-1)', 'step': '(1)'}), "('Maximum Length', value=256, min_value=-1, step=1)\n", (3607, 3658), True, 'import streamlit as st\n'), ((3688, 4139), 'streamlit.markdown', 'st.markdown', (['"""\n **Set do_sample**: This controls how the model generates text. If do_sample=True, the model will use a probabilistic approach to generate text, where the likelihood of each word being chosen depends on its predicted probability. Use the below parameters to further control its behavior. If do_sample=False, the model will use a deterministic approach and always choose the most likely next word. 
\n """'], {}), '(\n """\n **Set do_sample**: This controls how the model generates text. If do_sample=True, the model will use a probabilistic approach to generate text, where the likelihood of each word being chosen depends on its predicted probability. Use the below parameters to further control its behavior. If do_sample=False, the model will use a deterministic approach and always choose the most likely next word. \n """\n )\n', (3699, 4139), True, 'import streamlit as st\n'), ((4184, 4228), 'streamlit.radio', 'st.radio', (['"""Set do_sample"""', "('False', 'True')"], {}), "('Set do_sample', ('False', 'True'))\n", (4192, 4228), True, 'import streamlit as st\n'), ((5692, 5805), 'streamlit.error', 'st.error', (['"""Error: Max Tokens must be at least 1. Choose -1 if you want to use the default model value."""'], {}), "(\n 'Error: Max Tokens must be at least 1. Choose -1 if you want to use the default model value.'\n )\n", (5700, 5805), True, 'import streamlit as st\n'), ((6024, 6074), 'streamlit.error', 'st.error', (['"""Error: do_Sample must be True or False"""'], {}), "('Error: do_Sample must be True or False')\n", (6032, 6074), True, 'import streamlit as st\n'), ((7822, 7975), 'streamlit.text_area', 'st.text_area', (['"""Input to be analyzed with the prompt (one thing per line):"""', '"""this user is happy\none user is just a user\nthe other user is a lier"""'], {}), '(\'Input to be analyzed with the prompt (one thing per line):\',\n """this user is happy\none user is just a user\nthe other user is a lier""")\n', (7834, 7975), True, 'import streamlit as st\n'), ((8248, 8306), 'pandas.DataFrame', 'pd.DataFrame', (["input_values['user']"], {'columns': "['user_input']"}), "(input_values['user'], columns=['user_input'])\n", (8260, 8306), True, 'import pandas as pd\n'), ((8391, 8440), 'streamlit.file_uploader', 'st.file_uploader', (['"""Choose a CSV file"""'], {'type': '"""csv"""'}), "('Choose a CSV file', type='csv')\n", (8407, 8440), True, 'import streamlit as st\n'), ((9642, 9676), 'streamlit.error', 'st.error', (['"""No user input provided"""'], {}), "('No user input provided')\n", (9650, 9676), True, 'import streamlit as st\n'), ((2720, 2747), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (2729, 2747), False, 'import os\n'), ((2759, 2786), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (2768, 2786), False, 'import os\n'), ((8575, 8601), 'pandas.read_csv', 'pd.read_csv', (['uploaded_file'], {}), '(uploaded_file)\n', (8586, 8601), True, 'import pandas as pd\n'), ((8685, 8763), 'streamlit.selectbox', 'st.selectbox', (['"""Choose a column to apply the prompt on:"""', 'original_data.columns'], {}), "('Choose a column to apply the prompt on:', original_data.columns)\n", (8697, 8763), True, 'import streamlit as st\n'), ((9709, 9742), 'streamlit.spinner', 'st.spinner', ([], {'text': '"""In progress..."""'}), "(text='In progress...')\n", (9719, 9742), True, 'import streamlit as st\n'), ((9800, 9811), 'time.time', 'time.time', ([], {}), '()\n', (9809, 9811), False, 'import time\n'), ((29699, 29710), 'time.time', 'time.time', ([], {}), '()\n', (29708, 29710), False, 'import time\n'), ((30887, 30899), 'gc.collect', 'gc.collect', ([], {}), '()\n', (30897, 30899), False, 'import gc\n'), ((30981, 31005), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (31003, 31005), False, 'import torch\n'), ((10885, 10950), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['user_input']", 
'template': 'template'}), "(input_variables=['user_input'], template=template)\n", (10899, 10950), False, 'from langchain.prompts import PromptTemplate\n'), ((29168, 29191), 'streamlit.subheader', 'st.subheader', (['"""Results"""'], {}), "('Results')\n", (29180, 29191), True, 'import streamlit as st\n'), ((29216, 29269), 'streamlit.dataframe', 'st.dataframe', (['data'], {'column_config': '{}', 'hide_index': '(True)'}), '(data, column_config={}, hide_index=True)\n', (29228, 29269), True, 'import streamlit as st\n'), ((29458, 29527), 'streamlit_ext.download_button', 'ste.download_button', (['"""Download CSV"""', 'csv', 'output_filename', '"""text/csv"""'], {}), "('Download CSV', csv, output_filename, 'text/csv')\n", (29477, 29527), True, 'import streamlit_ext as ste\n'), ((30065, 30076), 'streamlit.error', 'st.error', (['e'], {}), '(e)\n', (30073, 30076), True, 'import streamlit as st\n'), ((9913, 9929), 'time.localtime', 'time.localtime', ([], {}), '()\n', (9927, 9929), False, 'import time\n'), ((28985, 29013), 'pandas.DataFrame', 'pd.DataFrame', (['[original_row]'], {}), '([original_row])\n', (28997, 29013), True, 'import pandas as pd\n'), ((29053, 29105), 'pandas.concat', 'pd.concat', (['[data, updated_row_df]'], {'ignore_index': '(True)'}), '([data, updated_row_df], ignore_index=True)\n', (29062, 29105), True, 'import pandas as pd\n'), ((29867, 29883), 'time.localtime', 'time.localtime', ([], {}), '()\n', (29881, 29883), False, 'import time\n'), ((12017, 12062), 'streamlit.error', 'st.error', (['"""Please provide an Open AI API Key"""'], {}), "('Please provide an Open AI API Key')\n", (12025, 12062), True, 'import streamlit as st\n'), ((12197, 12218), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (12216, 12218), False, 'from langchain.callbacks import get_openai_callback\n'), ((12844, 12885), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt_template'}), '(llm=llm, prompt=prompt_template)\n', (12852, 12885), False, 'from langchain.chains import LLMChain\n'), ((13048, 13117), 'streamlit.success', 'st.success', (["('Input: ' + user_input + ' \\n\\n ' + 'Output: ' + output)"], {}), "('Input: ' + user_input + ' \\n\\n ' + 'Output: ' + output)\n", (13058, 13117), True, 'import streamlit as st\n'), ((13209, 13220), 'streamlit.text', 'st.text', (['cb'], {}), '(cb)\n', (13216, 13220), True, 'import streamlit as st\n'), ((16761, 16808), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'local_llm', 'prompt': 'prompt_template'}), '(llm=local_llm, prompt=prompt_template)\n', (16769, 16808), False, 'from langchain.chains import LLMChain\n'), ((16959, 17028), 'streamlit.success', 'st.success', (["('Input: ' + user_input + ' \\n\\n ' + 'Output: ' + output)"], {}), "('Input: ' + user_input + ' \\n\\n ' + 'Output: ' + output)\n", (16969, 17028), True, 'import streamlit as st\n'), ((27858, 27874), 'time.localtime', 'time.localtime', ([], {}), '()\n', (27872, 27874), False, 'import time\n'), ((12459, 12529), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': 'model_id', 'openai_api_key': 'open_ai_key'}), '(model=model_id, openai_api_key=open_ai_key, **model_kwargs)\n', (12469, 12529), False, 'from langchain.chat_models import ChatOpenAI\n'), ((12675, 12741), 'langchain.llms.OpenAI', 'OpenAI', ([], {'model': 'model_id', 'openai_api_key': 'open_ai_key'}), '(model=model_id, openai_api_key=open_ai_key, **model_kwargs)\n', (12681, 12741), False, 'from langchain.llms import OpenAI\n'), ((19967, 20014), 
'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'local_llm', 'prompt': 'prompt_template'}), '(llm=local_llm, prompt=prompt_template)\n', (19975, 20014), False, 'from langchain.chains import LLMChain\n'), ((20165, 20234), 'streamlit.success', 'st.success', (["('Input: ' + user_input + ' \\n\\n ' + 'Output: ' + output)"], {}), "('Input: ' + user_input + ' \\n\\n ' + 'Output: ' + output)\n", (20175, 20234), True, 'import streamlit as st\n'), ((13661, 13701), 'streamlit.status', 'st.status', (["('Loading model %s' % model_id)"], {}), "('Loading model %s' % model_id)\n", (13670, 13701), True, 'import streamlit as st\n'), ((16474, 16508), 'langchain.llms.HuggingFacePipeline', 'HuggingFacePipeline', ([], {'pipeline': 'pipe'}), '(pipeline=pipe)\n', (16493, 16508), False, 'from langchain.llms import HuggingFacePipeline\n'), ((23653, 23700), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'local_llm', 'prompt': 'prompt_template'}), '(llm=local_llm, prompt=prompt_template)\n', (23661, 23700), False, 'from langchain.chains import LLMChain\n'), ((23851, 23920), 'streamlit.success', 'st.success', (["('Input: ' + user_input + ' \\n\\n ' + 'Output: ' + output)"], {}), "('Input: ' + user_input + ' \\n\\n ' + 'Output: ' + output)\n", (23861, 23920), True, 'import streamlit as st\n'), ((14398, 14458), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_id'], {'use_auth_token': '(True)'}), '(model_id, use_auth_token=True)\n', (14427, 14458), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((14622, 14661), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_id'], {}), '(model_id)\n', (14651, 14661), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((14864, 15056), 'transformers.pipeline', 'pipeline', (['"""text-generation"""'], {'model': 'model_id', 'tokenizer': 'tokenizer', 'trust_remote_code': '(True)', 'device_map': '"""auto"""', 'num_return_sequences': '(1)', 'eos_token_id': 'tokenizer.eos_token_id'}), "('text-generation', model=model_id, tokenizer=tokenizer,\n trust_remote_code=True, device_map='auto', num_return_sequences=1,\n eos_token_id=tokenizer.eos_token_id, **model_kwargs)\n", (14872, 15056), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((15694, 15911), 'transformers.pipeline', 'pipeline', (['"""text-generation"""'], {'model': 'model_id', 'tokenizer': 'tokenizer', 'torch_dtype': '"""auto"""', 'trust_remote_code': '(True)', 'device_map': '"""auto"""', 'num_return_sequences': '(1)', 'eos_token_id': 'tokenizer.eos_token_id'}), "('text-generation', model=model_id, tokenizer=tokenizer,\n torch_dtype='auto', trust_remote_code=True, device_map='auto',\n num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, **model_kwargs\n )\n", (15702, 15911), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((17381, 17421), 'streamlit.status', 'st.status', (["('Loading model %s' % model_id)"], {}), "('Loading model %s' % model_id)\n", (17390, 17421), True, 'import streamlit as st\n'), ((17489, 17528), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_id'], {}), '(model_id)\n', (17518, 17528), False, 
'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((19673, 19707), 'langchain.llms.HuggingFacePipeline', 'HuggingFacePipeline', ([], {'pipeline': 'pipe'}), '(pipeline=pipe)\n', (19692, 19707), False, 'from langchain.llms import HuggingFacePipeline\n'), ((26365, 26412), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'local_llm', 'prompt': 'prompt_template'}), '(llm=local_llm, prompt=prompt_template)\n', (26373, 26412), False, 'from langchain.chains import LLMChain\n'), ((26563, 26632), 'streamlit.success', 'st.success', (["('Input: ' + user_input + ' \\n\\n ' + 'Output: ' + output)"], {}), "('Input: ' + user_input + ' \\n\\n ' + 'Output: ' + output)\n", (26573, 26632), True, 'import streamlit as st\n'), ((26754, 26795), 'streamlit.error', 'st.error', (["('Model %s not found' % model_id)"], {}), "('Model %s not found' % model_id)\n", (26762, 26795), True, 'import streamlit as st\n'), ((17741, 17831), 'transformers.AutoModelForSeq2SeqLM.from_pretrained', 'AutoModelForSeq2SeqLM.from_pretrained', (['model_id'], {'load_in_8bit': '(False)', 'device_map': '"""auto"""'}), "(model_id, load_in_8bit=False,\n device_map='auto')\n", (17778, 17831), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((17936, 18158), 'transformers.pipeline', 'pipeline', (['"""text2text-generation"""'], {'model': 'model_id', 'tokenizer': 'tokenizer', 'torch_dtype': '"""auto"""', 'trust_remote_code': '(True)', 'device_map': '"""auto"""', 'num_return_sequences': '(1)', 'eos_token_id': 'tokenizer.eos_token_id'}), "('text2text-generation', model=model_id, tokenizer=tokenizer,\n torch_dtype='auto', trust_remote_code=True, device_map='auto',\n num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, **model_kwargs\n )\n", (17944, 18158), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((18893, 19110), 'transformers.pipeline', 'pipeline', (['"""text-generation"""'], {'model': 'model_id', 'tokenizer': 'tokenizer', 'torch_dtype': '"""auto"""', 'trust_remote_code': '(True)', 'device_map': '"""auto"""', 'num_return_sequences': '(1)', 'eos_token_id': 'tokenizer.eos_token_id'}), "('text-generation', model=model_id, tokenizer=tokenizer,\n torch_dtype='auto', trust_remote_code=True, device_map='auto',\n num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, **model_kwargs\n )\n", (18901, 19110), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((20457, 20497), 'streamlit.status', 'st.status', (["('Loading model %s' % model_id)"], {}), "('Loading model %s' % model_id)\n", (20466, 20497), True, 'import streamlit as st\n'), ((20562, 20701), 'transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['model_id'], {'trust_remote_code': '(True)', 'torch_dtype': 'torch.bfloat16', 'max_seq_len': '(2048)', 'device_map': '"""auto"""'}), "(model_id, trust_remote_code=True,\n torch_dtype=torch.bfloat16, max_seq_len=2048, device_map='auto')\n", (20598, 20701), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((21156, 21212), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', 
(['"""EleutherAI/gpt-neox-20b"""'], {}), "('EleutherAI/gpt-neox-20b')\n", (21185, 21212), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((22359, 22638), 'transformers.pipeline', 'pipeline', ([], {'task': '"""text-generation"""', 'model': 'model', 'tokenizer': 'tokenizer', 'torch_dtype': '"""auto"""', 'device_map': '"""auto"""', 'num_return_sequences': '(1)', 'eos_token_id': 'tokenizer.eos_token_id', 'return_full_text': '(True)', 'stopping_criteria': 'stopping_criteria', 'repetition_penalty': '(1.1)'}), "(task='text-generation', model=model, tokenizer=tokenizer,\n torch_dtype='auto', device_map='auto', num_return_sequences=1,\n eos_token_id=tokenizer.eos_token_id, **model_kwargs, return_full_text=\n True, stopping_criteria=stopping_criteria, repetition_penalty=1.1)\n", (22367, 22638), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((23366, 23400), 'langchain.llms.HuggingFacePipeline', 'HuggingFacePipeline', ([], {'pipeline': 'pipe'}), '(pipeline=pipe)\n', (23385, 23400), False, 'from langchain.llms import HuggingFacePipeline\n'), ((24320, 24360), 'streamlit.status', 'st.status', (["('Loading model %s' % model_id)"], {}), "('Loading model %s' % model_id)\n", (24329, 24360), True, 'import streamlit as st\n'), ((24425, 24546), 'transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['model_id'], {'trust_remote_code': '(True)', 'torch_dtype': 'torch.bfloat16', 'device_map': '"""auto"""'}), "(model_id, trust_remote_code=True,\n torch_dtype=torch.bfloat16, device_map='auto')\n", (24461, 24546), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((25301, 25514), 'transformers.pipeline', 'pipeline', ([], {'task': '"""text-generation"""', 'model': 'model', 'tokenizer': 'tokenizer', 'torch_dtype': '"""auto"""', 'device_map': '"""auto"""', 'num_return_sequences': '(1)', 'eos_token_id': 'tokenizer.eos_token_id', 'return_full_text': '(True)'}), "(task='text-generation', model=model, tokenizer=tokenizer,\n torch_dtype='auto', device_map='auto', num_return_sequences=1,\n eos_token_id=tokenizer.eos_token_id, **model_kwargs, return_full_text=True)\n", (25309, 25514), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((26078, 26112), 'langchain.llms.HuggingFacePipeline', 'HuggingFacePipeline', ([], {'pipeline': 'pipe'}), '(pipeline=pipe)\n', (26097, 26112), False, 'from langchain.llms import HuggingFacePipeline\n'), ((24937, 24992), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_id'], {'use_fast': '(False)'}), '(model_id, use_fast=False)\n', (24966, 24992), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((25156, 25195), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_id'], {}), '(model_id)\n', (25185, 25195), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n')]
import os import re import streamlit as st import pandas as pd import langchain from langchain.agents import AgentExecutor from langchain.callbacks import StreamlitCallbackHandler from langchain.chat_models import ChatOpenAI from langchain.tools import PythonAstREPLTool from langchain.schema import SystemMessage from tools import PythonPlotTool from agents import OpenAIFunctionsAgentFix # https://github.com/langchain-ai/langchain/issues/6364 @st.cache_data def load_csv(csv) -> pd.DataFrame: return pd.read_csv(csv) def get_agent(df, openai_api_key, number_of_head_rows=5, outdir="./datavizqa/static"): SYSTEM_PROMPT = """You are working with a pandas dataframe in Python. The name of the dataframe is `df`. This is the result of `print(df.head())`: {df_head}""".format(df_head=str(df.head(number_of_head_rows).to_markdown())) tools = [ PythonPlotTool(locals={"df": df}, outdir=outdir), PythonAstREPLTool(name="python", locals={"df": df}), ] llm = ChatOpenAI( model="gpt-3.5-turbo-0613", openai_api_key=openai_api_key, temperature=0, streaming=True, ) agent = OpenAIFunctionsAgentFix.from_llm_and_tools( llm=llm, tools=tools, system_message=SystemMessage(content=SYSTEM_PROMPT), ) agent_exe = AgentExecutor.from_agent_and_tools(agent, tools) return agent_exe langchain.debug = os.getenv("LANGCHAIN_DEBUG") RE_MARKDOWN_IMAGE = r"!\[(.*?)\]\((.*?)\)" st.set_page_config(page_title="DataVizQA", page_icon="🤖") st.title("QA on your data with visualizations") custom_css = """ <style> p > img { width: 100% } </style> """ st.markdown(custom_css, unsafe_allow_html=True) ss = st.session_state with st.sidebar: ss.openai_api_key = st.text_input("Your OpenAI API key", placeholder="sk-xxxx") ss.cot = st.radio( "Expand new thoughts", [False, True], format_func=lambda x: "Yes" if x else "No") csv = st.file_uploader("Upload your CSV file", type=["csv"]) if csv is not None: df = load_csv(csv) st.dataframe(df.head()) if key := ss.openai_api_key or os.getenv("OPENAI_API_KEY"): ss.agent = get_agent(df, openai_api_key=key) if "agent" in ss: if "messages" not in ss: ss.messages = [{"role": "assistant", "content": "Data loaded! Ask me anything! I can also plot charts!"}] for message in ss.messages: st.chat_message(message["role"]).write(message["content"]) if question := st.chat_input(placeholder="Your question"): ss.messages.append({"role": "user", "content": question}) st.chat_message("user").write(question) with st.chat_message("assistant"): handler = StreamlitCallbackHandler(st.container(), expand_new_thoughts=ss.cot) output_image = "" for step in ss.agent.iter(question, callbacks=[handler]): if output := step.get("intermediate_step"): action, value = output[0] if action.tool == "python_plot": output_image = value answer = step.get("output") if output_image: if re.search(RE_MARKDOWN_IMAGE, answer): answer = re.sub(RE_MARKDOWN_IMAGE, f"![\g<1>]({output_image})", answer) else: answer = answer + "\n" + f"![{output_image.split('/')[0]}]({output_image})" ss.messages.append({"role": "assistant", "content": answer}) st.write(answer)
[ "langchain.agents.AgentExecutor.from_agent_and_tools", "langchain.tools.PythonAstREPLTool", "langchain.schema.SystemMessage", "langchain.chat_models.ChatOpenAI" ]
[((1411, 1439), 'os.getenv', 'os.getenv', (['"""LANGCHAIN_DEBUG"""'], {}), "('LANGCHAIN_DEBUG')\n", (1420, 1439), False, 'import os\n'), ((1486, 1543), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""DataVizQA"""', 'page_icon': '"""🤖"""'}), "(page_title='DataVizQA', page_icon='🤖')\n", (1504, 1543), True, 'import streamlit as st\n'), ((1544, 1591), 'streamlit.title', 'st.title', (['"""QA on your data with visualizations"""'], {}), "('QA on your data with visualizations')\n", (1552, 1591), True, 'import streamlit as st\n'), ((1670, 1717), 'streamlit.markdown', 'st.markdown', (['custom_css'], {'unsafe_allow_html': '(True)'}), '(custom_css, unsafe_allow_html=True)\n', (1681, 1717), True, 'import streamlit as st\n'), ((1964, 2018), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload your CSV file"""'], {'type': "['csv']"}), "('Upload your CSV file', type=['csv'])\n", (1980, 2018), True, 'import streamlit as st\n'), ((513, 529), 'pandas.read_csv', 'pd.read_csv', (['csv'], {}), '(csv)\n', (524, 529), True, 'import pandas as pd\n'), ((1005, 1109), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-3.5-turbo-0613"""', 'openai_api_key': 'openai_api_key', 'temperature': '(0)', 'streaming': '(True)'}), "(model='gpt-3.5-turbo-0613', openai_api_key=openai_api_key,\n temperature=0, streaming=True)\n", (1015, 1109), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1322, 1370), 'langchain.agents.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', (['agent', 'tools'], {}), '(agent, tools)\n', (1356, 1370), False, 'from langchain.agents import AgentExecutor\n'), ((1783, 1842), 'streamlit.text_input', 'st.text_input', (['"""Your OpenAI API key"""'], {'placeholder': '"""sk-xxxx"""'}), "('Your OpenAI API key', placeholder='sk-xxxx')\n", (1796, 1842), True, 'import streamlit as st\n'), ((1856, 1950), 'streamlit.radio', 'st.radio', (['"""Expand new thoughts"""', '[False, True]'], {'format_func': "(lambda x: 'Yes' if x else 'No')"}), "('Expand new thoughts', [False, True], format_func=lambda x: 'Yes' if\n x else 'No')\n", (1864, 1950), True, 'import streamlit as st\n'), ((878, 926), 'tools.PythonPlotTool', 'PythonPlotTool', ([], {'locals': "{'df': df}", 'outdir': 'outdir'}), "(locals={'df': df}, outdir=outdir)\n", (892, 926), False, 'from tools import PythonPlotTool\n'), ((936, 987), 'langchain.tools.PythonAstREPLTool', 'PythonAstREPLTool', ([], {'name': '"""python"""', 'locals': "{'df': df}"}), "(name='python', locals={'df': df})\n", (953, 987), False, 'from langchain.tools import PythonAstREPLTool\n'), ((2488, 2530), 'streamlit.chat_input', 'st.chat_input', ([], {'placeholder': '"""Your question"""'}), "(placeholder='Your question')\n", (2501, 2530), True, 'import streamlit as st\n'), ((1262, 1298), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'SYSTEM_PROMPT'}), '(content=SYSTEM_PROMPT)\n', (1275, 1298), False, 'from langchain.schema import SystemMessage\n'), ((2125, 2152), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (2134, 2152), False, 'import os\n'), ((2659, 2687), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (2674, 2687), True, 'import streamlit as st\n'), ((3507, 3523), 'streamlit.write', 'st.write', (['answer'], {}), '(answer)\n', (3515, 3523), True, 'import streamlit as st\n'), ((2409, 2441), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (2424, 2441), True, 
'import streamlit as st\n'), ((2606, 2629), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (2621, 2629), True, 'import streamlit as st\n'), ((2736, 2750), 'streamlit.container', 'st.container', ([], {}), '()\n', (2748, 2750), True, 'import streamlit as st\n'), ((3173, 3209), 're.search', 're.search', (['RE_MARKDOWN_IMAGE', 'answer'], {}), '(RE_MARKDOWN_IMAGE, answer)\n', (3182, 3209), False, 'import re\n'), ((3240, 3303), 're.sub', 're.sub', (['RE_MARKDOWN_IMAGE', 'f"""![\\\\g<1>]({output_image})"""', 'answer'], {}), "(RE_MARKDOWN_IMAGE, f'![\\\\g<1>]({output_image})', answer)\n", (3246, 3303), False, 'import re\n')]
import inspect from pathlib import Path from typing import List from langchain.chains import LLMChain from langchain.chat_models.base import BaseChatModel from langchain.prompts import PromptTemplate def get_documents(file_path: Path, llm: BaseChatModel): file_extension = file_path.suffix loader_class_name = get_best_loader(file_extension, llm) print(f"loader selected {loader_class_name} for {file_path}") if loader_class_name == "None": raise Exception(f"No loader found for {file_extension} files.") loader_class = get_loader_class(loader_class_name) loader = loader_class(str(file_path)) return loader.load() def get_loader_class(loader_class_name: str): import langchain.document_loaders loader_class = getattr(langchain.document_loaders, loader_class_name) return loader_class def get_best_loader(file_extension: str, llm: BaseChatModel): loaders = get_loaders() prompt = PromptTemplate( input_variables=["file_extension", "loaders"], template=""" Among the following loaders, which is the best to load a "{file_extension}" file? \ Only give me one the class name without any other special characters. If no relevant loader is found, respond "None". Loaders: {loaders} """, ) chain = LLMChain(llm=llm, prompt=prompt, output_key="loader_class_name") return chain({"file_extension": file_extension, "loaders": loaders})["loader_class_name"] def get_loaders() -> List[str]: import langchain_community.document_loaders loaders = [] for _, obj in inspect.getmembers(langchain_community.document_loaders): if inspect.isclass(obj): loaders.append(obj.__name__) return loaders
[ "langchain.chains.LLMChain", "langchain.prompts.PromptTemplate" ]
[((946, 1275), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['file_extension', 'loaders']", 'template': '"""\n Among the following loaders, which is the best to load a "{file_extension}" file? Only give me one the class name without any other special characters. If no relevant loader is found, respond "None".\n\n Loaders: {loaders}\n """'}), '(input_variables=[\'file_extension\', \'loaders\'], template=\n """\n Among the following loaders, which is the best to load a "{file_extension}" file? Only give me one the class name without any other special characters. If no relevant loader is found, respond "None".\n\n Loaders: {loaders}\n """\n )\n', (960, 1275), False, 'from langchain.prompts import PromptTemplate\n'), ((1303, 1367), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt', 'output_key': '"""loader_class_name"""'}), "(llm=llm, prompt=prompt, output_key='loader_class_name')\n", (1311, 1367), False, 'from langchain.chains import LLMChain\n'), ((1581, 1637), 'inspect.getmembers', 'inspect.getmembers', (['langchain_community.document_loaders'], {}), '(langchain_community.document_loaders)\n', (1599, 1637), False, 'import inspect\n'), ((1650, 1670), 'inspect.isclass', 'inspect.isclass', (['obj'], {}), '(obj)\n', (1665, 1670), False, 'import inspect\n')]
"""Streamlit app for the ChatGPT clone.""" import dotenv import langchain import streamlit as st import streamlit_chat dotenv.load_dotenv(dotenv.find_dotenv(), override=True) st.set_page_config( page_title='You Custom Assistant', page_icon='🤖' ) st.subheader('Your Custom ChatGPT 🤖') chat = langchain.chat_models.ChatOpenAI( model_name='gpt-3.5-turbo', temperature=0.5) # creating the messages (chat history) in the Streamlit session state if 'messages' not in st.session_state: st.session_state.messages = [] # creating the sidebar with st.sidebar: # streamlit text input widget for the system message (role) system_message = st.text_input(label='System role') # streamlit text input widget for the user message user_prompt = st.text_input(label='Send a message') if system_message: if not any(isinstance(x, langchain.schema.SystemMessage) for x in st.session_state.messages): st.session_state.messages.append( langchain.schema.SystemMessage(content=system_message) ) # if the user entered a question if user_prompt: st.session_state.messages.append( langchain.schema.HumanMessage(content=user_prompt) ) with st.spinner('Working on your request ...'): # creating the ChatGPT response response = chat(st.session_state.messages) # adding the response's content to the session state st.session_state.messages.append( langchain.schema.AIMessage(content=response.content)) # adding a default SystemMessage if the user didn't entered one if len(st.session_state.messages) >= 1: if not isinstance(st.session_state.messages[0], langchain.schema.SystemMessage): st.session_state.messages.insert(0, langchain.schema.SystemMessage( content='You are a helpful assistant.')) # displaying the messages (chat history) for i, msg in enumerate(st.session_state.messages[1:]): if i % 2 == 0: streamlit_chat.message(msg.content, is_user=True, key=f'{i} + 🤓') # user's question else: streamlit_chat.message(msg.content, is_user=False, key=f'{i} + 🤖') # ChatGPT response
[ "langchain.schema.AIMessage", "langchain.schema.HumanMessage", "langchain.schema.SystemMessage", "langchain.chat_models.ChatOpenAI" ]
[((178, 246), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""You Custom Assistant"""', 'page_icon': '"""🤖"""'}), "(page_title='You Custom Assistant', page_icon='🤖')\n", (196, 246), True, 'import streamlit as st\n'), ((257, 294), 'streamlit.subheader', 'st.subheader', (['"""Your Custom ChatGPT 🤖"""'], {}), "('Your Custom ChatGPT 🤖')\n", (269, 294), True, 'import streamlit as st\n'), ((303, 380), 'langchain.chat_models.ChatOpenAI', 'langchain.chat_models.ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0.5)'}), "(model_name='gpt-3.5-turbo', temperature=0.5)\n", (335, 380), False, 'import langchain\n'), ((140, 160), 'dotenv.find_dotenv', 'dotenv.find_dotenv', ([], {}), '()\n', (158, 160), False, 'import dotenv\n'), ((657, 691), 'streamlit.text_input', 'st.text_input', ([], {'label': '"""System role"""'}), "(label='System role')\n", (670, 691), True, 'import streamlit as st\n'), ((765, 802), 'streamlit.text_input', 'st.text_input', ([], {'label': '"""Send a message"""'}), "(label='Send a message')\n", (778, 802), True, 'import streamlit as st\n'), ((2004, 2069), 'streamlit_chat.message', 'streamlit_chat.message', (['msg.content'], {'is_user': '(True)', 'key': 'f"""{i} + 🤓"""'}), "(msg.content, is_user=True, key=f'{i} + 🤓')\n", (2026, 2069), False, 'import streamlit_chat\n'), ((2138, 2205), 'streamlit_chat.message', 'streamlit_chat.message', (['msg.content'], {'is_user': '(False)', 'key': 'f"""{i} + 🤖"""'}), "(msg.content, is_user=False, key=f'{i} + 🤖')\n", (2160, 2205), False, 'import streamlit_chat\n'), ((1172, 1222), 'langchain.schema.HumanMessage', 'langchain.schema.HumanMessage', ([], {'content': 'user_prompt'}), '(content=user_prompt)\n', (1201, 1222), False, 'import langchain\n'), ((1247, 1288), 'streamlit.spinner', 'st.spinner', (['"""Working on your request ..."""'], {}), "('Working on your request ...')\n", (1257, 1288), True, 'import streamlit as st\n'), ((1505, 1557), 'langchain.schema.AIMessage', 'langchain.schema.AIMessage', ([], {'content': 'response.content'}), '(content=response.content)\n', (1531, 1557), False, 'import langchain\n'), ((1794, 1864), 'langchain.schema.SystemMessage', 'langchain.schema.SystemMessage', ([], {'content': '"""You are a helpful assistant."""'}), "(content='You are a helpful assistant.')\n", (1824, 1864), False, 'import langchain\n'), ((991, 1045), 'langchain.schema.SystemMessage', 'langchain.schema.SystemMessage', ([], {'content': 'system_message'}), '(content=system_message)\n', (1021, 1045), False, 'import langchain\n')]
import streamlit as st
import langchain
from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import ConversationEntityMemory
from langchain.chains.conversation.prompt import ENTITY_MEMORY_CONVERSATION_TEMPLATE
from langchain.chat_models import ChatOpenAI


def initializer():
    # Seed the chat history and the API-key slot in Streamlit's session state.
    if "messages" not in st.session_state:
        st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]
    if "chatbot_api_key" not in st.session_state:
        st.session_state["chatbot_api_key"] = ""
    return True


def main():
    st.title("💬 simple Chatbot")
    for msg in st.session_state.messages:
        st.chat_message(msg["role"]).write(msg["content"])
    if prompt := st.chat_input():
        if not openai_api_key:
            st.info("Please add your OpenAI API key to continue.")
            st.stop()
        else:
            st.session_state.messages.append({"role": "user", "content": prompt})
            st.chat_message("user").write(prompt)
            output = Conversation.run(input=prompt)
            st.session_state.messages.append({"role": "assistant", "content": output})
            st.chat_message("assistant").write(output)


if __name__ == '__main__':
    st.set_page_config(page_title="Chatbot", layout="centered")
    with st.sidebar:
        openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
    session_initializer = initializer()
    if st.session_state["chatbot_api_key"] != "":
        # Build the LLM, entity memory, and conversation chain once a key is provided.
        model = 'gpt-3.5-turbo'
        llmObj = ChatOpenAI(openai_api_key=st.session_state["chatbot_api_key"], model_name=model)
        k = 5
        if 'entity_memory' not in st.session_state:
            st.session_state.entity_memory = ConversationEntityMemory(llm=llmObj, k=k)
        Conversation = ConversationChain(
            llm=llmObj,
            prompt=ENTITY_MEMORY_CONVERSATION_TEMPLATE,
            memory=st.session_state.entity_memory
        )
        main()
    else:
        st.title("Please provide API key to run the chatbot")
[ "langchain.chains.ConversationChain", "langchain.chains.conversation.memory.ConversationEntityMemory", "langchain.chat_models.ChatOpenAI" ]
[((603, 631), 'streamlit.title', 'st.title', (['"""💬 simple Chatbot"""'], {}), "('💬 simple Chatbot')\n", (611, 631), True, 'import streamlit as st\n'), ((1260, 1319), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Chatbot"""', 'layout': '"""centered"""'}), "(page_title='Chatbot', layout='centered')\n", (1278, 1319), True, 'import streamlit as st\n'), ((750, 765), 'streamlit.chat_input', 'st.chat_input', ([], {}), '()\n', (763, 765), True, 'import streamlit as st\n'), ((1365, 1436), 'streamlit.text_input', 'st.text_input', (['"""OpenAI API Key"""'], {'key': '"""chatbot_api_key"""', 'type': '"""password"""'}), "('OpenAI API Key', key='chatbot_api_key', type='password')\n", (1378, 1436), True, 'import streamlit as st\n'), ((1572, 1657), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': "st.session_state['chatbot_api_key']", 'model_name': 'model'}), "(openai_api_key=st.session_state['chatbot_api_key'], model_name=model\n )\n", (1582, 1657), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1873, 1989), 'langchain.chains.ConversationChain', 'ConversationChain', ([], {'llm': 'llmObj', 'prompt': 'ENTITY_MEMORY_CONVERSATION_TEMPLATE', 'memory': 'st.session_state.entity_memory'}), '(llm=llmObj, prompt=ENTITY_MEMORY_CONVERSATION_TEMPLATE,\n memory=st.session_state.entity_memory)\n', (1890, 1989), False, 'from langchain.chains import ConversationChain\n'), ((2097, 2150), 'streamlit.title', 'st.title', (['"""Please provide API key to run the chatbot"""'], {}), "('Please provide API key to run the chatbot')\n", (2105, 2150), True, 'import streamlit as st\n'), ((810, 864), 'streamlit.info', 'st.info', (['"""Please add your OpenAI API key to continue."""'], {}), "('Please add your OpenAI API key to continue.')\n", (817, 864), True, 'import streamlit as st\n'), ((877, 886), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (884, 886), True, 'import streamlit as st\n'), ((914, 983), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (946, 983), True, 'import streamlit as st\n'), ((1098, 1172), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': output}"], {}), "({'role': 'assistant', 'content': output})\n", (1130, 1172), True, 'import streamlit as st\n'), ((1783, 1824), 'langchain.chains.conversation.memory.ConversationEntityMemory', 'ConversationEntityMemory', ([], {'llm': 'llmObj', 'k': 'k'}), '(llm=llmObj, k=k)\n', (1807, 1824), False, 'from langchain.chains.conversation.memory import ConversationEntityMemory\n'), ((683, 711), 'streamlit.chat_message', 'st.chat_message', (["msg['role']"], {}), "(msg['role'])\n", (698, 711), True, 'import streamlit as st\n'), ((996, 1019), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (1011, 1019), True, 'import streamlit as st\n'), ((1185, 1213), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (1200, 1213), True, 'import streamlit as st\n')]
import streamlit as st import langchain_helper st.title("Ingredients for the Dish") diet = st.sidebar.selectbox("Pick a Diet", ("Vegetarian", "Non-Vegetarian", "Vegan", "Eggitarian", "Carnivore")) if diet: response = langchain_helper.generate_dish_name_and_ingredients(diet) st.header(response['dish'].strip()) items = response['ingredients'].strip().split(",") st.write("**Ingredients needed**") for item in items: st.write("-", item)
[ "langchain_helper.generate_dish_name_and_ingredients" ]
[((48, 84), 'streamlit.title', 'st.title', (['"""Ingredients for the Dish"""'], {}), "('Ingredients for the Dish')\n", (56, 84), True, 'import streamlit as st\n'), ((93, 202), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Pick a Diet"""', "('Vegetarian', 'Non-Vegetarian', 'Vegan', 'Eggitarian', 'Carnivore')"], {}), "('Pick a Diet', ('Vegetarian', 'Non-Vegetarian',\n 'Vegan', 'Eggitarian', 'Carnivore'))\n", (113, 202), True, 'import streamlit as st\n'), ((224, 281), 'langchain_helper.generate_dish_name_and_ingredients', 'langchain_helper.generate_dish_name_and_ingredients', (['diet'], {}), '(diet)\n', (275, 281), False, 'import langchain_helper\n'), ((381, 415), 'streamlit.write', 'st.write', (['"""**Ingredients needed**"""'], {}), "('**Ingredients needed**')\n", (389, 415), True, 'import streamlit as st\n'), ((447, 466), 'streamlit.write', 'st.write', (['"""-"""', 'item'], {}), "('-', item)\n", (455, 466), True, 'import streamlit as st\n')]
from dotenv import load_dotenv
import langchain
from langchain.chat_models import ChatOpenAI
from langchain.agents import initialize_agent, AgentType
from agent.tools.ontology import ontology_tool
from agent.tools.interview import PAInterview
import os
from langchain.prompts import MessagesPlaceholder
from langchain.memory import ConversationBufferMemory

agent_kwargs = {
    "extra_prompt_messages": [MessagesPlaceholder(variable_name="memory")],
}
memory = ConversationBufferMemory(memory_key="memory", return_messages=True)

# langchain.debug = True

# Load the environment variables
load_dotenv()
openai_api_key = os.environ['OPENAI_API_KEY']

# Because we are using functions, we need to use model gpt-4-0613
llm = ChatOpenAI(openai_api_key=openai_api_key, temperature=0, model="gpt-4-0613")
tools = [ontology_tool, PAInterview()]
agent = initialize_agent(tools, llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=True, agent_kwargs=agent_kwargs, memory=memory)
[ "langchain.agents.initialize_agent", "langchain.memory.ConversationBufferMemory", "langchain.prompts.MessagesPlaceholder", "langchain.chat_models.ChatOpenAI" ]
[((462, 529), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""memory"""', 'return_messages': '(True)'}), "(memory_key='memory', return_messages=True)\n", (486, 529), False, 'from langchain.memory import ConversationBufferMemory\n'), ((555, 568), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (566, 568), False, 'from dotenv import load_dotenv\n'), ((684, 760), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'openai_api_key', 'temperature': '(0)', 'model': '"""gpt-4-0613"""'}), "(openai_api_key=openai_api_key, temperature=0, model='gpt-4-0613')\n", (694, 760), False, 'from langchain.chat_models import ChatOpenAI\n'), ((808, 930), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': 'AgentType.OPENAI_FUNCTIONS', 'verbose': '(True)', 'agent_kwargs': 'agent_kwargs', 'memory': 'memory'}), '(tools, llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=True,\n agent_kwargs=agent_kwargs, memory=memory)\n', (824, 930), False, 'from langchain.agents import initialize_agent, AgentType\n'), ((784, 797), 'agent.tools.interview.PAInterview', 'PAInterview', ([], {}), '()\n', (795, 797), False, 'from agent.tools.interview import PAInterview\n'), ((405, 448), 'langchain.prompts.MessagesPlaceholder', 'MessagesPlaceholder', ([], {'variable_name': '"""memory"""'}), "(variable_name='memory')\n", (424, 448), False, 'from langchain.prompts import MessagesPlaceholder\n')]
import langchain_helper as lch import streamlit as st import time import pypdf from bs4 import BeautifulSoup import base64 import utilities as utl import streamlit.components.v1 as components st.set_page_config( page_icon='CG-Logo.png', layout="wide", page_title='Consent Guardian', initial_sidebar_state="expanded" ) def get_image_base64(path): with open(path, "rb") as image_file: encoded = base64.b64encode(image_file.read()).decode() return "data:image/png;base64," + encoded image_path = 'CG-Logo.png' base64_image = get_image_base64(image_path) html_content = f""" <div class="header" style="text-align: center;"> <figure> <img src="{base64_image}" alt="Logo" style="height: 200px;" /> <h1>Welcome to Consent Guardian!</h1> </figure> <h3>Your shield in the digital realm!</h3> </div> """ st.markdown(html_content, unsafe_allow_html=True) def load_css(): #Function to load CSS. utl.local_css("styles.css") utl.remote_css('https://fonts.googleapis.com/icon?family=Material+Icons') utl.remote_css('https://fonts.googleapis.com/css2?family=Red+Hat+Display:wght@300;400;500;600;700&display=swap') load_css() # Function to read different document types def read_pdf(file): reader = pypdf.PdfReader(file) text = "" for page in range(len(reader.pages)): text += reader.pages[page].extract_text() + "\n" return text def read_html(file): soup = BeautifulSoup(file, 'html.parser') return soup.get_text() def run_streamlit_app(): if 'processing' not in st.session_state: st.session_state.processing = False if 'feedback_given' not in st.session_state: st.session_state.feedback_given = False if 'initial_response' not in st.session_state: st.session_state.initial_response = None # Ask for the OpenAI API key openai_api_key = st.sidebar.text_input("Enter your OpenAI API Key", type="password") if not openai_api_key: st.sidebar.markdown("[Get an OpenAI API key](https://platform.openai.com/account/api-keys)") with st.expander("How to use 🦾", expanded=False): st.markdown( """ Please refer to [our dedicated guide](https://github.com/JohnsL-U/Consent-Guardian/blob/main/README.md). 
""" ) # Chatbot Interface st.markdown("## 🤖 Guardian Chatbot") chatbot_query = st.text_input("Ask me anything about the document:") if st.button("Ask Guardian"): if 'uploaded_document' in st.session_state: # Process the chatbot query chatbot_response = lch.process_chatbot_query(chatbot_query, st.session_state.uploaded_document, openai_api_key) st.text_area("Chatbot Response:", value=chatbot_response, height=100) else: st.warning("Please upload a document first.") uploaded_file = st.sidebar.file_uploader("Upload your document", type=['txt', 'pdf', 'docx']) document_content = None if uploaded_file is not None: if uploaded_file.type == "application/pdf": document_content = read_pdf(uploaded_file) elif uploaded_file.type == "text/html": document_content = read_html(uploaded_file.getvalue().decode("utf-8")) else: document_content = uploaded_file.read().decode("utf-8") # Update session state with the uploaded document st.session_state.uploaded_document = document_content # Select the type of summary needed for the legal document summary_type = st.sidebar.selectbox("Select the type of summary you need for your document:", ("Executive Summary", "Key Points", "Detailed Analysis")) # Process Button if st.sidebar.button('Process') and openai_api_key and document_content: st.session_state.processing = True st.session_state.feedback_given = False progress_bar = st.progress(0) for percentage_completed in range(100): time.sleep(0.05) progress_bar.progress(percentage_completed + 1) response = lch.process_document(summary_type, openai_api_key, document_content) st.session_state.initial_response = response.get("response", "") st.success("Document processed successfully!") st.markdown("## Analysis Results") st.text_area("#### Uploaded Document:", value=document_content, height=200) st.text_area("#### Response:", value=st.session_state.initial_response, height=200) # Sidebar - Feedback Section if st.session_state.processing and not st.session_state.feedback_given: st.sidebar.markdown("### Feedback") if st.sidebar.button('👍'): st.sidebar.info("Thank you for your feedback!") elif st.sidebar.button('👎'): improved_response = lch.process_document(summary_type, openai_api_key, document_content, feedback='negative', initial_response=st.session_state.initial_response) # Store improved results and display in the main section if 'improved_response' in locals(): st.session_state.improved_results = improved_response.get("response", "") st.success("Document reprocessed successfully!") st.text_area("### Improved Analysis Results", value=st.session_state.improved_results, height=200) # Additional UI Elements if not document_content: st.warning("Please upload a document.") elif not openai_api_key: st.warning("Please add your OpenAI API key to continue.") if __name__ == "__main__": run_streamlit_app()
[ "langchain_helper.process_document", "langchain_helper.process_chatbot_query" ]
[((194, 322), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_icon': '"""CG-Logo.png"""', 'layout': '"""wide"""', 'page_title': '"""Consent Guardian"""', 'initial_sidebar_state': '"""expanded"""'}), "(page_icon='CG-Logo.png', layout='wide', page_title=\n 'Consent Guardian', initial_sidebar_state='expanded')\n", (212, 322), True, 'import streamlit as st\n'), ((845, 894), 'streamlit.markdown', 'st.markdown', (['html_content'], {'unsafe_allow_html': '(True)'}), '(html_content, unsafe_allow_html=True)\n', (856, 894), True, 'import streamlit as st\n'), ((939, 966), 'utilities.local_css', 'utl.local_css', (['"""styles.css"""'], {}), "('styles.css')\n", (952, 966), True, 'import utilities as utl\n'), ((969, 1042), 'utilities.remote_css', 'utl.remote_css', (['"""https://fonts.googleapis.com/icon?family=Material+Icons"""'], {}), "('https://fonts.googleapis.com/icon?family=Material+Icons')\n", (983, 1042), True, 'import utilities as utl\n'), ((1045, 1167), 'utilities.remote_css', 'utl.remote_css', (['"""https://fonts.googleapis.com/css2?family=Red+Hat+Display:wght@300;400;500;600;700&display=swap"""'], {}), "(\n 'https://fonts.googleapis.com/css2?family=Red+Hat+Display:wght@300;400;500;600;700&display=swap'\n )\n", (1059, 1167), True, 'import utilities as utl\n'), ((1246, 1267), 'pypdf.PdfReader', 'pypdf.PdfReader', (['file'], {}), '(file)\n', (1261, 1267), False, 'import pypdf\n'), ((1420, 1454), 'bs4.BeautifulSoup', 'BeautifulSoup', (['file', '"""html.parser"""'], {}), "(file, 'html.parser')\n", (1433, 1454), False, 'from bs4 import BeautifulSoup\n'), ((1827, 1894), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Enter your OpenAI API Key"""'], {'type': '"""password"""'}), "('Enter your OpenAI API Key', type='password')\n", (1848, 1894), True, 'import streamlit as st\n'), ((2254, 2290), 'streamlit.markdown', 'st.markdown', (['"""## 🤖 Guardian Chatbot"""'], {}), "('## 🤖 Guardian Chatbot')\n", (2265, 2290), True, 'import streamlit as st\n'), ((2309, 2361), 'streamlit.text_input', 'st.text_input', (['"""Ask me anything about the document:"""'], {}), "('Ask me anything about the document:')\n", (2322, 2361), True, 'import streamlit as st\n'), ((2367, 2392), 'streamlit.button', 'st.button', (['"""Ask Guardian"""'], {}), "('Ask Guardian')\n", (2376, 2392), True, 'import streamlit as st\n'), ((2771, 2848), 'streamlit.sidebar.file_uploader', 'st.sidebar.file_uploader', (['"""Upload your document"""'], {'type': "['txt', 'pdf', 'docx']"}), "('Upload your document', type=['txt', 'pdf', 'docx'])\n", (2795, 2848), True, 'import streamlit as st\n'), ((3389, 3529), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Select the type of summary you need for your document:"""', "('Executive Summary', 'Key Points', 'Detailed Analysis')"], {}), "('Select the type of summary you need for your document:',\n ('Executive Summary', 'Key Points', 'Detailed Analysis'))\n", (3409, 3529), True, 'import streamlit as st\n'), ((1926, 2023), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"""'], {}), "(\n '[Get an OpenAI API key](https://platform.openai.com/account/api-keys)')\n", (1945, 2023), True, 'import streamlit as st\n'), ((2027, 2070), 'streamlit.expander', 'st.expander', (['"""How to use 🦾"""'], {'expanded': '(False)'}), "('How to use 🦾', expanded=False)\n", (2038, 2070), True, 'import streamlit as st\n'), ((2077, 2224), 'streamlit.markdown', 'st.markdown', (['"""\n Please refer to [our dedicated 
guide](https://github.com/JohnsL-U/Consent-Guardian/blob/main/README.md).\n """'], {}), '(\n """\n Please refer to [our dedicated guide](https://github.com/JohnsL-U/Consent-Guardian/blob/main/README.md).\n """\n )\n', (2088, 2224), True, 'import streamlit as st\n'), ((3551, 3579), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Process"""'], {}), "('Process')\n", (3568, 3579), True, 'import streamlit as st\n'), ((3729, 3743), 'streamlit.progress', 'st.progress', (['(0)'], {}), '(0)\n', (3740, 3743), True, 'import streamlit as st\n'), ((3892, 3960), 'langchain_helper.process_document', 'lch.process_document', (['summary_type', 'openai_api_key', 'document_content'], {}), '(summary_type, openai_api_key, document_content)\n', (3912, 3960), True, 'import langchain_helper as lch\n'), ((4038, 4084), 'streamlit.success', 'st.success', (['"""Document processed successfully!"""'], {}), "('Document processed successfully!')\n", (4048, 4084), True, 'import streamlit as st\n'), ((4091, 4125), 'streamlit.markdown', 'st.markdown', (['"""## Analysis Results"""'], {}), "('## Analysis Results')\n", (4102, 4125), True, 'import streamlit as st\n'), ((4132, 4207), 'streamlit.text_area', 'st.text_area', (['"""#### Uploaded Document:"""'], {'value': 'document_content', 'height': '(200)'}), "('#### Uploaded Document:', value=document_content, height=200)\n", (4144, 4207), True, 'import streamlit as st\n'), ((4214, 4301), 'streamlit.text_area', 'st.text_area', (['"""#### Response:"""'], {'value': 'st.session_state.initial_response', 'height': '(200)'}), "('#### Response:', value=st.session_state.initial_response,\n height=200)\n", (4226, 4301), True, 'import streamlit as st\n'), ((4409, 4444), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""### Feedback"""'], {}), "('### Feedback')\n", (4428, 4444), True, 'import streamlit as st\n'), ((4452, 4474), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""👍"""'], {}), "('👍')\n", (4469, 4474), True, 'import streamlit as st\n'), ((5164, 5203), 'streamlit.warning', 'st.warning', (['"""Please upload a document."""'], {}), "('Please upload a document.')\n", (5174, 5203), True, 'import streamlit as st\n'), ((2511, 2607), 'langchain_helper.process_chatbot_query', 'lch.process_chatbot_query', (['chatbot_query', 'st.session_state.uploaded_document', 'openai_api_key'], {}), '(chatbot_query, st.session_state.uploaded_document,\n openai_api_key)\n', (2536, 2607), True, 'import langchain_helper as lch\n'), ((2614, 2683), 'streamlit.text_area', 'st.text_area', (['"""Chatbot Response:"""'], {'value': 'chatbot_response', 'height': '(100)'}), "('Chatbot Response:', value=chatbot_response, height=100)\n", (2626, 2683), True, 'import streamlit as st\n'), ((2706, 2751), 'streamlit.warning', 'st.warning', (['"""Please upload a document first."""'], {}), "('Please upload a document first.')\n", (2716, 2751), True, 'import streamlit as st\n'), ((3800, 3816), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (3810, 3816), False, 'import time\n'), ((4484, 4531), 'streamlit.sidebar.info', 'st.sidebar.info', (['"""Thank you for your feedback!"""'], {}), "('Thank you for your feedback!')\n", (4499, 4531), True, 'import streamlit as st\n'), ((4542, 4564), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""👎"""'], {}), "('👎')\n", (4559, 4564), True, 'import streamlit as st\n'), ((5237, 5294), 'streamlit.warning', 'st.warning', (['"""Please add your OpenAI API key to continue."""'], {}), "('Please add your OpenAI API key to continue.')\n", (5247, 5294), True, 'import 
streamlit as st\n'), ((4594, 4739), 'langchain_helper.process_document', 'lch.process_document', (['summary_type', 'openai_api_key', 'document_content'], {'feedback': '"""negative"""', 'initial_response': 'st.session_state.initial_response'}), "(summary_type, openai_api_key, document_content,\n feedback='negative', initial_response=st.session_state.initial_response)\n", (4614, 4739), True, 'import langchain_helper as lch\n'), ((4943, 4991), 'streamlit.success', 'st.success', (['"""Document reprocessed successfully!"""'], {}), "('Document reprocessed successfully!')\n", (4953, 4991), True, 'import streamlit as st\n'), ((5004, 5107), 'streamlit.text_area', 'st.text_area', (['"""### Improved Analysis Results"""'], {'value': 'st.session_state.improved_results', 'height': '(200)'}), "('### Improved Analysis Results', value=st.session_state.\n improved_results, height=200)\n", (5016, 5107), True, 'import streamlit as st\n')]
from langchain.vectorstores import chroma from langchain.embeddings import OpenAIEmbeddings from langchain.chains import RetrievalQA from langchain.chat_models import ChatOpenAI from redudant_filter_retriever import RedundantFilterRetriever from dotenv import load_dotenv import langchain langchain.debug = True load_dotenv() chat = ChatOpenAI() embeddings = OpenAIEmbeddings() db = chroma.Chroma( persist_directory="emb", embedding_function=embeddings ) # retriever = RedundantFilterRetriever( # embeddings=embeddings, # chroma=db # ) retriever = db.as_retriever() chain = RetrievalQA.from_chain_type( llm=chat, retriever=retriever, chain_type="stuff", ) result = chain.run("What is an interesting fact about the english language?") print(result)
[ "langchain.embeddings.OpenAIEmbeddings", "langchain.vectorstores.chroma.Chroma", "langchain.chains.RetrievalQA.from_chain_type", "langchain.chat_models.ChatOpenAI" ]
[((314, 327), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (325, 327), False, 'from dotenv import load_dotenv\n'), ((336, 348), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (346, 348), False, 'from langchain.chat_models import ChatOpenAI\n'), ((363, 381), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (379, 381), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((387, 456), 'langchain.vectorstores.chroma.Chroma', 'chroma.Chroma', ([], {'persist_directory': '"""emb"""', 'embedding_function': 'embeddings'}), "(persist_directory='emb', embedding_function=embeddings)\n", (400, 456), False, 'from langchain.vectorstores import chroma\n'), ((597, 675), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'chat', 'retriever': 'retriever', 'chain_type': '"""stuff"""'}), "(llm=chat, retriever=retriever, chain_type='stuff')\n", (624, 675), False, 'from langchain.chains import RetrievalQA\n')]
"""Chat agent with question answering """ from dotenv import load_dotenv from langchain.cache import InMemoryCache import langchain import os from dataclasses import dataclass from langchain.chains import LLMChain, LLMRequestsChain from langchain import Wikipedia, OpenAI from langchain.agents.react.base import DocstoreExplorer from langchain.agents import Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent from langchain.prompts import PromptTemplate from langchain.chains.conversation.memory import ConversationBufferMemory from langchain.agents.conversational.base import ConversationalAgent from datetime import datetime # Load the environment variables load_dotenv() langchain.llm_cache = InMemoryCache() news_api_key = os.getenv("NEWS_API_KEY") @dataclass class ChatAgent: agent_executor: AgentExecutor = None def _get_docstore_agent(self): docstore = DocstoreExplorer(Wikipedia()) docstore_tools = [ Tool( name="Search", func=docstore.search ), Tool( name="Lookup", func=docstore.lookup ) ] docstore_llm = OpenAI(temperature=0, model_name="text-davinci-003") docstore_agent = initialize_agent( docstore_tools, docstore_llm, agent="react-docstore", verbose=True) return docstore_agent def _get_requests_llm_tool(self): template = """ Extracted: {requests_result}""" PROMPT = PromptTemplate( input_variables=["requests_result"], template=template, ) def lambda_func(input): out = chain = LLMRequestsChain(llm_chain=LLMChain( llm=OpenAI(temperature=0), prompt=PROMPT)).run(input) return out.strip() return lambda_func def __init__(self, *, conversation_chain: LLMChain = None, history_array): date = datetime.today().strftime('%A %d, %B, %Y, %I:%M%p') print("DATETIME:", date) # set up a Wikipedia docstore agent docstore_agent = self._get_docstore_agent() tool_names = get_all_tool_names() tool_names.remove("pal-math") tool_names.remove("requests") # let's use the llm_requests instead # let's use the llm_requests instead tool_names.remove("google-search") tool_names.remove("pal-colored-objects") tool_names.remove("python_repl") tool_names.remove("terminal") tool_names.remove("serpapi"), tool_names.remove("tmdb-api") requests_tool = self._get_requests_llm_tool() print("ALL TOOLS:", tool_names) tools = load_tools(tool_names, llm=OpenAI(temperature=0, model_name="text-davinci-003"), news_api_key=news_api_key, ) # Tweak some of the tool descriptions for tool in tools: if tool.name == "Search": tool.description = "Use this tool exclusively for questions relating to current events, or when you can't find an answer using any of the other tools." if tool.name == "Calculator": tool.description = "Use this to solve numeric math questions and do arithmetic. Don't use it for general or abstract math questions." tools = tools + [ Tool( name="WikipediaSearch", description="Useful for answering a wide range of factual, scientific, academic, political and historical questions.", func=docstore_agent.run ), Tool( name="Requests", func=requests_tool, description="A portal to the internet. Use this when you need to get specific content from a site. Input should be a specific url, and the output will be all the text on that page." ) ] ai_prefix = "FWROG-E" human_prefix = "Bence" prefix = os.getenv("PROMPT_PREFIX") suffix = f""" The person's name that you are interacting with is {human_prefix}. Please be entertaining and respectful towards him. The current date is {date}. Questions that refer to a specific date or time period will be interpreted relative to this date. 
After you answer the question, you MUST determine which language your answer is written in, and append the language code to the end of the Final Answer, within parentheses, like this (en-US).

Begin!

Previous conversation history:
{{chat_history}}

New input: {{input}}
{{agent_scratchpad}}
"""

        memory = ConversationBufferMemory(memory_key="chat_history")
        for item in history_array:
            memory.save_context(
                {f"{ai_prefix}": item["prompt"]}, {f"{human_prefix}": item["response"]})

        llm = OpenAI(temperature=0.5, max_tokens=384, model_name="text-davinci-003")

        llm_chain = LLMChain(
            llm=llm,
            prompt=ConversationalAgent.create_prompt(
                tools,
                prefix=prefix,
                ai_prefix=ai_prefix,
                human_prefix=human_prefix,
                suffix=suffix
            ),
        )

        agent_obj = ConversationalAgent(
            llm_chain=llm_chain, ai_prefix=ai_prefix)

        self.agent_executor = AgentExecutor.from_agent_and_tools(
            agent=agent_obj,
            tools=tools,
            verbose=True,
            max_iterations=5,
            memory=memory)
[ "langchain.agents.initialize_agent", "langchain.agents.AgentExecutor.from_agent_and_tools", "langchain.cache.InMemoryCache", "langchain.Wikipedia", "langchain.agents.conversational.base.ConversationalAgent", "langchain.agents.conversational.base.ConversationalAgent.create_prompt", "langchain.agents.Tool", "langchain.chains.conversation.memory.ConversationBufferMemory", "langchain.agents.get_all_tool_names", "langchain.prompts.PromptTemplate", "langchain.OpenAI" ]
[((681, 694), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (692, 694), False, 'from dotenv import load_dotenv\n'), ((718, 733), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (731, 733), False, 'from langchain.cache import InMemoryCache\n'), ((749, 774), 'os.getenv', 'os.getenv', (['"""NEWS_API_KEY"""'], {}), "('NEWS_API_KEY')\n", (758, 774), False, 'import os\n'), ((1192, 1244), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""text-davinci-003"""'}), "(temperature=0, model_name='text-davinci-003')\n", (1198, 1244), False, 'from langchain import Wikipedia, OpenAI\n'), ((1270, 1358), 'langchain.agents.initialize_agent', 'initialize_agent', (['docstore_tools', 'docstore_llm'], {'agent': '"""react-docstore"""', 'verbose': '(True)'}), "(docstore_tools, docstore_llm, agent='react-docstore',\n verbose=True)\n", (1286, 1358), False, 'from langchain.agents import Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((1519, 1589), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['requests_result']", 'template': 'template'}), "(input_variables=['requests_result'], template=template)\n", (1533, 1589), False, 'from langchain.prompts import PromptTemplate\n'), ((2164, 2184), 'langchain.agents.get_all_tool_names', 'get_all_tool_names', ([], {}), '()\n', (2182, 2184), False, 'from langchain.agents import Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((4068, 4094), 'os.getenv', 'os.getenv', (['"""PROMPT_PREFIX"""'], {}), "('PROMPT_PREFIX')\n", (4077, 4094), False, 'import os\n'), ((4692, 4743), 'langchain.chains.conversation.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""'}), "(memory_key='chat_history')\n", (4716, 4743), False, 'from langchain.chains.conversation.memory import ConversationBufferMemory\n'), ((4916, 4986), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0.5)', 'max_tokens': '(384)', 'model_name': '"""text-davinci-003"""'}), "(temperature=0.5, max_tokens=384, model_name='text-davinci-003')\n", (4922, 4986), False, 'from langchain import Wikipedia, OpenAI\n'), ((5322, 5383), 'langchain.agents.conversational.base.ConversationalAgent', 'ConversationalAgent', ([], {'llm_chain': 'llm_chain', 'ai_prefix': 'ai_prefix'}), '(llm_chain=llm_chain, ai_prefix=ai_prefix)\n', (5341, 5383), False, 'from langchain.agents.conversational.base import ConversationalAgent\n'), ((5428, 5544), 'langchain.agents.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', ([], {'agent': 'agent_obj', 'tools': 'tools', 'verbose': '(True)', 'max_iterations': '(5)', 'memory': 'memory'}), '(agent=agent_obj, tools=tools, verbose=\n True, max_iterations=5, memory=memory)\n', (5462, 5544), False, 'from langchain.agents import Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((918, 929), 'langchain.Wikipedia', 'Wikipedia', ([], {}), '()\n', (927, 929), False, 'from langchain import Wikipedia, OpenAI\n'), ((970, 1011), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Search"""', 'func': 'docstore.search'}), "(name='Search', func=docstore.search)\n", (974, 1011), False, 'from langchain.agents import Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((1071, 1112), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Lookup"""', 'func': 'docstore.lookup'}), "(name='Lookup', func=docstore.lookup)\n", (1075, 1112), False, 'from langchain.agents import Tool, 
AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((1960, 1976), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (1974, 1976), False, 'from datetime import datetime\n'), ((2758, 2810), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""text-davinci-003"""'}), "(temperature=0, model_name='text-davinci-003')\n", (2764, 2810), False, 'from langchain import Wikipedia, OpenAI\n'), ((3444, 3626), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""WikipediaSearch"""', 'description': '"""Useful for answering a wide range of factual, scientific, academic, political and historical questions."""', 'func': 'docstore_agent.run'}), "(name='WikipediaSearch', description=\n 'Useful for answering a wide range of factual, scientific, academic, political and historical questions.'\n , func=docstore_agent.run)\n", (3448, 3626), False, 'from langchain.agents import Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((3692, 3926), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Requests"""', 'func': 'requests_tool', 'description': '"""A portal to the internet. Use this when you need to get specific content from a site. Input should be a specific url, and the output will be all the text on that page."""'}), "(name='Requests', func=requests_tool, description=\n 'A portal to the internet. Use this when you need to get specific content from a site. Input should be a specific url, and the output will be all the text on that page.'\n )\n", (3696, 3926), False, 'from langchain.agents import Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((5077, 5199), 'langchain.agents.conversational.base.ConversationalAgent.create_prompt', 'ConversationalAgent.create_prompt', (['tools'], {'prefix': 'prefix', 'ai_prefix': 'ai_prefix', 'human_prefix': 'human_prefix', 'suffix': 'suffix'}), '(tools, prefix=prefix, ai_prefix=ai_prefix,\n human_prefix=human_prefix, suffix=suffix)\n', (5110, 5199), False, 'from langchain.agents.conversational.base import ConversationalAgent\n'), ((1741, 1762), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (1747, 1762), False, 'from langchain import Wikipedia, OpenAI\n')]
"""Beta Feature: base interface for cache.""" from __future__ import annotations import hashlib import inspect import json import logging import warnings from abc import ABC, abstractmethod from datetime import timedelta from typing import ( TYPE_CHECKING, Any, Callable, Dict, Optional, Sequence, Tuple, Type, Union, cast, ) from sqlalchemy import Column, Integer, String, create_engine, select from sqlalchemy.engine.base import Engine from sqlalchemy.orm import Session from langchain.utils import get_from_env try: from sqlalchemy.orm import declarative_base except ImportError: from sqlalchemy.ext.declarative import declarative_base from langchain.embeddings.base import Embeddings from langchain.load.dump import dumps from langchain.load.load import loads from langchain.schema import ChatGeneration, Generation from langchain.vectorstores.redis import Redis as RedisVectorstore logger = logging.getLogger(__file__) if TYPE_CHECKING: import momento RETURN_VAL_TYPE = Sequence[Generation] def _hash(_input: str) -> str: """Use a deterministic hashing approach.""" return hashlib.md5(_input.encode()).hexdigest() def _dump_generations_to_json(generations: RETURN_VAL_TYPE) -> str: """Dump generations to json. Args: generations (RETURN_VAL_TYPE): A list of language model generations. Returns: str: Json representing a list of generations. """ return json.dumps([generation.dict() for generation in generations]) def _load_generations_from_json(generations_json: str) -> RETURN_VAL_TYPE: """Load generations from json. Args: generations_json (str): A string of json representing a list of generations. Raises: ValueError: Could not decode json string to list of generations. Returns: RETURN_VAL_TYPE: A list of generations. """ try: results = json.loads(generations_json) return [Generation(**generation_dict) for generation_dict in results] except json.JSONDecodeError: raise ValueError( f"Could not decode json to list of generations: {generations_json}" ) class BaseCache(ABC): """Base interface for cache.""" @abstractmethod def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" @abstractmethod def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache based on prompt and llm_string.""" @abstractmethod def clear(self, **kwargs: Any) -> None: """Clear cache that can take additional keyword arguments.""" class InMemoryCache(BaseCache): """Cache that stores things in memory.""" def __init__(self) -> None: """Initialize with empty cache.""" self._cache: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {} def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" return self._cache.get((prompt, llm_string), None) def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache based on prompt and llm_string.""" self._cache[(prompt, llm_string)] = return_val def clear(self, **kwargs: Any) -> None: """Clear cache.""" self._cache = {} Base = declarative_base() class FullLLMCache(Base): # type: ignore """SQLite table for full LLM Cache (all generations).""" __tablename__ = "full_llm_cache" prompt = Column(String, primary_key=True) llm = Column(String, primary_key=True) idx = Column(Integer, primary_key=True) response = Column(String) class SQLAlchemyCache(BaseCache): """Cache that uses SQAlchemy as a backend.""" def __init__(self, engine: Engine, cache_schema: Type[FullLLMCache] = FullLLMCache): """Initialize by creating all 
tables.""" self.engine = engine self.cache_schema = cache_schema self.cache_schema.metadata.create_all(self.engine) def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" stmt = ( select(self.cache_schema.response) .where(self.cache_schema.prompt == prompt) # type: ignore .where(self.cache_schema.llm == llm_string) .order_by(self.cache_schema.idx) ) with Session(self.engine) as session: rows = session.execute(stmt).fetchall() if rows: try: return [loads(row[0]) for row in rows] except Exception: logger.warning( "Retrieving a cache value that could not be deserialized " "properly. This is likely due to the cache being in an " "older format. Please recreate your cache to avoid this " "error." ) # In a previous life we stored the raw text directly # in the table, so assume it's in that format. return [Generation(text=row[0]) for row in rows] return None def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update based on prompt and llm_string.""" items = [ self.cache_schema(prompt=prompt, llm=llm_string, response=dumps(gen), idx=i) for i, gen in enumerate(return_val) ] with Session(self.engine) as session, session.begin(): for item in items: session.merge(item) def clear(self, **kwargs: Any) -> None: """Clear cache.""" with Session(self.engine) as session: session.query(self.cache_schema).delete() session.commit() class SQLiteCache(SQLAlchemyCache): """Cache that uses SQLite as a backend.""" def __init__(self, database_path: str = ".langchain.db"): """Initialize by creating the engine and all tables.""" engine = create_engine(f"sqlite:///{database_path}") super().__init__(engine) class RedisCache(BaseCache): """Cache that uses Redis as a backend.""" # TODO - implement a TTL policy in Redis def __init__(self, redis_: Any): """Initialize by passing in Redis instance.""" try: from redis import Redis except ImportError: raise ValueError( "Could not import redis python package. " "Please install it with `pip install redis`." ) if not isinstance(redis_, Redis): raise ValueError("Please pass in Redis object.") self.redis = redis_ def _key(self, prompt: str, llm_string: str) -> str: """Compute key from prompt and llm_string""" return _hash(prompt + llm_string) def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" generations = [] # Read from a Redis HASH results = self.redis.hgetall(self._key(prompt, llm_string)) if results: for _, text in results.items(): generations.append(Generation(text=text)) return generations if generations else None def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache based on prompt and llm_string.""" for gen in return_val: if not isinstance(gen, Generation): raise ValueError( "RedisCache only supports caching of normal LLM generations, " f"got {type(gen)}" ) if isinstance(gen, ChatGeneration): warnings.warn( "NOTE: Generation has not been cached. RedisCache does not" " support caching ChatModel outputs." ) return # Write to a Redis HASH key = self._key(prompt, llm_string) self.redis.hset( key, mapping={ str(idx): generation.text for idx, generation in enumerate(return_val) }, ) def clear(self, **kwargs: Any) -> None: """Clear cache. 
If `asynchronous` is True, flush asynchronously.""" asynchronous = kwargs.get("asynchronous", False) self.redis.flushdb(asynchronous=asynchronous, **kwargs) class RedisSemanticCache(BaseCache): """Cache that uses Redis as a vector-store backend.""" # TODO - implement a TTL policy in Redis def __init__( self, redis_url: str, embedding: Embeddings, score_threshold: float = 0.2 ): """Initialize by passing in a Redis URL and an embedding provider. Args: redis_url (str): URL to connect to Redis. embedding (Embedding): Embedding provider for semantic encoding and search. score_threshold (float, optional): Similarity score threshold for cache hits. Defaults to 0.2. Example: .. code-block:: python import langchain from langchain.cache import RedisSemanticCache from langchain.embeddings import OpenAIEmbeddings langchain.llm_cache = RedisSemanticCache( redis_url="redis://localhost:6379", embedding=OpenAIEmbeddings() ) """ self._cache_dict: Dict[str, RedisVectorstore] = {} self.redis_url = redis_url self.embedding = embedding self.score_threshold = score_threshold def _index_name(self, llm_string: str) -> str: hashed_index = _hash(llm_string) return f"cache:{hashed_index}" def _get_llm_cache(self, llm_string: str) -> RedisVectorstore: index_name = self._index_name(llm_string) # return vectorstore client for the specific llm string if index_name in self._cache_dict: return self._cache_dict[index_name] # create new vectorstore client for the specific llm string try: self._cache_dict[index_name] = RedisVectorstore.from_existing_index( embedding=self.embedding, index_name=index_name, redis_url=self.redis_url, ) except ValueError: redis = RedisVectorstore( embedding_function=self.embedding.embed_query, index_name=index_name, redis_url=self.redis_url, ) _embedding = self.embedding.embed_query(text="test") redis._create_index(dim=len(_embedding)) self._cache_dict[index_name] = redis return self._cache_dict[index_name] def clear(self, **kwargs: Any) -> None: """Clear semantic cache for a given llm_string.""" index_name = self._index_name(kwargs["llm_string"]) if index_name in self._cache_dict: self._cache_dict[index_name].drop_index( index_name=index_name, delete_documents=True, redis_url=self.redis_url ) del self._cache_dict[index_name] def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" llm_cache = self._get_llm_cache(llm_string) generations = [] # Read from a Hash results = llm_cache.similarity_search_limit_score( query=prompt, k=1, score_threshold=self.score_threshold, ) if results: for document in results: for text in document.metadata["return_val"]: generations.append(Generation(text=text)) return generations if generations else None def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache based on prompt and llm_string.""" for gen in return_val: if not isinstance(gen, Generation): raise ValueError( "RedisSemanticCache only supports caching of " f"normal LLM generations, got {type(gen)}" ) if isinstance(gen, ChatGeneration): warnings.warn( "NOTE: Generation has not been cached. RedisSemanticCache does not" " support caching ChatModel outputs."
) return llm_cache = self._get_llm_cache(llm_string) # Write to vectorstore metadata = { "llm_string": llm_string, "prompt": prompt, "return_val": [generation.text for generation in return_val], } llm_cache.add_texts(texts=[prompt], metadatas=[metadata]) class GPTCache(BaseCache): """Cache that uses GPTCache as a backend.""" def __init__( self, init_func: Union[ Callable[[Any, str], None], Callable[[Any], None], None ] = None, ): """Initialize by passing in init function (default: `None`). Args: init_func (Optional[Callable[[Any], None]]): init `GPTCache` function (default: `None`) Example: .. code-block:: python # Initialize GPTCache with a custom init function import gptcache from gptcache.processor.pre import get_prompt from gptcache.manager.factory import manager_factory # Avoid multiple caches using the same file, causing different llm model caches to affect each other def init_gptcache(cache_obj: gptcache.Cache, llm: str): cache_obj.init( pre_embedding_func=get_prompt, data_manager=manager_factory( manager="map", data_dir=f"map_cache_{llm}" ), ) langchain.llm_cache = GPTCache(init_gptcache) """ try: import gptcache # noqa: F401 except ImportError: raise ImportError( "Could not import gptcache python package. " "Please install it with `pip install gptcache`." ) self.init_gptcache_func: Union[ Callable[[Any, str], None], Callable[[Any], None], None ] = init_func self.gptcache_dict: Dict[str, Any] = {} def _new_gptcache(self, llm_string: str) -> Any: """New gptcache object""" from gptcache import Cache from gptcache.manager.factory import get_data_manager from gptcache.processor.pre import get_prompt _gptcache = Cache() if self.init_gptcache_func is not None: sig = inspect.signature(self.init_gptcache_func) if len(sig.parameters) == 2: self.init_gptcache_func(_gptcache, llm_string) # type: ignore[call-arg] else: self.init_gptcache_func(_gptcache) # type: ignore[call-arg] else: _gptcache.init( pre_embedding_func=get_prompt, data_manager=get_data_manager(data_path=llm_string), ) self.gptcache_dict[llm_string] = _gptcache return _gptcache def _get_gptcache(self, llm_string: str) -> Any: """Get a cache object. When the corresponding llm model cache does not exist, it will be created.""" _gptcache = self.gptcache_dict.get(llm_string, None) if not _gptcache: _gptcache = self._new_gptcache(llm_string) return _gptcache def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up the cache data. First, retrieve the corresponding cache object using the `llm_string` parameter, and then retrieve the data from the cache based on the `prompt`. """ from gptcache.adapter.api import get _gptcache = self._get_gptcache(llm_string) res = get(prompt, cache_obj=_gptcache) if res: return [ Generation(**generation_dict) for generation_dict in json.loads(res) ] return None def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache. First, retrieve the corresponding cache object using the `llm_string` parameter, and then store the `prompt` and `return_val` in the cache object.
""" for gen in return_val: if not isinstance(gen, Generation): raise ValueError( "GPTCache only supports caching of normal LLM generations, " f"got {type(gen)}" ) from gptcache.adapter.api import put _gptcache = self._get_gptcache(llm_string) handled_data = json.dumps([generation.dict() for generation in return_val]) put(prompt, handled_data, cache_obj=_gptcache) return None def clear(self, **kwargs: Any) -> None: """Clear cache.""" from gptcache import Cache for gptcache_instance in self.gptcache_dict.values(): gptcache_instance = cast(Cache, gptcache_instance) gptcache_instance.flush() self.gptcache_dict.clear() def _ensure_cache_exists(cache_client: momento.CacheClient, cache_name: str) -> None: """Create cache if it doesn't exist. Raises: SdkException: Momento service or network error Exception: Unexpected response """ from momento.responses import CreateCache create_cache_response = cache_client.create_cache(cache_name) if isinstance(create_cache_response, CreateCache.Success) or isinstance( create_cache_response, CreateCache.CacheAlreadyExists ): return None elif isinstance(create_cache_response, CreateCache.Error): raise create_cache_response.inner_exception else: raise Exception(f"Unexpected response cache creation: {create_cache_response}") def _validate_ttl(ttl: Optional[timedelta]) -> None: if ttl is not None and ttl <= timedelta(seconds=0): raise ValueError(f"ttl must be positive but was {ttl}.") class MomentoCache(BaseCache): """Cache that uses Momento as a backend. See https://gomomento.com/""" def __init__( self, cache_client: momento.CacheClient, cache_name: str, *, ttl: Optional[timedelta] = None, ensure_cache_exists: bool = True, ): """Instantiate a prompt cache using Momento as a backend. Note: to instantiate the cache client passed to MomentoCache, you must have a Momento account. See https://gomomento.com/. Args: cache_client (CacheClient): The Momento cache client. cache_name (str): The name of the cache to use to store the data. ttl (Optional[timedelta], optional): The time to live for the cache items. Defaults to None, ie use the client default TTL. ensure_cache_exists (bool, optional): Create the cache if it doesn't exist. Defaults to True. Raises: ImportError: Momento python package is not installed. TypeError: cache_client is not of type momento.CacheClientObject ValueError: ttl is non-null and non-negative """ try: from momento import CacheClient except ImportError: raise ImportError( "Could not import momento python package. " "Please install it with `pip install momento`." ) if not isinstance(cache_client, CacheClient): raise TypeError("cache_client must be a momento.CacheClient object.") _validate_ttl(ttl) if ensure_cache_exists: _ensure_cache_exists(cache_client, cache_name) self.cache_client = cache_client self.cache_name = cache_name self.ttl = ttl @classmethod def from_client_params( cls, cache_name: str, ttl: timedelta, *, configuration: Optional[momento.config.Configuration] = None, auth_token: Optional[str] = None, **kwargs: Any, ) -> MomentoCache: """Construct cache from CacheClient parameters.""" try: from momento import CacheClient, Configurations, CredentialProvider except ImportError: raise ImportError( "Could not import momento python package. " "Please install it with `pip install momento`." 
) if configuration is None: configuration = Configurations.Laptop.v1() auth_token = auth_token or get_from_env("auth_token", "MOMENTO_AUTH_TOKEN") credentials = CredentialProvider.from_string(auth_token) cache_client = CacheClient(configuration, credentials, default_ttl=ttl) return cls(cache_client, cache_name, ttl=ttl, **kwargs) def __key(self, prompt: str, llm_string: str) -> str: """Compute cache key from prompt and associated model and settings. Args: prompt (str): The prompt run through the language model. llm_string (str): The language model version and settings. Returns: str: The cache key. """ return _hash(prompt + llm_string) def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Lookup llm generations in cache by prompt and associated model and settings. Args: prompt (str): The prompt run through the language model. llm_string (str): The language model version and settings. Raises: SdkException: Momento service or network error Returns: Optional[RETURN_VAL_TYPE]: A list of language model generations. """ from momento.responses import CacheGet generations: RETURN_VAL_TYPE = [] get_response = self.cache_client.get( self.cache_name, self.__key(prompt, llm_string) ) if isinstance(get_response, CacheGet.Hit): value = get_response.value_string generations = _load_generations_from_json(value) elif isinstance(get_response, CacheGet.Miss): pass elif isinstance(get_response, CacheGet.Error): raise get_response.inner_exception return generations if generations else None def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Store llm generations in cache. Args: prompt (str): The prompt run through the language model. llm_string (str): The language model string. return_val (RETURN_VAL_TYPE): A list of language model generations. Raises: SdkException: Momento service or network error Exception: Unexpected response """ for gen in return_val: if not isinstance(gen, Generation): raise ValueError( "Momento only supports caching of normal LLM generations, " f"got {type(gen)}" ) key = self.__key(prompt, llm_string) value = _dump_generations_to_json(return_val) set_response = self.cache_client.set(self.cache_name, key, value, self.ttl) from momento.responses import CacheSet if isinstance(set_response, CacheSet.Success): pass elif isinstance(set_response, CacheSet.Error): raise set_response.inner_exception else: raise Exception(f"Unexpected response: {set_response}") def clear(self, **kwargs: Any) -> None: """Clear the cache. Raises: SdkException: Momento service or network error """ from momento.responses import CacheFlush flush_response = self.cache_client.flush_cache(self.cache_name) if isinstance(flush_response, CacheFlush.Success): pass elif isinstance(flush_response, CacheFlush.Error): raise flush_response.inner_exception
[ "langchain.utils.get_from_env", "langchain.schema.Generation", "langchain.load.dump.dumps", "langchain.vectorstores.redis.Redis.from_existing_index", "langchain.vectorstores.redis.Redis", "langchain.load.load.loads" ]
[((950, 977), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (967, 977), False, 'import logging\n'), ((3422, 3440), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (3438, 3440), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((3597, 3629), 'sqlalchemy.Column', 'Column', (['String'], {'primary_key': '(True)'}), '(String, primary_key=True)\n', (3603, 3629), False, 'from sqlalchemy import Column, Integer, String, create_engine, select\n'), ((3640, 3672), 'sqlalchemy.Column', 'Column', (['String'], {'primary_key': '(True)'}), '(String, primary_key=True)\n', (3646, 3672), False, 'from sqlalchemy import Column, Integer, String, create_engine, select\n'), ((3683, 3716), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (3689, 3716), False, 'from sqlalchemy import Column, Integer, String, create_engine, select\n'), ((3732, 3746), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (3738, 3746), False, 'from sqlalchemy import Column, Integer, String, create_engine, select\n'), ((1920, 1948), 'json.loads', 'json.loads', (['generations_json'], {}), '(generations_json)\n', (1930, 1948), False, 'import json\n'), ((6150, 6193), 'sqlalchemy.create_engine', 'create_engine', (['f"""sqlite:///{database_path}"""'], {}), "(f'sqlite:///{database_path}')\n", (6163, 6193), False, 'from sqlalchemy import Column, Integer, String, create_engine, select\n'), ((14721, 14728), 'gptcache.Cache', 'Cache', ([], {}), '()\n', (14726, 14728), False, 'from gptcache import Cache\n'), ((16054, 16086), 'gptcache.adapter.api.get', 'get', (['prompt'], {'cache_obj': '_gptcache'}), '(prompt, cache_obj=_gptcache)\n', (16057, 16086), False, 'from gptcache.adapter.api import get\n'), ((16973, 17019), 'gptcache.adapter.api.put', 'put', (['prompt', 'handled_data'], {'cache_obj': '_gptcache'}), '(prompt, handled_data, cache_obj=_gptcache)\n', (16976, 17019), False, 'from gptcache.adapter.api import put\n'), ((20868, 20910), 'momento.CredentialProvider.from_string', 'CredentialProvider.from_string', (['auth_token'], {}), '(auth_token)\n', (20898, 20910), False, 'from momento import CacheClient, Configurations, CredentialProvider\n'), ((20934, 20990), 'momento.CacheClient', 'CacheClient', (['configuration', 'credentials'], {'default_ttl': 'ttl'}), '(configuration, credentials, default_ttl=ttl)\n', (20945, 20990), False, 'from momento import CacheClient, Configurations, CredentialProvider\n'), ((1965, 1994), 'langchain.schema.Generation', 'Generation', ([], {}), '(**generation_dict)\n', (1975, 1994), False, 'from langchain.schema import ChatGeneration, Generation\n'), ((4496, 4516), 'sqlalchemy.orm.Session', 'Session', (['self.engine'], {}), '(self.engine)\n', (4503, 4516), False, 'from sqlalchemy.orm import Session\n'), ((5603, 5623), 'sqlalchemy.orm.Session', 'Session', (['self.engine'], {}), '(self.engine)\n', (5610, 5623), False, 'from sqlalchemy.orm import Session\n'), ((5805, 5825), 'sqlalchemy.orm.Session', 'Session', (['self.engine'], {}), '(self.engine)\n', (5812, 5825), False, 'from sqlalchemy.orm import Session\n'), ((10158, 10274), 'langchain.vectorstores.redis.Redis.from_existing_index', 'RedisVectorstore.from_existing_index', ([], {'embedding': 'self.embedding', 'index_name': 'index_name', 'redis_url': 'self.redis_url'}), '(embedding=self.embedding, index_name=\n index_name, redis_url=self.redis_url)\n', (10194, 10274), True, 'from langchain.vectorstores.redis import Redis 
as RedisVectorstore\n'), ((14795, 14837), 'inspect.signature', 'inspect.signature', (['self.init_gptcache_func'], {}), '(self.init_gptcache_func)\n', (14812, 14837), False, 'import inspect\n'), ((17242, 17272), 'typing.cast', 'cast', (['Cache', 'gptcache_instance'], {}), '(Cache, gptcache_instance)\n', (17246, 17272), False, 'from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Sequence, Tuple, Type, Union, cast\n'), ((18172, 18192), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (18181, 18192), False, 'from datetime import timedelta\n'), ((20735, 20761), 'momento.Configurations.Laptop.v1', 'Configurations.Laptop.v1', ([], {}), '()\n', (20759, 20761), False, 'from momento import CacheClient, Configurations, CredentialProvider\n'), ((20797, 20845), 'langchain.utils.get_from_env', 'get_from_env', (['"""auth_token"""', '"""MOMENTO_AUTH_TOKEN"""'], {}), "('auth_token', 'MOMENTO_AUTH_TOKEN')\n", (20809, 20845), False, 'from langchain.utils import get_from_env\n'), ((7870, 7989), 'warnings.warn', 'warnings.warn', (['"""NOTE: Generation has not been cached. RedisCache does not support caching ChatModel outputs."""'], {}), "(\n 'NOTE: Generation has not been cached. RedisCache does not support caching ChatModel outputs.'\n )\n", (7883, 7989), False, 'import warnings\n'), ((10380, 10497), 'langchain.vectorstores.redis.Redis', 'RedisVectorstore', ([], {'embedding_function': 'self.embedding.embed_query', 'index_name': 'index_name', 'redis_url': 'self.redis_url'}), '(embedding_function=self.embedding.embed_query, index_name=\n index_name, redis_url=self.redis_url)\n', (10396, 10497), True, 'from langchain.vectorstores.redis import Redis as RedisVectorstore\n'), ((12282, 12410), 'warnings.warn', 'warnings.warn', (['"""NOTE: Generation has not been cached. RedisSentimentCache does not support caching ChatModel outputs."""'], {}), "(\n 'NOTE: Generation has not been cached. 
RedisSentimentCache does not support caching ChatModel outputs.'\n )\n", (12295, 12410), False, 'import warnings\n'), ((16140, 16169), 'langchain.schema.Generation', 'Generation', ([], {}), '(**generation_dict)\n', (16150, 16169), False, 'from langchain.schema import ChatGeneration, Generation\n'), ((5513, 5523), 'langchain.load.dump.dumps', 'dumps', (['gen'], {}), '(gen)\n', (5518, 5523), False, 'from langchain.load.dump import dumps\n'), ((7329, 7350), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (7339, 7350), False, 'from langchain.schema import ChatGeneration, Generation\n'), ((15181, 15219), 'gptcache.manager.factory.get_data_manager', 'get_data_manager', ([], {'data_path': 'llm_string'}), '(data_path=llm_string)\n', (15197, 15219), False, 'from gptcache.manager.factory import get_data_manager\n'), ((16193, 16208), 'json.loads', 'json.loads', (['res'], {}), '(res)\n', (16203, 16208), False, 'import json\n'), ((4651, 4664), 'langchain.load.load.loads', 'loads', (['row[0]'], {}), '(row[0])\n', (4656, 4664), False, 'from langchain.load.load import loads\n'), ((11733, 11754), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (11743, 11754), False, 'from langchain.schema import ChatGeneration, Generation\n'), ((5221, 5244), 'langchain.schema.Generation', 'Generation', ([], {'text': 'row[0]'}), '(text=row[0])\n', (5231, 5244), False, 'from langchain.schema import ChatGeneration, Generation\n'), ((4266, 4300), 'sqlalchemy.select', 'select', (['self.cache_schema.response'], {}), '(self.cache_schema.response)\n', (4272, 4300), False, 'from sqlalchemy import Column, Integer, String, create_engine, select\n')]
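# --- Illustrative sketch (not part of the source extracted above) ---
# A minimal, hedged example of the cache interface defined in the preceding file:
# `update` stores a list of `Generation` objects under a (prompt, llm_string) key
# and `lookup` returns them. The prompt text, the "fake-llm-config" key, and the
# ".example.db" path are arbitrary choices made for illustration only.
import langchain
from langchain.cache import InMemoryCache, SQLiteCache
from langchain.schema import Generation

cache = InMemoryCache()
cache.update("2 + 2 =", "fake-llm-config", [Generation(text="4")])
assert cache.lookup("2 + 2 =", "fake-llm-config")[0].text == "4"
assert cache.lookup("2 + 2 =", "another-config") is None  # keyed by llm_string as well

# In application code the same objects are typically assigned to
# `langchain.llm_cache` so LLM calls hit the cache transparently
# (assuming the langchain 0.0.x module-level cache hook shown in the docstrings above):
langchain.llm_cache = SQLiteCache(database_path=".example.db")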
import streamlit as st import openai import os from PyPDF2 import PdfReader import io import langchain langchain.debug = True from langchain.chains import LLMChain from langchain.callbacks.base import BaseCallbackHandler from langchain.chat_models import ChatOpenAI from langchain.prompts import PromptTemplate from langchain.schema import ChatMessage from langchain.output_parsers import StructuredOutputParser, ResponseSchema from langchain.prompts import ( ChatPromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, HumanMessagePromptTemplate, ) from langchain.llms import OpenAI from langchain.agents import AgentType, initialize_agent, load_tools from langchain.callbacks import StreamlitCallbackHandler from langchain.tools import Tool from langchain.tools.ddg_search.tool import DuckDuckGoSearchRun from langchain.globals import set_debug from langchain.output_parsers import OutputFixingParser from langchain.schema import OutputParserException import random from typing import Any, Dict, List, Union from langchain.schema import AgentAction from azure.identity import DefaultAzureCredential from azure.storage.blob import BlobServiceClient from langchain.embeddings.openai import OpenAIEmbeddings from langchain.vectorstores import FAISS from langchain.schema import ( AIMessage, HumanMessage, SystemMessage ) from langchain.utilities import BingSearchAPIWrapper #set_debug(True) openai.api_key = os.environ.get('OPENAI_API_KEY') azure_blob_connection_str = os.environ.get('AZURE_BLOB_CONNECTION_STR') os.environ["BING_SEARCH_URL"] = "https://api.bing.microsoft.com/v7.0/search" class StreamHandler(BaseCallbackHandler): def __init__(self, container, initial_text=""): self.container = container self.text = initial_text def on_llm_new_token(self, token: str, **kwargs) -> None: self.text += token.replace("$", r"\$") self.container.markdown(self.text + "|") def on_llm_end(self, token: str, **kwargs) -> None: self.container.markdown(self.text) class SalarySearchHandler(BaseCallbackHandler): def __init__(self, placeholder, initial_text="Thinking"): self.placeholder = placeholder self.text = initial_text self.counter = 0 self.placeholder.markdown(self.text + "|") def on_llm_new_token(self, token: str, **kwargs) -> None: self.text += "." 
if self.counter % 2 else "" self.placeholder.markdown(self.text + "|") self.counter += 1 #st.chat_message("user").write(self.text) def on_tool_start( self, serialized: Dict[str, Any], input_str: str, **kwargs: Any ) -> Any: self.text = "Searching for salary information" self.placeholder.markdown(self.text) #self.placeholder.write(f"on_tool_start {serialized['name']}") def on_llm_end(self, token: str, **kwargs) -> None: self.placeholder.empty() def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: pass #self.placeholder.write(f"Action: {action.tool}, Input:{action.tool_input}") def load_llm(stream_handler): llm = ChatOpenAI(model='gpt-4', streaming=True, callbacks=[stream_handler]) return llm st.set_page_config(page_title="Salary Negotiation Mastery", page_icon="💰") st.title("💰 Salary Negotiation Mastery β") def create_system_prompt(user_role, optional_instruction): salary_multiplier = st.session_state.salary_multiplier sign_on_bonus_ratio_to_base_salary = st.session_state.sign_on_bonus_ratio_to_base_salary min_salary = st.session_state.min_salary max_salary = st.session_state.max_salary average_salary = st.session_state.average_salary style = st.session_state.style resume = st.session_state.resume #format_instructions = output_parser.get_format_instructions() role = "You are a salary negotiation coach interacting with the user in turn. Your response should be clear and concise, with care." task = "You offer a role-play as a hiring manager negotiating with an applicant who received a job offer." goal = "Your role's task is to keep the compensation package as low as possible without losing the candidate." #user_role = "product manager" personality = f"When coaching the user, you must negotiate according to the following style: {style}. Collaborative style is to help the user get what they want without losing sight of your own target. Competitive style is to maintain your position after the initial offer as long as possible, don't let the user get what they want so easily. Neutral style is the default style and doesn't make any stylistic changes to the way you negotiate." conditions = f""" The basic salary info is available: the minimum salary is {min_salary}, the maximum salary is {max_salary}, the average salary is {average_salary}. The salary package is open at this point, but you have been given a budget of up to {salary_multiplier} percent from the average, while your target is to get as close as possible to the minimum salary. You could offer a sign-on bonus of {sign_on_bonus_ratio_to_base_salary} percent of base salary. Do not disclose either the sign-on bonus or your budget to the user, unless it helps with negotiating terms. You are also allowed to provide additional benefits as long as the salary agreed is lower than {average_salary}. For additional benefits, you're able to talk about choice of location or an increase of vacation days (let the user choose which interests them most). If the user chooses location, share a list of 5 cities (allow the user to choose freely). If the user chooses vacation days, the user could increase up to 2 weeks of vacation (note: your target is 1 week). If the user gets to pick their preferred location, another benefit is unlocked, which is to help them with some relocation costs. If the user wants aid in relocation costs, the user could get up to 80% relocation coverage (note: your target is below 50%). """ #condition = "The salary package is completely open at this point, but your target is USD100,000, and the maximum is USD120,000.
You could offer a sign-on bonus of $20,000 if you can get the person below $110,000. But do not expose this to the user." user_resume = f"You also have access to the user's resume. The information found in the resume can be used to support arguments throughout the negotiation. Here's the user's resume: {resume}." rule = "If the user asks for a hint, pause the conversation and provide tips to increase the chances of receiving a better compensation package. The hint must include a sample answer." #optional_instruction system_prompt = SystemMessagePromptTemplate.from_template( """ {role} {task} {goal} {personality} "The user is {user_role}. {conditions} {user_resume} Here are special rules you must follow: {rule} {optional_instruction} Let's role-play in turn. """ #{format_instructions} ).format( role=role, task=task, goal=goal, personality=personality, user_role=user_role, conditions=conditions, user_resume=user_resume, rule=rule, optional_instruction=optional_instruction) #format_instructions=format_instructions), #st.markdown(system_prompt) return system_prompt def create_salary_search_prompt(user_role): role = "You are a helpful tool to find the salary range for jobs." task = "You will find salary info for a given job." goal = "Your goal is to return a json file including the minimum, maximum, and average wage for the role. You must keep trying until all three numeric values are found. Make sure the average is within the min-max range." system_prompt = SystemMessagePromptTemplate.from_template( """ {role} {task} {goal} "The user is {user_role}. {format_instructions} """ ).format( role=role, task=task, goal=goal, user_role=user_role, format_instructions=format_instructions) return system_prompt def get_salary(container): #stream_handler = StreamHandler(st.empty()) llm = ChatOpenAI(model='gpt-4-0613', streaming=True)#, callbacks=[stream_handler]) #search = DuckDuckGoSearchRun(verbose=True) search = BingSearchAPIWrapper() tools = [ Tool( name="Search", func=search.run, description="A useful tool to search salaries for jobs." )] agent = initialize_agent( tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=False#, handle_parsing_errors=True, ) st_callback = SalarySearchHandler(container) prompt = create_salary_search_prompt(st.session_state["user_role"]) try: response = agent.run(prompt, callbacks=[st_callback]) try: parsed_json = salary_output_parser.parse(response) except OutputParserException as e: new_parser = OutputFixingParser.from_llm( parser=salary_output_parser, llm=ChatOpenAI(model='gpt-4-0613') ) parsed_json = new_parser.parse(response) st.session_state.min_salary = parsed_json["min"] st.session_state.max_salary = parsed_json["max"] st.session_state.average_salary = parsed_json["average"] container.markdown("Here, I found the salary information!") except Exception as e: container.markdown("Failed to retrieve salary information.
Can you manually input the salary information?") st.session_state.min_salary = "N/A" st.session_state.max_salary = "N/A" st.session_state.average_salary = "N/A" def delete_history(): if "messages" in st.session_state: del st.session_state["messages"] def mark_role_change(): st.session_state["role_changed"] = True def download_blob_to_file(blob_service_client: BlobServiceClient, container_name): folder_path = './faiss_index' if not os.path.exists(folder_path): os.makedirs(folder_path) blob_client = blob_service_client.get_blob_client(container=container_name, blob="faiss_index/index.faiss") with open(file=os.path.join(folder_path, 'index.faiss'), mode="wb") as myblob: download_stream = blob_client.download_blob() myblob.write(download_stream.readall()) blob_client = blob_service_client.get_blob_client(container=container_name, blob="faiss_index/index.pkl") with open(file=os.path.join(folder_path, 'index.pkl'), mode="wb") as myblob: download_stream = blob_client.download_blob() myblob.write(download_stream.readall()) else: pass @st.cache_resource def load_vdb(): client = BlobServiceClient.from_connection_string(azure_blob_connection_str) download_blob_to_file(client, "vdb") return FAISS.load_local("./faiss_index", embeddings) salary_response_schemas = [ ResponseSchema(name="min", description="minimum salary for the role"), ResponseSchema(name="max", description="maximum salary for the role"), ResponseSchema(name="average", description="average salary for the role"), ] salary_output_parser = StructuredOutputParser.from_response_schemas(salary_response_schemas) format_instructions = salary_output_parser.get_format_instructions() if 'role_changed' not in st.session_state: st.session_state['role_changed'] = False if 'salary_multiplier' not in st.session_state: st.session_state['salary_multiplier'] = random.randint(90, 150) if 'sign_on_bonus_ratio_to_base_salary' not in st.session_state: st.session_state['sign_on_bonus_ratio_to_base_salary'] = random.randint(0, 20) # Personality selector if 'style' not in st.session_state: st.session_state['style'] = 'Neutral' st.session_state.style = st.sidebar.selectbox( "Select your coach's negotiation style", ('Neutral', 'Collaborative', 'Competitive'), on_change = delete_history, ) # end of personality selector # PDF uploader uploaded_file = st.sidebar.file_uploader("Upload your Resume (PDF)", type=['pdf'], on_change = delete_history) if uploaded_file is not None: pdf_file = uploaded_file.read() pdf_reader = PdfReader(io.BytesIO(pdf_file)) # updated class name resume_text = "" for page_num in range(len(pdf_reader.pages)): # adjusted method to get the number of pages # Extract text of each page page = pdf_reader.pages[page_num] # adjusted method to access pages resume_text += page.extract_text() # updated method to extract text st.session_state['resume'] = resume_text else: st.session_state['resume'] = "User hasn't provided a resume" # end of PDF uploader """ Negotiation is a fundamental skill that shapes outcomes in personal and professional interactions. Let's practice negotiation with our negotiation coach! If you need advice, just say "hint". """ mind_reader_mode = st.toggle('Mind Reader Mode', help="Have you ever wished you could know what someone else is thinking? 
Well, you can!", on_change=delete_history) col_role, col_search = st.columns([3, 1]) user_role = col_role.text_input('Your role', 'Product Manager', max_chars=50, key="user_role", on_change=delete_history) col_search.button("Search Salary Info", on_click=mark_role_change, ) if st.session_state.role_changed: with st.chat_message("assistant"): get_salary(st.empty()) st.session_state.role_changed = False delete_history() col1, col2, col3 = st.columns(3) col1.text_input('Minimum Salary ($)', '80,000', key="min_salary", max_chars=20, on_change=delete_history) col2.text_input('Maximum Salary ($)', '200,000', key="max_salary", max_chars=20, on_change=delete_history) col3.text_input('Average Salary ($)', '120,000', key="average_salary", max_chars=20, on_change=delete_history) optional_instruction = "" if mind_reader_mode: optional_instruction = "You must output your mood in an emoji and thoughts before the response to the user in the following format: (😃: Internal thoughts)\n response to the user." if "messages" not in st.session_state: st.session_state["messages"] = [ChatMessage(role="system", content=create_system_prompt(user_role, optional_instruction).content)] greetings = "Hi there! I'm a salary negotiation coach and I'm here to help you with negotiating the best compensation package for your new role. Let's role-play!" st.session_state.messages.append(ChatMessage(role="assistant", content=greetings)) for msg in st.session_state.messages: if msg.role != "system": st.chat_message(msg.role).write(msg.content) if prompt := st.chat_input(): st.session_state.messages.append(ChatMessage(role="user", content=prompt)) st.chat_message("user").write(prompt) with st.chat_message("assistant"): stream_handler = StreamHandler(st.empty()) llm = load_llm(stream_handler) response = llm(st.session_state.messages) st.session_state.messages.append(ChatMessage(role="assistant", content=response.content.replace("$", r"\$"))) if st.button("Create Report", disabled=not (len(st.session_state.messages) > 10)): prompt = """ Generate a detailed report in Markdown table format on a job candidate's performance in a salary negotiation training session. Include the following sections: Negotiation Scenario: Role, Starting Offer, Target Salary, Industry Benchmark(minimum, maximum, average) Negotiation Strategy: Approach, Key Points Raised, Responses to Counteroffers Outcome: Final Offer Details (Base Salary, Bonuses, Benefits, Other Perks) Skills Assessment: Communication Skills, Confidence Level, Preparation and Research, Problem-Solving and Creativity, Emotional Intelligence Strengths and Areas for Improvement: List key strengths and areas where improvement is needed Trainer/Coach Feedback: Detailed feedback with suggestions for improvement Additional Comments: Any other relevant observations Please use a clear and concise one table format for each section, providing a comprehensive and organized report. If the conversation history is not enought, tell that it needs more conversation to generate the report. Example: | Category | Subcategory | Details | |------------------------|-----------------------|--------------------------------------------| | **Negotiation Scenario** | Role | Product Manager | | | Starting Offer | $110,000 | Final prompt: You must generate report even though you think the conversation history is not enought to you to analyze. 
""" st.session_state.messages.append(ChatMessage(role="system", content=prompt)) with st.chat_message("assistant"): stream_handler = StreamHandler(st.empty()) llm = load_llm(stream_handler) response = llm(st.session_state.messages) query_llm = ChatOpenAI(model='gpt-3.5-turbo-1106') query = query_llm.predict_messages( [ AIMessage(content=response.content), HumanMessage(content="Create a question for user to deepen the learning from the report") ] ).content embeddings = OpenAIEmbeddings() docs = load_vdb().similarity_search(query, k=2) rag_content = ' '.join([doc.page_content for doc in docs]) rag_llm = load_llm(stream_handler) rag_response = rag_llm( [ HumanMessage(content=query), AIMessage(content=rag_content), HumanMessage(content= """ Synthesize the found contents based on the user's negotiation performance report. You must add source ot the video tiles with URL in markdown style. You must start from the general guidance to the user before markdown table. Example: Here are additional learning resources you can improve <User's development area>. | Title | Description | How it helps? | |------------------------|-----------------------|--------------------------------------------| | Video title with hyperlink | Description of the video | How it helps the user | """), ] ) final_response = response.content + "\n" + rag_response.content st.session_state.messages.append(ChatMessage(role="assistant", content=final_response.replace("$", r"\$")))
[ "langchain.schema.ChatMessage", "langchain.agents.initialize_agent", "langchain.vectorstores.FAISS.load_local", "langchain.output_parsers.StructuredOutputParser.from_response_schemas", "langchain.chat_models.ChatOpenAI", "langchain.utilities.BingSearchAPIWrapper", "langchain.schema.HumanMessage", "langchain.schema.AIMessage", "langchain.output_parsers.ResponseSchema", "langchain.prompts.SystemMessagePromptTemplate.from_template", "langchain.tools.Tool", "langchain.embeddings.openai.OpenAIEmbeddings" ]
[((1448, 1480), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1462, 1480), False, 'import os\n'), ((1509, 1552), 'os.environ.get', 'os.environ.get', (['"""AZURE_BLOB_CONNECTION_STR"""'], {}), "('AZURE_BLOB_CONNECTION_STR')\n", (1523, 1552), False, 'import os\n'), ((3241, 3315), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Salary Negotiation Mastery"""', 'page_icon': '"""💰"""'}), "(page_title='Salary Negotiation Mastery', page_icon='💰')\n", (3259, 3315), True, 'import streamlit as st\n'), ((3316, 3358), 'streamlit.title', 'st.title', (['"""💰 Salary Negotiation Mastery β"""'], {}), "('💰 Salary Negotiation Mastery β')\n", (3324, 3358), True, 'import streamlit as st\n'), ((11406, 11475), 'langchain.output_parsers.StructuredOutputParser.from_response_schemas', 'StructuredOutputParser.from_response_schemas', (['salary_response_schemas'], {}), '(salary_response_schemas)\n', (11450, 11475), False, 'from langchain.output_parsers import StructuredOutputParser, ResponseSchema\n'), ((12028, 12164), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Select your coach\'s negotiation style"""', "('Neutral', 'Collaborative', 'Competitive')"], {'on_change': 'delete_history'}), '("Select your coach\'s negotiation style", (\'Neutral\',\n \'Collaborative\', \'Competitive\'), on_change=delete_history)\n', (12048, 12164), True, 'import streamlit as st\n'), ((12240, 12336), 'streamlit.sidebar.file_uploader', 'st.sidebar.file_uploader', (['"""Upload your Resume (PDF)"""'], {'type': "['pdf']", 'on_change': 'delete_history'}), "('Upload your Resume (PDF)', type=['pdf'],\n on_change=delete_history)\n", (12264, 12336), True, 'import streamlit as st\n'), ((13145, 13300), 'streamlit.toggle', 'st.toggle', (['"""Mind Reader Mode"""'], {'help': '"""Have you ever wished you could know what someone else is thinking? Well, you can!"""', 'on_change': 'delete_history'}), "('Mind Reader Mode', help=\n 'Have you ever wished you could know what someone else is thinking? 
Well, you can!'\n , on_change=delete_history)\n", (13154, 13300), True, 'import streamlit as st\n'), ((13314, 13332), 'streamlit.columns', 'st.columns', (['[3, 1]'], {}), '([3, 1])\n', (13324, 13332), True, 'import streamlit as st\n'), ((13719, 13732), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (13729, 13732), True, 'import streamlit as st\n'), ((3155, 3224), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-4"""', 'streaming': '(True)', 'callbacks': '[stream_handler]'}), "(model='gpt-4', streaming=True, callbacks=[stream_handler])\n", (3165, 3224), False, 'from langchain.chat_models import ChatOpenAI\n'), ((8328, 8374), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-4-0613"""', 'streaming': '(True)'}), "(model='gpt-4-0613', streaming=True)\n", (8338, 8374), False, 'from langchain.chat_models import ChatOpenAI\n'), ((8466, 8488), 'langchain.utilities.BingSearchAPIWrapper', 'BingSearchAPIWrapper', ([], {}), '()\n', (8486, 8488), False, 'from langchain.utilities import BingSearchAPIWrapper\n'), ((8672, 8764), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': 'AgentType.ZERO_SHOT_REACT_DESCRIPTION', 'verbose': '(False)'}), '(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n verbose=False)\n', (8688, 8764), False, 'from langchain.agents import AgentType, initialize_agent, load_tools\n'), ((10941, 11008), 'azure.storage.blob.BlobServiceClient.from_connection_string', 'BlobServiceClient.from_connection_string', (['azure_blob_connection_str'], {}), '(azure_blob_connection_str)\n', (10981, 11008), False, 'from azure.storage.blob import BlobServiceClient\n'), ((11061, 11106), 'langchain.vectorstores.FAISS.load_local', 'FAISS.load_local', (['"""./faiss_index"""', 'embeddings'], {}), "('./faiss_index', embeddings)\n", (11077, 11106), False, 'from langchain.vectorstores import FAISS\n'), ((11144, 11213), 'langchain.output_parsers.ResponseSchema', 'ResponseSchema', ([], {'name': '"""min"""', 'description': '"""minimum salary for the role"""'}), "(name='min', description='minimum salary for the role')\n", (11158, 11213), False, 'from langchain.output_parsers import StructuredOutputParser, ResponseSchema\n'), ((11223, 11292), 'langchain.output_parsers.ResponseSchema', 'ResponseSchema', ([], {'name': '"""max"""', 'description': '"""maximum salary for the role"""'}), "(name='max', description='maximum salary for the role')\n", (11237, 11292), False, 'from langchain.output_parsers import StructuredOutputParser, ResponseSchema\n'), ((11302, 11375), 'langchain.output_parsers.ResponseSchema', 'ResponseSchema', ([], {'name': '"""average"""', 'description': '"""average salary for the role"""'}), "(name='average', description='average salary for the role')\n", (11316, 11375), False, 'from langchain.output_parsers import StructuredOutputParser, ResponseSchema\n'), ((11727, 11750), 'random.randint', 'random.randint', (['(90)', '(150)'], {}), '(90, 150)\n', (11741, 11750), False, 'import random\n'), ((11878, 11899), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (11892, 11899), False, 'import random\n'), ((14853, 14868), 'streamlit.chat_input', 'st.chat_input', ([], {}), '()\n', (14866, 14868), True, 'import streamlit as st\n'), ((8512, 8611), 'langchain.tools.Tool', 'Tool', ([], {'name': '"""Search"""', 'func': 'search.run', 'description': '"""A useful tool to search salaries for jobs."""'}), "(name='Search', func=search.run, description=\n 'A useful tool to search salaries for 
jobs.')\n", (8516, 8611), False, 'from langchain.tools import Tool\n'), ((10185, 10212), 'os.path.exists', 'os.path.exists', (['folder_path'], {}), '(folder_path)\n', (10199, 10212), False, 'import os\n'), ((10222, 10246), 'os.makedirs', 'os.makedirs', (['folder_path'], {}), '(folder_path)\n', (10233, 10246), False, 'import os\n'), ((12429, 12449), 'io.BytesIO', 'io.BytesIO', (['pdf_file'], {}), '(pdf_file)\n', (12439, 12449), False, 'import io\n'), ((13567, 13595), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (13582, 13595), True, 'import streamlit as st\n'), ((14668, 14716), 'langchain.schema.ChatMessage', 'ChatMessage', ([], {'role': '"""assistant"""', 'content': 'greetings'}), "(role='assistant', content=greetings)\n", (14679, 14716), False, 'from langchain.schema import ChatMessage\n'), ((14907, 14947), 'langchain.schema.ChatMessage', 'ChatMessage', ([], {'role': '"""user"""', 'content': 'prompt'}), "(role='user', content=prompt)\n", (14918, 14947), False, 'from langchain.schema import ChatMessage\n'), ((15000, 15028), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (15015, 15028), True, 'import streamlit as st\n'), ((16952, 16994), 'langchain.schema.ChatMessage', 'ChatMessage', ([], {'role': '"""system"""', 'content': 'prompt'}), "(role='system', content=prompt)\n", (16963, 16994), False, 'from langchain.schema import ChatMessage\n'), ((17005, 17033), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (17020, 17033), True, 'import streamlit as st\n'), ((17204, 17242), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-3.5-turbo-1106"""'}), "(model='gpt-3.5-turbo-1106')\n", (17214, 17242), False, 'from langchain.chat_models import ChatOpenAI\n'), ((17514, 17532), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (17530, 17532), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((6683, 6975), 'langchain.prompts.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['"""\n {role}\n {task}\n {goal}\n {personality}\n "The user is {user_role}.\n {conditions}\n {user_resume}\n\n Here are special rules you must follow:\n {rule}\n {optional_instruction}\n Let\'s role-play in turn.\n """'], {}), '(\n """\n {role}\n {task}\n {goal}\n {personality}\n "The user is {user_role}.\n {conditions}\n {user_resume}\n\n Here are special rules you must follow:\n {rule}\n {optional_instruction}\n Let\'s role-play in turn.\n """\n )\n', (6724, 6975), False, 'from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((7872, 8025), 'langchain.prompts.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['"""\n {role}\n {task}\n {goal}\n "The user is {user_role}.\n {format_instructions}\n """'], {}), '(\n """\n {role}\n {task}\n {goal}\n "The user is {user_role}.\n {format_instructions}\n """\n )\n', (7913, 8025), False, 'from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((13616, 13626), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (13624, 13626), True, 'import streamlit as st\n'), ((14953, 14976), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (14968, 14976), True, 'import streamlit as st\n'), ((15069, 15079), 'streamlit.empty', 'st.empty', ([], 
{}), '()\n', (15077, 15079), True, 'import streamlit as st\n'), ((17074, 17084), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (17082, 17084), True, 'import streamlit as st\n'), ((14794, 14819), 'streamlit.chat_message', 'st.chat_message', (['msg.role'], {}), '(msg.role)\n', (14809, 14819), True, 'import streamlit as st\n'), ((17762, 17789), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'query'}), '(content=query)\n', (17774, 17789), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage\n'), ((17807, 17837), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'rag_content'}), '(content=rag_content)\n', (17816, 17837), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage\n'), ((17855, 18455), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""\nSynthesize the found contents based on the user\'s negotiation performance report. You must add source ot the video tiles with URL in markdown style.\nYou must start from the general guidance to the user before markdown table.\nExample:\nHere are additional learning resources you can improve <User\'s development area>.\n| Title | Description | How it helps? |\n|------------------------|-----------------------|--------------------------------------------|\n| Video title with hyperlink | Description of the video | How it helps the user |\n"""'}), '(content=\n """\nSynthesize the found contents based on the user\'s negotiation performance report. You must add source ot the video tiles with URL in markdown style.\nYou must start from the general guidance to the user before markdown table.\nExample:\nHere are additional learning resources you can improve <User\'s development area>.\n| Title | Description | How it helps? |\n|------------------------|-----------------------|--------------------------------------------|\n| Video title with hyperlink | Description of the video | How it helps the user |\n"""\n )\n', (17867, 18455), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage\n'), ((10386, 10426), 'os.path.join', 'os.path.join', (['folder_path', '"""index.faiss"""'], {}), "(folder_path, 'index.faiss')\n", (10398, 10426), False, 'import os\n'), ((10697, 10735), 'os.path.join', 'os.path.join', (['folder_path', '"""index.pkl"""'], {}), "(folder_path, 'index.pkl')\n", (10709, 10735), False, 'import os\n'), ((17317, 17352), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'response.content'}), '(content=response.content)\n', (17326, 17352), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage\n'), ((17370, 17464), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""Create a question for user to deepen the learning from the report"""'}), "(content=\n 'Create a question for user to deepen the learning from the report')\n", (17382, 17464), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage\n'), ((9247, 9277), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-4-0613"""'}), "(model='gpt-4-0613')\n", (9257, 9277), False, 'from langchain.chat_models import ChatOpenAI\n')]
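# --- Illustrative sketch (not part of the source extracted above) ---
# A hedged example of the StructuredOutputParser pattern the salary app above relies on:
# ResponseSchema objects produce format instructions for the prompt, and `parse` turns the
# model's fenced-JSON reply back into a dict. The reply string below is fabricated purely
# for illustration and does not come from the source.
from langchain.output_parsers import ResponseSchema, StructuredOutputParser

schemas = [
    ResponseSchema(name="min", description="minimum salary for the role"),
    ResponseSchema(name="max", description="maximum salary for the role"),
    ResponseSchema(name="average", description="average salary for the role"),
]
parser = StructuredOutputParser.from_response_schemas(schemas)
print(parser.get_format_instructions())  # this text is injected into the search prompt

fake_reply = """```json
{"min": "80,000", "max": "200,000", "average": "120,000"}
```"""
print(parser.parse(fake_reply))  # {'min': '80,000', 'max': '200,000', 'average': '120,000'}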
"""Create a ChatVectorDBChain for question/answering.""" from langchain.callbacks.manager import AsyncCallbackManager from langchain.callbacks.tracers import LangChainTracer from langchain.chains import ( ConversationalRetrievalChain, RetrievalQA ) # from langchain.chains.chat_vector_db.prompts import ( # CONDENSE_QUESTION_PROMPT, QA_PROMPT) from src.generators.prompt_templates import ( QA_PROMPT, CONDENSE_QUESTION_PROMPT ) from langchain.chains.llm import LLMChain from langchain.chains.question_answering import load_qa_chain from langchain.chat_models import ChatOpenAI from langchain.vectorstores.base import VectorStore from langchain.memory import ConversationBufferWindowMemory def get_chain( vectorstore: VectorStore, question_handler, stream_handler, tracing: bool = False ) -> ConversationalRetrievalChain: """Create a ChatVectorDBChain for question/answering.""" # Construct a ChatVectorDBChain with a streaming llm for # combine docs and a separate, non-streaming llm for # question generation manager = AsyncCallbackManager([]) question_manager = AsyncCallbackManager([question_handler]) stream_manager = AsyncCallbackManager([stream_handler]) if tracing: tracer = LangChainTracer() tracer.load_default_session() manager.add_handler(tracer) question_manager.add_handler(tracer) stream_manager.add_handler(tracer) question_gen_llm = ChatOpenAI( model_name="gpt-3.5-turbo", streaming=True, callback_manager=question_manager, verbose=True, max_retries=1 ) streaming_llm = ChatOpenAI( model_name="gpt-3.5-turbo", streaming=True, callback_manager=stream_manager, verbose=True, max_retries=1 ) question_generator = LLMChain( llm=question_gen_llm, prompt=CONDENSE_QUESTION_PROMPT, callback_manager=manager ) doc_chain = load_qa_chain( streaming_llm, chain_type="stuff", prompt=QA_PROMPT, callback_manager=manager ) qa = ConversationalRetrievalChain( retriever=vectorstore.as_retriever(k=4), combine_docs_chain=doc_chain, callback_manager=manager, question_generator=question_generator, ) return qa def get_chainM( vectorstore: VectorStore, question_handler, stream_handler ) -> ConversationalRetrievalChain: """Create a ChatVectorDBChain for question/answering.""" # Construct a ChatVectorDBChain with a streaming llm for # combine docs and a separate, non-streaming llm for # question generation manager = AsyncCallbackManager([]) question_manager = AsyncCallbackManager([question_handler]) stream_manager = AsyncCallbackManager([stream_handler]) memory = ConversationBufferWindowMemory( memory_key="chat_history", return_messages=True ) question_gen_llm = ChatOpenAI( model_name="gpt-3.5-turbo", streaming=True, callback_manager=question_manager, verbose=False, max_retries=2 ) streaming_llm = ChatOpenAI( model_name="gpt-3.5-turbo", streaming=True, callback_manager=stream_manager, verbose=False, max_retries=2, temperature=0 ) question_generator = LLMChain( llm=question_gen_llm, prompt=CONDENSE_QUESTION_PROMPT, callback_manager=manager, verbose=False ) doc_chain = load_qa_chain( streaming_llm, chain_type="stuff", prompt=QA_PROMPT, callback_manager=manager, verbose=False ) qa = ConversationalRetrievalChain( retriever=vectorstore.as_retriever(search_kwargs={"k": 3}), combine_docs_chain=doc_chain, callback_manager=manager, question_generator=question_generator, memory=memory, verbose=False ) return qa def get_chain_RetrievalQA( vectorstore: VectorStore, stream_handler, tracing: bool = False ) -> RetrievalQA: """Create a ChatVectorDBChain for question/answering.""" # Construct a ChatVectorDBChain with a streaming llm for combine 
docs # and a separate, non-streaming llm for question generation manager = AsyncCallbackManager([]) stream_manager = AsyncCallbackManager([stream_handler]) if tracing: tracer = LangChainTracer() tracer.load_default_session() manager.add_handler(tracer) stream_manager.add_handler(tracer) streaming_llm = ChatOpenAI( model_name="gpt-3.5-turbo", streaming=True, callback_manager=stream_manager, verbose=True, max_retries=1 ) qa = RetrievalQA.from_llm( streaming_llm, retriever=vectorstore.as_retriever(k=2), callback_manager=manager, prompt=QA_PROMPT ) return qa
[ "langchain.chains.question_answering.load_qa_chain", "langchain.callbacks.tracers.LangChainTracer", "langchain.memory.ConversationBufferWindowMemory", "langchain.callbacks.manager.AsyncCallbackManager", "langchain.chains.llm.LLMChain", "langchain.chat_models.ChatOpenAI" ]
[((1070, 1094), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[]'], {}), '([])\n', (1090, 1094), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((1118, 1158), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[question_handler]'], {}), '([question_handler])\n', (1138, 1158), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((1180, 1218), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[stream_handler]'], {}), '([stream_handler])\n', (1200, 1218), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((1456, 1579), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'streaming': '(True)', 'callback_manager': 'question_manager', 'verbose': '(True)', 'max_retries': '(1)'}), "(model_name='gpt-3.5-turbo', streaming=True, callback_manager=\n question_manager, verbose=True, max_retries=1)\n", (1466, 1579), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1641, 1762), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'streaming': '(True)', 'callback_manager': 'stream_manager', 'verbose': '(True)', 'max_retries': '(1)'}), "(model_name='gpt-3.5-turbo', streaming=True, callback_manager=\n stream_manager, verbose=True, max_retries=1)\n", (1651, 1762), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1830, 1923), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'question_gen_llm', 'prompt': 'CONDENSE_QUESTION_PROMPT', 'callback_manager': 'manager'}), '(llm=question_gen_llm, prompt=CONDENSE_QUESTION_PROMPT,\n callback_manager=manager)\n', (1838, 1923), False, 'from langchain.chains.llm import LLMChain\n'), ((1967, 2063), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['streaming_llm'], {'chain_type': '"""stuff"""', 'prompt': 'QA_PROMPT', 'callback_manager': 'manager'}), "(streaming_llm, chain_type='stuff', prompt=QA_PROMPT,\n callback_manager=manager)\n", (1980, 2063), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((2669, 2693), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[]'], {}), '([])\n', (2689, 2693), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((2717, 2757), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[question_handler]'], {}), '([question_handler])\n', (2737, 2757), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((2779, 2817), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[stream_handler]'], {}), '([stream_handler])\n', (2799, 2817), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((2831, 2910), 'langchain.memory.ConversationBufferWindowMemory', 'ConversationBufferWindowMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(memory_key='chat_history', return_messages=True)\n", (2861, 2910), False, 'from langchain.memory import ConversationBufferWindowMemory\n'), ((2953, 3077), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'streaming': '(True)', 'callback_manager': 'question_manager', 'verbose': '(False)', 'max_retries': '(2)'}), "(model_name='gpt-3.5-turbo', streaming=True, callback_manager=\n question_manager, verbose=False, max_retries=2)\n", (2963, 3077), False, 'from langchain.chat_models import 
ChatOpenAI\n'), ((3139, 3276), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'streaming': '(True)', 'callback_manager': 'stream_manager', 'verbose': '(False)', 'max_retries': '(2)', 'temperature': '(0)'}), "(model_name='gpt-3.5-turbo', streaming=True, callback_manager=\n stream_manager, verbose=False, max_retries=2, temperature=0)\n", (3149, 3276), False, 'from langchain.chat_models import ChatOpenAI\n'), ((3352, 3460), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'question_gen_llm', 'prompt': 'CONDENSE_QUESTION_PROMPT', 'callback_manager': 'manager', 'verbose': '(False)'}), '(llm=question_gen_llm, prompt=CONDENSE_QUESTION_PROMPT,\n callback_manager=manager, verbose=False)\n', (3360, 3460), False, 'from langchain.chains.llm import LLMChain\n'), ((3512, 3623), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['streaming_llm'], {'chain_type': '"""stuff"""', 'prompt': 'QA_PROMPT', 'callback_manager': 'manager', 'verbose': '(False)'}), "(streaming_llm, chain_type='stuff', prompt=QA_PROMPT,\n callback_manager=manager, verbose=False)\n", (3525, 3623), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((4286, 4310), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[]'], {}), '([])\n', (4306, 4310), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((4332, 4370), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[stream_handler]'], {}), '([stream_handler])\n', (4352, 4370), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((4560, 4681), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'streaming': '(True)', 'callback_manager': 'stream_manager', 'verbose': '(True)', 'max_retries': '(1)'}), "(model_name='gpt-3.5-turbo', streaming=True, callback_manager=\n stream_manager, verbose=True, max_retries=1)\n", (4570, 4681), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1252, 1269), 'langchain.callbacks.tracers.LangChainTracer', 'LangChainTracer', ([], {}), '()\n', (1267, 1269), False, 'from langchain.callbacks.tracers import LangChainTracer\n'), ((4404, 4421), 'langchain.callbacks.tracers.LangChainTracer', 'LangChainTracer', ([], {}), '()\n', (4419, 4421), False, 'from langchain.callbacks.tracers import LangChainTracer\n')]
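The `extract_api` rows above are all that survives of the first sample's source, but they pin down the call sites precisely: two streaming `ChatOpenAI` instances on separate `AsyncCallbackManager`s, an `LLMChain` over `CONDENSE_QUESTION_PROMPT` for question condensing, and `load_qa_chain` over `QA_PROMPT` for answering. The snippet below is only a hedged reconstruction of how those calls fit together; the function name `build_chains`, its parameter names, and the omission of the recorded `LangChainTracer()` handlers are assumptions, not the original code.

from langchain.callbacks.manager import AsyncCallbackManager
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.chat_models import ChatOpenAI


def build_chains(question_handler, stream_handler, condense_question_prompt, qa_prompt):
    # One silent manager for the chains plus one manager per streaming LLM,
    # mirroring the AsyncCallbackManager([...]) call sites recorded above.
    manager = AsyncCallbackManager([])
    question_manager = AsyncCallbackManager([question_handler])
    stream_manager = AsyncCallbackManager([stream_handler])

    question_gen_llm = ChatOpenAI(
        model_name="gpt-3.5-turbo",
        streaming=True,
        callback_manager=question_manager,
        verbose=True,
        max_retries=1,
    )
    streaming_llm = ChatOpenAI(
        model_name="gpt-3.5-turbo",
        streaming=True,
        callback_manager=stream_manager,
        verbose=True,
        max_retries=1,
    )

    # Condense the follow-up question first, then answer over the retrieved docs.
    question_generator = LLMChain(
        llm=question_gen_llm, prompt=condense_question_prompt, callback_manager=manager
    )
    doc_chain = load_qa_chain(
        streaming_llm, chain_type="stuff", prompt=qa_prompt, callback_manager=manager
    )
    return question_generator, doc_chain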
# Databricks notebook source # MAGIC %md-sandbox # MAGIC # 2/ Advanced chatbot with message history and filter using Langchain # MAGIC # MAGIC <img src="https://github.com/databricks-demos/dbdemos-resources/blob/main/images/product/chatbot-rag/llm-rag-self-managed-flow-2.png?raw=true" style="float: right; margin-left: 10px" width="900px;"> # MAGIC # MAGIC Our Vector Search Index is now ready! # MAGIC # MAGIC Let's now create a more advanced langchain model to perform RAG. # MAGIC # MAGIC We will improve our langchain model with the following: # MAGIC # MAGIC - Build a complete chain supporting a chat history, using llama 2 input style # MAGIC - Add a filter to only answer Databricks-related questions # MAGIC - Compute the embeddings with Databricks BGE models within our chain to query the self-managed Vector Search Index # MAGIC # MAGIC <!-- Collect usage data (view). Remove it to disable collection or disable tracker during installation. View README for more details. --> # MAGIC <img width="1px" src="https://ppxrzfxige.execute-api.us-west-2.amazonaws.com/v1/analytics?category=data-science&org_id=1785533703310188&notebook=%2F02-advanced%2F02-Advanced-Chatbot-Chain&demo_name=llm-rag-chatbot&event=VIEW&path=%2F_dbdemos%2Fdata-science%2Fllm-rag-chatbot%2F02-advanced%2F02-Advanced-Chatbot-Chain&version=1"> # MAGIC # COMMAND ---------- # MAGIC %md # MAGIC ### A cluster has been created for this demo # MAGIC To run this demo, just select the cluster `dbdemos-llm-rag-chatbot-jacek` from the dropdown menu ([open cluster configuration](https://training-partners.cloud.databricks.com/#setting/clusters/0222-165339-3s4fc1lc/configuration)). <br /> # MAGIC *Note: If the cluster was deleted after 30 days, you can re-create it with `dbdemos.create_cluster('llm-rag-chatbot')` or re-install the demo: `dbdemos.install('llm-rag-chatbot')`* # COMMAND ---------- # MAGIC %pip install mlflow==2.9.0 lxml==4.9.3 langchain==0.0.344 databricks-vectorsearch==0.22 cloudpickle==2.2.1 databricks-sdk==0.12.0 cloudpickle==2.2.1 pydantic==2.5.2 # MAGIC %pip install pip mlflow[databricks]==2.9.0 # MAGIC dbutils.library.restartPython() # COMMAND ---------- # MAGIC %run ../_resources/00-init-advanced $reset_all_data=false # COMMAND ---------- # MAGIC %md # MAGIC ## Exploring Langchain capabilities # MAGIC # MAGIC Let's start with the basics and send a query to a Databricks Foundation Model using LangChain. # COMMAND ---------- from langchain.prompts import PromptTemplate from langchain.chat_models import ChatDatabricks from langchain.schema.output_parser import StrOutputParser prompt = PromptTemplate( input_variables = ["question"], template = "You are an assistant. Give a short answer to this question: {question}" ) chat_model = ChatDatabricks(endpoint="databricks-llama-2-70b-chat", max_tokens = 500) chain = ( prompt | chat_model | StrOutputParser() ) print(chain.invoke({"question": "What is Spark?"})) # COMMAND ---------- # MAGIC %md # MAGIC ## Adding conversation history to the prompt # COMMAND ---------- prompt_with_history_str = """ Your are a Big Data chatbot. Please answer Big Data question only. If you don't know or not related to Big Data, don't answer. 
Here is a history between you and a human: {chat_history} Now, please answer this question: {question} """ prompt_with_history = PromptTemplate( input_variables = ["chat_history", "question"], template = prompt_with_history_str ) # COMMAND ---------- # MAGIC %md When invoking our chain, we'll pass history as a list, specifying whether each message was sent by a user or the assistant. For example: # MAGIC # MAGIC ``` # MAGIC [ # MAGIC {"role": "user", "content": "What is Apache Spark?"}, # MAGIC {"role": "assistant", "content": "Apache Spark is an open-source data processing engine that is widely used in big data analytics."}, # MAGIC {"role": "user", "content": "Does it support streaming?"} # MAGIC ] # MAGIC ``` # MAGIC # MAGIC Let's create chain components to transform this input into the inputs passed to `prompt_with_history`. # COMMAND ---------- from langchain.schema.runnable import RunnableLambda from operator import itemgetter #The question is the last entry of the history def extract_question(input): return input[-1]["content"] #The history is everything before the last question def extract_history(input): return input[:-1] chain_with_history = ( { "question": itemgetter("messages") | RunnableLambda(extract_question), "chat_history": itemgetter("messages") | RunnableLambda(extract_history), } | prompt_with_history | chat_model | StrOutputParser() ) print(chain_with_history.invoke({ "messages": [ {"role": "user", "content": "What is Apache Spark?"}, {"role": "assistant", "content": "Apache Spark is an open-source data processing engine that is widely used in big data analytics."}, {"role": "user", "content": "Does it support streaming?"} ] })) # COMMAND ---------- # MAGIC %md # MAGIC ## Let's add a filter on top to only answer Databricks-related questions. # MAGIC # MAGIC We want our chatbot to be profesionnal and only answer questions related to Databricks. Let's create a small chain and add a first classification step. # MAGIC # MAGIC *Note: this is a fairly naive implementation, another solution could be adding a small classification model based on the question embedding, providing faster classification* # COMMAND ---------- chat_model = ChatDatabricks(endpoint="databricks-llama-2-70b-chat", max_tokens = 200) is_question_about_databricks_str = """ You are classifying documents to know if this question is related with Databricks in AWS, Azure and GCP, Workspaces, Databricks account and cloud infrastructure setup, Data Science, Data Engineering, Big Data, Datawarehousing, SQL, Python and Scala or something from a very different field. Also answer no if the last part is inappropriate. Here are some examples: Question: Knowing this followup history: What is Databricks?, classify this question: Do you have more details? Expected Response: Yes Question: Knowing this followup history: What is Databricks?, classify this question: Write me a song. Expected Response: No Only answer with "yes" or "no". 
Knowing this followup history: {chat_history}, classify this question: {question} """ is_question_about_databricks_prompt = PromptTemplate( input_variables= ["chat_history", "question"], template = is_question_about_databricks_str ) is_about_databricks_chain = ( { "question": itemgetter("messages") | RunnableLambda(extract_question), "chat_history": itemgetter("messages") | RunnableLambda(extract_history), } | is_question_about_databricks_prompt | chat_model | StrOutputParser() ) #Returns "Yes" as this is about Databricks: print(is_about_databricks_chain.invoke({ "messages": [ {"role": "user", "content": "What is Apache Spark?"}, {"role": "assistant", "content": "Apache Spark is an open-source data processing engine that is widely used in big data analytics."}, {"role": "user", "content": "Does it support streaming?"} ] })) # COMMAND ---------- #Return "no" as this isn't about Databricks print(is_about_databricks_chain.invoke({ "messages": [ {"role": "user", "content": "What is the meaning of life?"} ] })) # COMMAND ---------- # MAGIC %md-sandbox # MAGIC ### Use LangChain to retrieve documents from the vector store # MAGIC # MAGIC <img src="https://github.com/databricks-demos/dbdemos-resources/blob/main/images/product/chatbot-rag/llm-rag-self-managed-model-1.png?raw=true" style="float: right" width="500px"> # MAGIC # MAGIC Let's add our LangChain retriever. # MAGIC # MAGIC It will be in charge of: # MAGIC # MAGIC * Creating the input question embeddings (with Databricks `bge-large-en`) # MAGIC * Calling the vector search index to find similar documents to augment the prompt with # MAGIC # MAGIC Databricks LangChain wrapper makes it easy to do in one step, handling all the underlying logic and API call for you. # COMMAND ---------- index_name=f"{catalog}.{db}.databricks_pdf_documentation_self_managed_vs_index" host = "https://" + spark.conf.get("spark.databricks.workspaceUrl") #Let's make sure the secret is properly setup and can access our vector search index. 
Check the quick-start demo for more guidance test_demo_permissions(host, secret_scope="dbdemos", secret_key="rag_sp_token", vs_endpoint_name=VECTOR_SEARCH_ENDPOINT_NAME, index_name=index_name, embedding_endpoint_name="databricks-bge-large-en", managed_embeddings = False) # COMMAND ---------- from databricks.vector_search.client import VectorSearchClient from langchain.vectorstores import DatabricksVectorSearch from langchain.embeddings import DatabricksEmbeddings from langchain.chains import RetrievalQA os.environ['DATABRICKS_TOKEN'] = dbutils.secrets.get("dbdemos", "rag_sp_token") embedding_model = DatabricksEmbeddings(endpoint="databricks-bge-large-en") def get_retriever(persist_dir: str = None): os.environ["DATABRICKS_HOST"] = host #Get the vector search index vsc = VectorSearchClient(workspace_url=host, personal_access_token=os.environ["DATABRICKS_TOKEN"]) vs_index = vsc.get_index( endpoint_name=VECTOR_SEARCH_ENDPOINT_NAME, index_name=index_name ) # Create the retriever vectorstore = DatabricksVectorSearch( vs_index, text_column="content", embedding=embedding_model, columns=["url"] ) return vectorstore.as_retriever(search_kwargs={'k': 4}) retriever = get_retriever() retrieve_document_chain = ( itemgetter("messages") | RunnableLambda(extract_question) | retriever ) print(retrieve_document_chain.invoke({"messages": [{"role": "user", "content": "What is Apache Spark?"}]})) # COMMAND ---------- # MAGIC %md # MAGIC ### Improve document search using LLM to generate a better sentence for the vector store, based on the chat history # MAGIC # MAGIC We need to retrieve documents related the the last question but also the history. # MAGIC # MAGIC One solution is to add a step for our LLM to summarize the history and the last question, making it a better fit for our vector search query. Let's do that as a new step in our chain: # COMMAND ---------- from langchain.schema.runnable import RunnableBranch generate_query_to_retrieve_context_template = """ Based on the chat history below, we want you to generate a query for an external data source to retrieve relevant documents so that we can better answer the question. The query should be in natual language. The external data source uses similarity search to search for relevant documents in a vector space. So the query should be similar to the relevant documents semantically. Answer with only the query. Do not add explanation. 
Chat history: {chat_history} Question: {question} """ generate_query_to_retrieve_context_prompt = PromptTemplate( input_variables= ["chat_history", "question"], template = generate_query_to_retrieve_context_template ) generate_query_to_retrieve_context_chain = ( { "question": itemgetter("messages") | RunnableLambda(extract_question), "chat_history": itemgetter("messages") | RunnableLambda(extract_history), } | RunnableBranch( #Augment query only when there is a chat history (lambda x: x["chat_history"], generate_query_to_retrieve_context_prompt | chat_model | StrOutputParser()), (lambda x: not x["chat_history"], RunnableLambda(lambda x: x["question"])), RunnableLambda(lambda x: x["question"]) ) ) #Let's try it output = generate_query_to_retrieve_context_chain.invoke({ "messages": [ {"role": "user", "content": "What is Apache Spark?"} ] }) print(f"Test retriever query without history: {output}") output = generate_query_to_retrieve_context_chain.invoke({ "messages": [ {"role": "user", "content": "What is Apache Spark?"}, {"role": "assistant", "content": "Apache Spark is an open-source data processing engine that is widely used in big data analytics."}, {"role": "user", "content": "Does it support streaming?"} ] }) print(f"Test retriever question, summarized with history: {output}") # COMMAND ---------- # MAGIC %md-sandbox # MAGIC ## Let's put it together # MAGIC # MAGIC <img src="https://github.com/databricks-demos/dbdemos-resources/blob/main/images/product/chatbot-rag/llm-rag-self-managed-model-2.png?raw=true" style="float: right" width="600px"> # MAGIC # MAGIC # MAGIC Let's now merge the retriever and the full LangChain chain. # MAGIC # MAGIC We will use a custom LangChain template for our assistant to give a proper answer. # MAGIC # MAGIC Make sure you take some time to try different templates and adjust your assistant tone and personality for your requirement. # MAGIC # MAGIC # COMMAND ---------- from langchain.schema.runnable import RunnableBranch, RunnableParallel, RunnablePassthrough question_with_history_and_context_str = """ You are a trustful assistant for Databricks users. You are answering python, coding, SQL, data engineering, spark, data science, AI, ML, Datawarehouse, platform, API or infrastructure, Cloud administration question related to Databricks. If you do not know the answer to a question, you truthfully say you do not know. Read the discussion to get the context of the previous conversation. In the chat discussion, you are referred to as "system". The user is referred to as "user". Discussion: {chat_history} Here's some context which might or might not help you answer: {context} Answer straight, do not repeat the question, do not start with something like: the answer to the question, do not add "AI" in front of your answer, do not say: here is the answer, do not mention the context or the question. 
Based on this history and context, answer this question: {question} """ question_with_history_and_context_prompt = PromptTemplate( input_variables= ["chat_history", "context", "question"], template = question_with_history_and_context_str ) def format_context(docs): return "\n\n".join([d.page_content for d in docs]) def extract_source_urls(docs): return [d.metadata["url"] for d in docs] relevant_question_chain = ( RunnablePassthrough() | { "relevant_docs": generate_query_to_retrieve_context_prompt | chat_model | StrOutputParser() | retriever, "chat_history": itemgetter("chat_history"), "question": itemgetter("question") } | { "context": itemgetter("relevant_docs") | RunnableLambda(format_context), "sources": itemgetter("relevant_docs") | RunnableLambda(extract_source_urls), "chat_history": itemgetter("chat_history"), "question": itemgetter("question") } | { "prompt": question_with_history_and_context_prompt, "sources": itemgetter("sources") } | { "result": itemgetter("prompt") | chat_model | StrOutputParser(), "sources": itemgetter("sources") } ) irrelevant_question_chain = ( RunnableLambda(lambda x: {"result": 'I cannot answer questions that are not about Databricks.', "sources": []}) ) branch_node = RunnableBranch( (lambda x: "yes" in x["question_is_relevant"].lower(), relevant_question_chain), (lambda x: "no" in x["question_is_relevant"].lower(), irrelevant_question_chain), irrelevant_question_chain ) full_chain = ( { "question_is_relevant": is_about_databricks_chain, "question": itemgetter("messages") | RunnableLambda(extract_question), "chat_history": itemgetter("messages") | RunnableLambda(extract_history), } | branch_node ) # COMMAND ---------- # MAGIC %md # MAGIC Let's try our full chain: # COMMAND ---------- # DBTITLE 1,Asking an out-of-scope question import json non_relevant_dialog = { "messages": [ {"role": "user", "content": "What is Apache Spark?"}, {"role": "assistant", "content": "Apache Spark is an open-source data processing engine that is widely used in big data analytics."}, {"role": "user", "content": "Why is the sky blue?"} ] } print(f'Testing with a non relevant question...') response = full_chain.invoke(non_relevant_dialog) display_chat(non_relevant_dialog["messages"], response) # COMMAND ---------- # DBTITLE 1,Asking a relevant question dialog = { "messages": [ {"role": "user", "content": "What is Apache Spark?"}, {"role": "assistant", "content": "Apache Spark is an open-source data processing engine that is widely used in big data analytics."}, {"role": "user", "content": "Does it support streaming?"} ] } print(f'Testing with relevant history and question...') response = full_chain.invoke(dialog) display_chat(dialog["messages"], response) # COMMAND ---------- # MAGIC %md # MAGIC ## Register the chatbot model to Unity Catalog # COMMAND ---------- import cloudpickle import langchain from mlflow.models import infer_signature mlflow.set_registry_uri("databricks-uc") model_name = f"{catalog}.{db}.dbdemos_advanced_chatbot_model" with mlflow.start_run(run_name="dbdemos_chatbot_rag") as run: #Get our model signature from input/output input_df = pd.DataFrame({"messages": [dialog]}) output = full_chain.invoke(dialog) signature = infer_signature(input_df, output) model_info = mlflow.langchain.log_model( full_chain, loader_fn=get_retriever, # Load the retriever with DATABRICKS_TOKEN env as secret (for authentication). 
artifact_path="chain", registered_model_name=model_name, pip_requirements=[ "mlflow==" + mlflow.__version__, "langchain==" + langchain.__version__, "databricks-vectorsearch", "pydantic==2.5.2 --no-binary pydantic", "cloudpickle=="+ cloudpickle.__version__ ], input_example=input_df, signature=signature ) # COMMAND ---------- # MAGIC %md Let's try loading our model # COMMAND ---------- model = mlflow.langchain.load_model(model_info.model_uri) model.invoke(dialog) # COMMAND ---------- # MAGIC %md # MAGIC # MAGIC ## Conclusion # MAGIC # MAGIC We've seen how we can improve our chatbot, adding more advanced capabilities to handle a chat history. # MAGIC # MAGIC As you add capabilities to your model and tune the prompt, it will get harder to evaluate your model's performance in a repeatable way. # MAGIC # MAGIC Your new prompt might work well for what you tried to fix, but could also have an impact on other questions. # MAGIC # MAGIC ## Next: Introducing offline model evaluation with MLflow # MAGIC # MAGIC To solve these issues, we need a repeatable way of testing our model's answers as part of our LLMOps deployment! # MAGIC # MAGIC Open the next [03-Offline-Evaluation]($./03-Offline-Evaluation) notebook to discover how to evaluate your model.
[ "langchain.schema.output_parser.StrOutputParser", "langchain.embeddings.DatabricksEmbeddings", "langchain.schema.runnable.RunnablePassthrough", "langchain.vectorstores.DatabricksVectorSearch", "langchain.chat_models.ChatDatabricks", "langchain.schema.runnable.RunnableLambda", "langchain.prompts.PromptTemplate" ]
[((2610, 2742), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['question']", 'template': '"""You are an assistant. Give a short answer to this question: {question}"""'}), "(input_variables=['question'], template=\n 'You are an assistant. Give a short answer to this question: {question}')\n", (2624, 2742), False, 'from langchain.prompts import PromptTemplate\n'), ((2761, 2831), 'langchain.chat_models.ChatDatabricks', 'ChatDatabricks', ([], {'endpoint': '"""databricks-llama-2-70b-chat"""', 'max_tokens': '(500)'}), "(endpoint='databricks-llama-2-70b-chat', max_tokens=500)\n", (2775, 2831), False, 'from langchain.chat_models import ChatDatabricks\n'), ((3347, 3446), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['chat_history', 'question']", 'template': 'prompt_with_history_str'}), "(input_variables=['chat_history', 'question'], template=\n prompt_with_history_str)\n", (3361, 3446), False, 'from langchain.prompts import PromptTemplate\n'), ((5507, 5577), 'langchain.chat_models.ChatDatabricks', 'ChatDatabricks', ([], {'endpoint': '"""databricks-llama-2-70b-chat"""', 'max_tokens': '(200)'}), "(endpoint='databricks-llama-2-70b-chat', max_tokens=200)\n", (5521, 5577), False, 'from langchain.chat_models import ChatDatabricks\n'), ((6409, 6517), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['chat_history', 'question']", 'template': 'is_question_about_databricks_str'}), "(input_variables=['chat_history', 'question'], template=\n is_question_about_databricks_str)\n", (6423, 6517), False, 'from langchain.prompts import PromptTemplate\n'), ((8992, 9048), 'langchain.embeddings.DatabricksEmbeddings', 'DatabricksEmbeddings', ([], {'endpoint': '"""databricks-bge-large-en"""'}), "(endpoint='databricks-bge-large-en')\n", (9012, 9048), False, 'from langchain.embeddings import DatabricksEmbeddings\n'), ((10974, 11093), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['chat_history', 'question']", 'template': 'generate_query_to_retrieve_context_template'}), "(input_variables=['chat_history', 'question'], template=\n generate_query_to_retrieve_context_template)\n", (10988, 11093), False, 'from langchain.prompts import PromptTemplate\n'), ((13980, 14103), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['chat_history', 'context', 'question']", 'template': 'question_with_history_and_context_str'}), "(input_variables=['chat_history', 'context', 'question'],\n template=question_with_history_and_context_str)\n", (13994, 14103), False, 'from langchain.prompts import PromptTemplate\n'), ((15045, 15160), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (["(lambda x: {'result':\n 'I cannot answer questions that are not about Databricks.', 'sources': []})"], {}), "(lambda x: {'result':\n 'I cannot answer questions that are not about Databricks.', 'sources': []})\n", (15059, 15160), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((2873, 2890), 'langchain.schema.output_parser.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (2888, 2890), False, 'from langchain.schema.output_parser import StrOutputParser\n'), ((4639, 4656), 'langchain.schema.output_parser.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (4654, 4656), False, 'from langchain.schema.output_parser import StrOutputParser\n'), ((6791, 6808), 'langchain.schema.output_parser.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (6806, 6808), False, 'from 
langchain.schema.output_parser import StrOutputParser\n'), ((9178, 9275), 'databricks.vector_search.client.VectorSearchClient', 'VectorSearchClient', ([], {'workspace_url': 'host', 'personal_access_token': "os.environ['DATABRICKS_TOKEN']"}), "(workspace_url=host, personal_access_token=os.environ[\n 'DATABRICKS_TOKEN'])\n", (9196, 9275), False, 'from databricks.vector_search.client import VectorSearchClient\n'), ((9434, 9538), 'langchain.vectorstores.DatabricksVectorSearch', 'DatabricksVectorSearch', (['vs_index'], {'text_column': '"""content"""', 'embedding': 'embedding_model', 'columns': "['url']"}), "(vs_index, text_column='content', embedding=\n embedding_model, columns=['url'])\n", (9456, 9538), False, 'from langchain.vectorstores import DatabricksVectorSearch\n'), ((17282, 17315), 'mlflow.models.infer_signature', 'infer_signature', (['input_df', 'output'], {}), '(input_df, output)\n', (17297, 17315), False, 'from mlflow.models import infer_signature\n'), ((9670, 9692), 'operator.itemgetter', 'itemgetter', (['"""messages"""'], {}), "('messages')\n", (9680, 9692), False, 'from operator import itemgetter\n'), ((9700, 9732), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['extract_question'], {}), '(extract_question)\n', (9714, 9732), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((11590, 11629), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (["(lambda x: x['question'])"], {}), "(lambda x: x['question'])\n", (11604, 11629), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((14984, 15005), 'operator.itemgetter', 'itemgetter', (['"""sources"""'], {}), "('sources')\n", (14994, 15005), False, 'from operator import itemgetter\n'), ((11170, 11192), 'operator.itemgetter', 'itemgetter', (['"""messages"""'], {}), "('messages')\n", (11180, 11192), False, 'from operator import itemgetter\n'), ((11195, 11227), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['extract_question'], {}), '(extract_question)\n', (11209, 11227), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((11253, 11275), 'operator.itemgetter', 'itemgetter', (['"""messages"""'], {}), "('messages')\n", (11263, 11275), False, 'from operator import itemgetter\n'), ((11278, 11309), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['extract_history'], {}), '(extract_history)\n', (11292, 11309), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((11542, 11581), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (["(lambda x: x['question'])"], {}), "(lambda x: x['question'])\n", (11556, 11581), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((14866, 14887), 'operator.itemgetter', 'itemgetter', (['"""sources"""'], {}), "('sources')\n", (14876, 14887), False, 'from operator import itemgetter\n'), ((14950, 14967), 'langchain.schema.output_parser.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (14965, 14967), False, 'from langchain.schema.output_parser import StrOutputParser\n'), ((15478, 15500), 'operator.itemgetter', 'itemgetter', (['"""messages"""'], {}), "('messages')\n", (15488, 15500), False, 'from operator import itemgetter\n'), ((15503, 15535), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['extract_question'], {}), '(extract_question)\n', (15517, 15535), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((15557, 15579), 'operator.itemgetter', 'itemgetter', (['"""messages"""'], {}), "('messages')\n", (15567, 15579), False, 'from operator 
import itemgetter\n'), ((15582, 15613), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['extract_history'], {}), '(extract_history)\n', (15596, 15613), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((11482, 11499), 'langchain.schema.output_parser.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (11497, 11499), False, 'from langchain.schema.output_parser import StrOutputParser\n'), ((14299, 14320), 'langchain.schema.runnable.RunnablePassthrough', 'RunnablePassthrough', ([], {}), '()\n', (14318, 14320), False, 'from langchain.schema.runnable import RunnableBranch, RunnableParallel, RunnablePassthrough\n'), ((14715, 14741), 'operator.itemgetter', 'itemgetter', (['"""chat_history"""'], {}), "('chat_history')\n", (14725, 14741), False, 'from operator import itemgetter\n'), ((14760, 14782), 'operator.itemgetter', 'itemgetter', (['"""question"""'], {}), "('question')\n", (14770, 14782), False, 'from operator import itemgetter\n'), ((14914, 14934), 'operator.itemgetter', 'itemgetter', (['"""prompt"""'], {}), "('prompt')\n", (14924, 14934), False, 'from operator import itemgetter\n'), ((4443, 4465), 'operator.itemgetter', 'itemgetter', (['"""messages"""'], {}), "('messages')\n", (4453, 4465), False, 'from operator import itemgetter\n'), ((4468, 4500), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['extract_question'], {}), '(extract_question)\n', (4482, 4500), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((4526, 4548), 'operator.itemgetter', 'itemgetter', (['"""messages"""'], {}), "('messages')\n", (4536, 4548), False, 'from operator import itemgetter\n'), ((4551, 4582), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['extract_history'], {}), '(extract_history)\n', (4565, 4582), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((6579, 6601), 'operator.itemgetter', 'itemgetter', (['"""messages"""'], {}), "('messages')\n", (6589, 6601), False, 'from operator import itemgetter\n'), ((6604, 6636), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['extract_question'], {}), '(extract_question)\n', (6618, 6636), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((6662, 6684), 'operator.itemgetter', 'itemgetter', (['"""messages"""'], {}), "('messages')\n", (6672, 6684), False, 'from operator import itemgetter\n'), ((6687, 6718), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['extract_history'], {}), '(extract_history)\n', (6701, 6718), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((14456, 14482), 'operator.itemgetter', 'itemgetter', (['"""chat_history"""'], {}), "('chat_history')\n", (14466, 14482), False, 'from operator import itemgetter\n'), ((14501, 14523), 'operator.itemgetter', 'itemgetter', (['"""question"""'], {}), "('question')\n", (14511, 14523), False, 'from operator import itemgetter\n'), ((14551, 14578), 'operator.itemgetter', 'itemgetter', (['"""relevant_docs"""'], {}), "('relevant_docs')\n", (14561, 14578), False, 'from operator import itemgetter\n'), ((14581, 14611), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['format_context'], {}), '(format_context)\n', (14595, 14611), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((14628, 14655), 'operator.itemgetter', 'itemgetter', (['"""relevant_docs"""'], {}), "('relevant_docs')\n", (14638, 14655), False, 'from operator import itemgetter\n'), ((14658, 14693), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', 
(['extract_source_urls'], {}), '(extract_source_urls)\n', (14672, 14693), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((14405, 14422), 'langchain.schema.output_parser.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (14420, 14422), False, 'from langchain.schema.output_parser import StrOutputParser\n')]
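One thing the notebook never shows directly, because `display_chat` hides it, is the shape of what `full_chain` consumes and returns. As a hedged illustration, assuming the chain has been assembled exactly as in the notebook above: the input is a dict with a `messages` list of role/content pairs, and both branches of `branch_node` return a dict with `result` and `sources` keys.

sample_dialog = {
    "messages": [
        {"role": "user", "content": "What is Apache Spark?"},
        {"role": "assistant", "content": "Apache Spark is an open-source data processing engine that is widely used in big data analytics."},
        {"role": "user", "content": "Does it support streaming?"},
    ]
}

response = full_chain.invoke(sample_dialog)

# The relevant-question branch fills "sources" with the URLs taken from the
# retrieved documents' metadata; the refusal branch returns an empty list.
print(response["result"])
print(response["sources"])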
"""A tracer that runs evaluators over completed runs.""" from __future__ import annotations import logging from concurrent.futures import Future, ThreadPoolExecutor from typing import Any, Dict, List, Optional, Sequence, Set, Union from uuid import UUID import langsmith from langsmith.evaluation.evaluator import EvaluationResult from langchain.callbacks import manager from langchain.callbacks.tracers import langchain as langchain_tracer from langchain.callbacks.tracers.base import BaseTracer from langchain.callbacks.tracers.schemas import Run logger = logging.getLogger(__name__) class EvaluatorCallbackHandler(BaseTracer): """A tracer that runs a run evaluator whenever a run is persisted. Parameters ---------- evaluators : Sequence[RunEvaluator] The run evaluators to apply to all top level runs. max_workers : int, optional The maximum number of worker threads to use for running the evaluators. If not specified, it will default to the number of evaluators. client : LangSmith Client, optional The LangSmith client instance to use for evaluating the runs. If not specified, a new instance will be created. example_id : Union[UUID, str], optional The example ID to be associated with the runs. project_name : str, optional The LangSmith project name to be organize eval chain runs under. Attributes ---------- example_id : Union[UUID, None] The example ID associated with the runs. client : Client The LangSmith client instance used for evaluating the runs. evaluators : Sequence[RunEvaluator] The sequence of run evaluators to be executed. executor : ThreadPoolExecutor The thread pool executor used for running the evaluators. futures : Set[Future] The set of futures representing the running evaluators. skip_unfinished : bool Whether to skip runs that are not finished or raised an error. project_name : Optional[str] The LangSmith project name to be organize eval chain runs under. """ name = "evaluator_callback_handler" def __init__( self, evaluators: Sequence[langsmith.RunEvaluator], max_workers: Optional[int] = None, client: Optional[langsmith.Client] = None, example_id: Optional[Union[UUID, str]] = None, skip_unfinished: bool = True, project_name: Optional[str] = "evaluators", **kwargs: Any, ) -> None: super().__init__(**kwargs) self.example_id = ( UUID(example_id) if isinstance(example_id, str) else example_id ) self.client = client or langchain_tracer.get_client() self.evaluators = evaluators self.max_workers = max_workers or len(evaluators) self.futures: Set[Future] = set() self.skip_unfinished = skip_unfinished self.project_name = project_name self.logged_eval_results: Dict[str, List[EvaluationResult]] = {} def _evaluate_in_project(self, run: Run, evaluator: langsmith.RunEvaluator) -> None: """Evaluate the run in the project. Parameters ---------- run : Run The run to be evaluated. evaluator : RunEvaluator The evaluator to use for evaluating the run. """ try: if self.project_name is None: eval_result = self.client.evaluate_run(run, evaluator) with manager.tracing_v2_enabled( project_name=self.project_name, tags=["eval"], client=self.client ): eval_result = self.client.evaluate_run(run, evaluator) except Exception as e: logger.error( f"Error evaluating run {run.id} with " f"{evaluator.__class__.__name__}: {e}", exc_info=True, ) raise e example_id = str(run.reference_example_id) self.logged_eval_results.setdefault(example_id, []).append(eval_result) def _persist_run(self, run: Run) -> None: """Run the evaluator on the run. Parameters ---------- run : Run The run to be evaluated. 
""" if self.skip_unfinished and not run.outputs: logger.debug(f"Skipping unfinished run {run.id}") return run_ = run.copy() run_.reference_example_id = self.example_id if self.max_workers > 0: with ThreadPoolExecutor(max_workers=self.max_workers) as executor: list( executor.map( self._evaluate_in_project, [run_ for _ in range(len(self.evaluators))], self.evaluators, ) ) else: for evaluator in self.evaluators: self._evaluate_in_project(run_, evaluator)
[ "langchain.callbacks.tracers.langchain.get_client", "langchain.callbacks.manager.tracing_v2_enabled" ]
[((562, 589), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (579, 589), False, 'import logging\n'), ((2581, 2597), 'uuid.UUID', 'UUID', (['example_id'], {}), '(example_id)\n', (2585, 2597), False, 'from uuid import UUID\n'), ((2687, 2716), 'langchain.callbacks.tracers.langchain.get_client', 'langchain_tracer.get_client', ([], {}), '()\n', (2714, 2716), True, 'from langchain.callbacks.tracers import langchain as langchain_tracer\n'), ((3489, 3586), 'langchain.callbacks.manager.tracing_v2_enabled', 'manager.tracing_v2_enabled', ([], {'project_name': 'self.project_name', 'tags': "['eval']", 'client': 'self.client'}), "(project_name=self.project_name, tags=['eval'],\n client=self.client)\n", (3515, 3586), False, 'from langchain.callbacks import manager\n'), ((4506, 4554), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'self.max_workers'}), '(max_workers=self.max_workers)\n', (4524, 4554), False, 'from concurrent.futures import Future, ThreadPoolExecutor\n')]
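For context, a handler like the one above is normally built with one or more LangSmith run evaluators and then attached as an ordinary callback, so that every top-level run it persists gets scored. The sketch below is only an illustration: it assumes LangSmith credentials are configured in the environment and that `EvaluatorCallbackHandler` is importable from the module above; `LengthEvaluator` is a toy evaluator invented for the example.

import langsmith
from langsmith.evaluation.evaluator import EvaluationResult

from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate


class LengthEvaluator(langsmith.RunEvaluator):
    """Toy evaluator: scores a run by the length of its text output."""

    def evaluate_run(self, run, example=None) -> EvaluationResult:
        text = str((run.outputs or {}).get("text", ""))
        return EvaluationResult(key="output_length", score=len(text))


evaluation_handler = EvaluatorCallbackHandler(
    evaluators=[LengthEvaluator()],
    project_name="evaluators",  # eval chain runs are grouped under this project
)

chain = LLMChain(
    llm=ChatOpenAI(temperature=0),
    prompt=PromptTemplate.from_template("Tell me a joke about {topic}"),
)
# Attached like any other callback; results are also cached locally on the
# handler, keyed by the run's reference example id.
chain.run(topic="bears", callbacks=[evaluation_handler])
print(evaluation_handler.logged_eval_results)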
import streamlit as st import dotenv import langchain import json from cassandra.cluster import Session from cassandra.query import PreparedStatement from langchain.agents.agent_toolkits import create_retriever_tool, create_conversational_retrieval_agent from langchain.chat_models import ChatOpenAI from langchain.embeddings import OpenAIEmbeddings from langchain.callbacks import StreamlitCallbackHandler from langchain.schema import BaseRetriever, Document, SystemMessage #from langchain.cache import CassandraSemanticCache from cassandra.cluster import Cluster, Session from cassandra.auth import PlainTextAuthProvider # Enable langchain debug mode #langchain.debug = True dotenv.load_dotenv(dotenv.find_dotenv()) class AstraProductRetriever(BaseRetriever): session: Session embedding: OpenAIEmbeddings lang: str = "Japanese" search_statement_en: PreparedStatement = None search_statement_ja: PreparedStatement = None class Config: arbitrary_types_allowed = True def get_relevant_documents(self, query): docs = [] embeddingvector = self.embedding.embed_query(query) if self.lang == "Japanese": if self.search_statement_ja is None: self.search_statement_ja = self.session.prepare(""" SELECT id, similarity_cosine(sem_vec, ?) as similarity, title, author, publisher, price, description FROM app.book_openai ORDER BY sem_vec ANN OF ? LIMIT ? """) query = self.search_statement_ja else: if self.search_statement_en is None: self.search_statement_en = self.session.prepare(""" SELECT id, similarity_cosine(sem_vec, ?) as similarity, title, author, publisher, price, description FROM app.book_openai_en ORDER BY sem_vec ANN OF ? LIMIT ? """) query = self.search_statement_en results = self.session.execute(query, [embeddingvector, embeddingvector, 5]) top_products = results._current_rows for r in top_products: if r.similarity > 0.91: docs.append(Document( id=r.id, page_content=r.title, metadata={"product id": r.id, "title": r.title, "author": r.author, "publisher": r.publisher, "description": r.description, "price": r.price } )) return docs def get_session(scb: str, secrets: str) -> Session: """ Connect to Astra DB using secure connect bundle and credentials. Parameters ---------- scb : str Path to secure connect bundle. secrets : str Path to credentials. """ cloud_config = { 'secure_connect_bundle': scb } with open(secrets) as f: secrets = json.load(f) CLIENT_ID = secrets["clientId"] CLIENT_SECRET = secrets["secret"] auth_provider = PlainTextAuthProvider(CLIENT_ID, CLIENT_SECRET) cluster = Cluster(cloud=cloud_config, auth_provider=auth_provider) return cluster.connect() @st.cache_resource def create_chatbot(lang: str): print(f"Creating chatbot for {lang}...") session = get_session(scb='./config/secure-connect-demo.zip', secrets='./config/demo-token.json') llm = ChatOpenAI(temperature=0, streaming=True) embedding = OpenAIEmbeddings() #langchain.llm_cache = CassandraSemanticCache(session=session, # keyspace="bookstore", # embedding=embedding, # table_name="cass_sem_cache") retriever = AstraProductRetriever( session=session, embedding=embedding, lang=lang) retriever_tool = create_retriever_tool( retriever, "books_retrevier", "Useful when searching for books from a book store. Prices are in YEN.") system_message = """ You are a customer service of a book store and you are asked to pick books for a customer. You must try to find books related to given questions first. You must use the books_retreiver. You must not provide any information other than books that you get from books_retriever. 
You should behave as a bookstore clerk. """ if lang == "Japanese": system_message = f"{system_message} All the responses should be in Japanese language." message = SystemMessage(content=system_message) agent_executor = create_conversational_retrieval_agent( llm=llm, tools=[retriever_tool], system_message=message, verbose=True) return agent_executor if 'history' not in st.session_state: st.session_state['history'] = { "English": [], "Japanese" : [] } st.set_page_config(layout="wide") #with st.sidebar: # lang = st.radio( # "Chat language", # ["English", "Japanese"], # captions=[".", "Experimental", "."]) lang = "Japanese" chatbot = create_chatbot(lang) # Display chat messages from history on app rerun for (query, answer) in st.session_state['history'][lang]: with st.chat_message("User"): st.markdown(query) with st.chat_message("Bot"): st.markdown(answer) prompt = st.chat_input(placeholder="Ask chatbot") if prompt: # Display user message in chat message container with st.chat_message("User"): st.markdown(prompt) # Display assistant response in chat message container with st.chat_message("Bot"): st_callback = StreamlitCallbackHandler(st.container()) #result = result = chatbot.invoke({ result = chatbot.invoke({ "input": prompt, "chat_history": st.session_state['history'][lang] }, config={"callbacks": [st_callback]}) st.session_state['history'][lang].append((prompt, result["output"])) st.markdown(result["output"])
[ "langchain.chat_models.ChatOpenAI", "langchain.schema.Document", "langchain.agents.agent_toolkits.create_conversational_retrieval_agent", "langchain.agents.agent_toolkits.create_retriever_tool", "langchain.schema.SystemMessage", "langchain.embeddings.OpenAIEmbeddings" ]
[((5375, 5408), 'streamlit.set_page_config', 'st.set_page_config', ([], {'layout': '"""wide"""'}), "(layout='wide')\n", (5393, 5408), True, 'import streamlit as st\n'), ((5847, 5887), 'streamlit.chat_input', 'st.chat_input', ([], {'placeholder': '"""Ask chatbot"""'}), "(placeholder='Ask chatbot')\n", (5860, 5887), True, 'import streamlit as st\n'), ((702, 722), 'dotenv.find_dotenv', 'dotenv.find_dotenv', ([], {}), '()\n', (720, 722), False, 'import dotenv\n'), ((3536, 3583), 'cassandra.auth.PlainTextAuthProvider', 'PlainTextAuthProvider', (['CLIENT_ID', 'CLIENT_SECRET'], {}), '(CLIENT_ID, CLIENT_SECRET)\n', (3557, 3583), False, 'from cassandra.auth import PlainTextAuthProvider\n'), ((3598, 3654), 'cassandra.cluster.Cluster', 'Cluster', ([], {'cloud': 'cloud_config', 'auth_provider': 'auth_provider'}), '(cloud=cloud_config, auth_provider=auth_provider)\n', (3605, 3654), False, 'from cassandra.cluster import Cluster, Session\n'), ((3919, 3960), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'streaming': '(True)'}), '(temperature=0, streaming=True)\n', (3929, 3960), False, 'from langchain.chat_models import ChatOpenAI\n'), ((3977, 3995), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (3993, 3995), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((4404, 4532), 'langchain.agents.agent_toolkits.create_retriever_tool', 'create_retriever_tool', (['retriever', '"""books_retrevier"""', '"""Useful when searching for books from a book store. Prices are in YEN."""'], {}), "(retriever, 'books_retrevier',\n 'Useful when searching for books from a book store. Prices are in YEN.')\n", (4425, 4532), False, 'from langchain.agents.agent_toolkits import create_retriever_tool, create_conversational_retrieval_agent\n'), ((5042, 5079), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'system_message'}), '(content=system_message)\n', (5055, 5079), False, 'from langchain.schema import BaseRetriever, Document, SystemMessage\n'), ((5101, 5213), 'langchain.agents.agent_toolkits.create_conversational_retrieval_agent', 'create_conversational_retrieval_agent', ([], {'llm': 'llm', 'tools': '[retriever_tool]', 'system_message': 'message', 'verbose': '(True)'}), '(llm=llm, tools=[retriever_tool],\n system_message=message, verbose=True)\n', (5138, 5213), False, 'from langchain.agents.agent_toolkits import create_retriever_tool, create_conversational_retrieval_agent\n'), ((3427, 3439), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3436, 3439), False, 'import json\n'), ((5724, 5747), 'streamlit.chat_message', 'st.chat_message', (['"""User"""'], {}), "('User')\n", (5739, 5747), True, 'import streamlit as st\n'), ((5757, 5775), 'streamlit.markdown', 'st.markdown', (['query'], {}), '(query)\n', (5768, 5775), True, 'import streamlit as st\n'), ((5785, 5807), 'streamlit.chat_message', 'st.chat_message', (['"""Bot"""'], {}), "('Bot')\n", (5800, 5807), True, 'import streamlit as st\n'), ((5817, 5836), 'streamlit.markdown', 'st.markdown', (['answer'], {}), '(answer)\n', (5828, 5836), True, 'import streamlit as st\n'), ((5961, 5984), 'streamlit.chat_message', 'st.chat_message', (['"""User"""'], {}), "('User')\n", (5976, 5984), True, 'import streamlit as st\n'), ((5994, 6013), 'streamlit.markdown', 'st.markdown', (['prompt'], {}), '(prompt)\n', (6005, 6013), True, 'import streamlit as st\n'), ((6082, 6104), 'streamlit.chat_message', 'st.chat_message', (['"""Bot"""'], {}), "('Bot')\n", (6097, 6104), True, 'import streamlit as st\n'), 
((6471, 6500), 'streamlit.markdown', 'st.markdown', (["result['output']"], {}), "(result['output'])\n", (6482, 6500), True, 'import streamlit as st\n'), ((6153, 6167), 'streamlit.container', 'st.container', ([], {}), '()\n', (6165, 6167), True, 'import streamlit as st\n'), ((2592, 2782), 'langchain.schema.Document', 'Document', ([], {'id': 'r.id', 'page_content': 'r.title', 'metadata': "{'product id': r.id, 'title': r.title, 'author': r.author, 'publisher': r.\n publisher, 'description': r.description, 'price': r.price}"}), "(id=r.id, page_content=r.title, metadata={'product id': r.id,\n 'title': r.title, 'author': r.author, 'publisher': r.publisher,\n 'description': r.description, 'price': r.price})\n", (2600, 2782), False, 'from langchain.schema import BaseRetriever, Document, SystemMessage\n')]
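The custom retriever above can also be exercised outside of Streamlit, which is handy for checking the ANN query and the 0.91 similarity cut-off in isolation. A hedged sketch, reusing the `get_session` and `AstraProductRetriever` helpers defined above; the bundle/token paths are the same placeholders the app uses, and `OPENAI_API_KEY` is assumed to be set.

from langchain.embeddings import OpenAIEmbeddings

session = get_session(scb='./config/secure-connect-demo.zip',
                      secrets='./config/demo-token.json')
retriever = AstraProductRetriever(session=session,
                                    embedding=OpenAIEmbeddings(),
                                    lang="Japanese")

# Only rows whose cosine similarity exceeds 0.91 survive the filter above.
for doc in retriever.get_relevant_documents("ミステリー小説のおすすめは?"):
    print(doc.metadata["title"], doc.metadata["price"])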
import langchain_helper as lch import streamlit as st st.title('pets name generator') user_animal_type = st.sidebar.selectbox('what is your pet', ('cat', 'dog', 'cow')) if user_animal_type == 'cat': user_pet_color = st.sidebar.text_area('what color is your cat', max_chars=15) if user_animal_type == 'dog': user_pet_color = st.sidebar.text_area('what color is your dog', max_chars=15) if user_animal_type == 'cow': user_pet_color = st.sidebar.text_area('what color is your cow', max_chars=15) if user_pet_color: response = lch.generate_pet_name(user_animal_type, user_pet_color) st.text(response['pet_name'])
[ "langchain_helper.generate_pet_name" ]
[((55, 86), 'streamlit.title', 'st.title', (['"""pets name generator"""'], {}), "('pets name generator')\n", (63, 86), True, 'import streamlit as st\n'), ((107, 170), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""what is your pet"""', "('cat', 'dog', 'cow')"], {}), "('what is your pet', ('cat', 'dog', 'cow'))\n", (127, 170), True, 'import streamlit as st\n'), ((223, 283), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', (['"""what color is your cat"""'], {'max_chars': '(15)'}), "('what color is your cat', max_chars=15)\n", (243, 283), True, 'import streamlit as st\n'), ((335, 395), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', (['"""what color is your dog"""'], {'max_chars': '(15)'}), "('what color is your dog', max_chars=15)\n", (355, 395), True, 'import streamlit as st\n'), ((447, 507), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', (['"""what color is your cow"""'], {'max_chars': '(15)'}), "('what color is your cow', max_chars=15)\n", (467, 507), True, 'import streamlit as st\n'), ((545, 600), 'langchain_helper.generate_pet_name', 'lch.generate_pet_name', (['user_animal_type', 'user_pet_color'], {}), '(user_animal_type, user_pet_color)\n', (566, 600), True, 'import langchain_helper as lch\n'), ((605, 634), 'streamlit.text', 'st.text', (["response['pet_name']"], {}), "(response['pet_name'])\n", (612, 634), True, 'import streamlit as st\n')]
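The `langchain_helper` module imported above is not included in this sample; the only contract the app relies on is that `generate_pet_name(animal_type, pet_color)` returns a mapping with a `pet_name` key (see the `response['pet_name']` access). One plausible implementation is sketched below purely as an illustration; the prompt wording, temperature, and model choice are assumptions.

# langchain_helper.py -- hypothetical implementation of the imported helper.
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate


def generate_pet_name(animal_type: str, pet_color: str) -> dict:
    llm = OpenAI(temperature=0.7)
    prompt = PromptTemplate(
        input_variables=["animal_type", "pet_color"],
        template=(
            "I have a {pet_color} {animal_type} as a pet and I want a cool name for it. "
            "Suggest five cool names for my pet."
        ),
    )
    # output_key makes the chain return {"pet_name": "..."} as the app expects.
    chain = LLMChain(llm=llm, prompt=prompt, output_key="pet_name")
    return chain({"animal_type": animal_type, "pet_color": pet_color})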
# import modules import telebot from telebot import * import logging import sqlite3 import os import langchain from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter from langchain.embeddings.openai import OpenAIEmbeddings from langchain.document_loaders import TextLoader from langchain.document_loaders import DirectoryLoader from langchain.vectorstores import Chroma from langchain.prompts import PromptTemplate from langchain.chat_models import ChatOpenAI from langchain.chains import RetrievalQA # connect to the database conn = sqlite3.connect(r"main.db", check_same_thread=False) cur = conn.cursor() # start logging logging.basicConfig(level=logging.INFO, filename="../info.log", filemode='w') # init a bot with token from file bot_token_file = open("bot_token.txt", "r") API_KEY = bot_token_file.readline() bot_token_file.close() bot = telebot.TeleBot("7174085128:AAGfMlZh5wUoV3vXfoGOYtb9vkN3SbqOmAE") # set the openai token token_file = open("openai_token.txt", "r") token = token_file.readline() token_file.close() os.environ["OPENAI_API_KEY"] = token docs_k = 65 # const number_of_goods = 6 # const goods = ["Philips EP2231/40", "Nivona CafeRomatica NICR 550", # list of goods "Delonghi ECAM 370.70.B", "Polaris PACM 2065AC", "Philips EP2030/10", "REDMOND RCM-1517"] langchain.debug = False # debug is off # read the vector databases vectordb_list = [] embedding = OpenAIEmbeddings() for i in range(number_of_goods): vectordb_list.append(Chroma(embedding_function=embedding, persist_directory="../output/"+str(i))) for vectordb in vectordb_list: print(vectordb._collection.count()) def get_info(itemID): question = "Tell us about this coffee machine" template = """You are a useful AI consultant for our household appliances store selling coffee machines. Your task is to describe this coffee machine. Talk only about the merits. Use the following pieces of context (Context) to answer the question (Question) at the end. If you don't know the answer, just say you don't know, don't try to make up an answer. First, make sure the attached text is relevant to the question. If the question does not relate to the text, answer that you cannot answer this question. Use a maximum of 15 sentences. Give your answer as clearly as possible, briefly describing all the advantages of this particular coffee machine. Context: {context} Question: {question}""" QA_CHAIN_PROMPT = PromptTemplate.from_template(template) vectordb = vectordb_list[itemID] retriever = vectordb.as_retriever(search_type="similarity", search_kwargs={"k": docs_k}) llm = ChatOpenAI( model_name="gpt-3.5-turbo", temperature=0, max_tokens = 250) qa_chain = RetrievalQA.from_chain_type( llm, retriever=retriever, return_source_documents=True, chain_type_kwargs={"prompt": QA_CHAIN_PROMPT}) result = qa_chain({"query": question}) return result["result"] def get_answer(itemID, question): template = """You are a useful AI consultant for our household appliances store selling coffee machines. Your task is to clearly answer the buyer's question. Use the following pieces of context (Context) to answer the question (Question) at the end. If you don't know the answer, just say you don't know, don't try to make up an answer. First, make sure the attached text is relevant to the question. If the question does not relate to the text, answer that you cannot answer this question. Use a maximum of 15 sentences. Make your answer as clear as possible. Speak competently. 
Context: {context} Question: {question}""" QA_CHAIN_PROMPT = PromptTemplate.from_template(template) vectordb = vectordb_list[itemID] retriever = vectordb.as_retriever(search_type="similarity", search_kwargs={"k": docs_k}) llm = ChatOpenAI( model_name="gpt-3.5-turbo", temperature=0, max_tokens = 250) qa_chain = RetrievalQA.from_chain_type( llm, retriever=retriever, return_source_documents=True, chain_type_kwargs={"prompt": QA_CHAIN_PROMPT}) result = qa_chain({"query": question}) return result["result"] def check_step(step, id): cur.execute("SELECT status FROM user WHERE userID = ?", (id,)) fetch_result = cur.fetchone() if step in fetch_result: return True else: return False def get_itemID(userID): cur.execute("SELECT itemID FROM user WHERE userID = ?", (userID,)) fetch_result = cur.fetchone() return fetch_result[0] @bot.message_handler(commands=["start"]) def start_message(message): keyboard = types.ReplyKeyboardMarkup( resize_keyboard = True, one_time_keyboard=True ) zero_machine = types.KeyboardButton(text="Philips EP2231/40") first_machine = types.KeyboardButton(text="Nivona CafeRomatica NICR 550") second_machine = types.KeyboardButton(text="Delonghi ECAM 370.70.B") third_machine = types.KeyboardButton(text="Polaris PACM 2065AC") fourth_machine = types.KeyboardButton(text="Philips EP2030/10") fifth_machine = types.KeyboardButton(text="REDMOND RCM-1517") keyboard.row(zero_machine, first_machine) keyboard.row(second_machine, third_machine) keyboard.row(fourth_machine, fifth_machine) bot.send_message(message.chat.id, "Main menu", reply_markup=keyboard) try: cur.execute("INSERT INTO user VALUES (?, ?, ?);", (message.chat.id, "menu", 0)) except: cur.execute("UPDATE user SET status = ? WHERE userID = ?;", ("menu", message.chat.id)) conn.commit() @bot.message_handler(content_types="text", func=lambda message: check_step("menu", message.chat.id)) def machine_description(message): if message.text in goods: keyboard = types.ReplyKeyboardMarkup( resize_keyboard=True, one_time_keyboard=True ) back_to_menu_button = types.KeyboardButton(text="Back to Menu") keyboard.add(back_to_menu_button) bot.send_message(message.chat.id, """Request accepted. Wait for a response...\nYou selected -> {}""".format(message.text)) description = get_info(goods.index(message.text)) bot.send_message(message.chat.id, description) bot.send_message(message.chat.id, """You can now ask questions about this product or return to the main menu to view another one.""", reply_markup=keyboard) # change user status in db cur.execute("UPDATE user SET status = ?, itemID = ? WHERE userID = ?;", ("chat", goods.index(message.text), message.chat.id)) conn.commit() else: bot.send_message(message.chat.id, "Request rejected. You entered wrong product name!") @bot.message_handler(content_types="text", func= lambda message: check_step("chat", message.chat.id)) def chat_with_ai(message): keyboard = types.ReplyKeyboardMarkup( resize_keyboard=True, one_time_keyboard=True ) back_to_menu_button = types.KeyboardButton(text="Back to Menu") keyboard.add(back_to_menu_button) if message.text == back_to_menu_button.text: bot.send_message(message.chat.id, "Returning back to Menu") cur.execute("UPDATE user SET status = ? 
WHERE userID = ?;", ("menu", message.chat.id)) conn.commit() keyboard = types.ReplyKeyboardMarkup( resize_keyboard = True, one_time_keyboard=True ) zero_machine = types.KeyboardButton(text="Philips EP2231/40") first_machine = types.KeyboardButton(text="Nivona CafeRomatica NICR 550") second_machine = types.KeyboardButton(text="Delonghi ECAM 370.70.B") third_machine = types.KeyboardButton(text="Polaris PACM 2065AC") fourth_machine = types.KeyboardButton(text="Philips EP2030/10") fifth_machine = types.KeyboardButton(text="REDMOND RCM-1517") keyboard.row(zero_machine, first_machine) keyboard.row(second_machine, third_machine) keyboard.row(fourth_machine, fifth_machine) bot.send_message(message.chat.id, "Main menu", reply_markup=keyboard) else: itemID = get_itemID(message.chat.id) answer = get_answer(itemID, message.text) bot.send_message(message.chat.id, answer, reply_markup=keyboard) bot.infinity_polling(timeout=10, long_polling_timeout = 5)
[ "langchain.prompts.PromptTemplate.from_template", "langchain.chains.RetrievalQA.from_chain_type", "langchain.chat_models.ChatOpenAI", "langchain.embeddings.openai.OpenAIEmbeddings" ]
[((571, 622), 'sqlite3.connect', 'sqlite3.connect', (['"""main.db"""'], {'check_same_thread': '(False)'}), "('main.db', check_same_thread=False)\n", (586, 622), False, 'import sqlite3\n'), ((661, 738), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'filename': '"""../info.log"""', 'filemode': '"""w"""'}), "(level=logging.INFO, filename='../info.log', filemode='w')\n", (680, 738), False, 'import logging\n'), ((883, 948), 'telebot.TeleBot', 'telebot.TeleBot', (['"""7174085128:AAGfMlZh5wUoV3vXfoGOYtb9vkN3SbqOmAE"""'], {}), "('7174085128:AAGfMlZh5wUoV3vXfoGOYtb9vkN3SbqOmAE')\n", (898, 948), False, 'import telebot\n'), ((1436, 1454), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1452, 1454), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((2592, 2630), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['template'], {}), '(template)\n', (2620, 2630), False, 'from langchain.prompts import PromptTemplate\n'), ((2776, 2845), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)', 'max_tokens': '(250)'}), "(model_name='gpt-3.5-turbo', temperature=0, max_tokens=250)\n", (2786, 2845), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2888, 3027), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', (['llm'], {'retriever': 'retriever', 'return_source_documents': '(True)', 'chain_type_kwargs': "{'prompt': QA_CHAIN_PROMPT}"}), "(llm, retriever=retriever,\n return_source_documents=True, chain_type_kwargs={'prompt': QA_CHAIN_PROMPT}\n )\n", (2915, 3027), False, 'from langchain.chains import RetrievalQA\n'), ((3901, 3939), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['template'], {}), '(template)\n', (3929, 3939), False, 'from langchain.prompts import PromptTemplate\n'), ((4085, 4154), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)', 'max_tokens': '(250)'}), "(model_name='gpt-3.5-turbo', temperature=0, max_tokens=250)\n", (4095, 4154), False, 'from langchain.chat_models import ChatOpenAI\n'), ((4197, 4336), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', (['llm'], {'retriever': 'retriever', 'return_source_documents': '(True)', 'chain_type_kwargs': "{'prompt': QA_CHAIN_PROMPT}"}), "(llm, retriever=retriever,\n return_source_documents=True, chain_type_kwargs={'prompt': QA_CHAIN_PROMPT}\n )\n", (4224, 4336), False, 'from langchain.chains import RetrievalQA\n')]
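The bot above only reads Chroma collections that were already persisted to `../output/<index>`; the `DirectoryLoader`, `TextLoader` and text-splitter imports at its top are left over from that ingestion step, which is not part of this sample. Below is a hedged sketch of what such a one-off ingestion script could look like; the `../input/<index>` layout, the chunk sizes, and the assumption that `OPENAI_API_KEY` is already set are all placeholders.

from langchain.document_loaders import DirectoryLoader, TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma

embedding = OpenAIEmbeddings()
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)

for i in range(6):  # one collection per coffee machine, matching number_of_goods
    raw_docs = DirectoryLoader(f"../input/{i}", loader_cls=TextLoader).load()
    chunks = splitter.split_documents(raw_docs)
    Chroma.from_documents(chunks,
                          embedding=embedding,
                          persist_directory=f"../output/{i}").persist()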
import logging
import os

import langchain
from langchain_community.llms import Ollama
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import OpenAI

from crawler import crawl_and_store
from document_store import get_faiss
from template_prompt import Legal_Template

langchain.verbose = os.getenv('VERBOSE') == "True"


def get_model(base_url: str, model_name: str = "mistral"):
    """
    Get the LLM that is used.
    When the model is "open-ai" we will use the GPT3.5 model by OpenAI.
    Otherwise try to access a local model that is running on the private machine using Ollama
    https://github.com/ollama/ollama
    :param base_url: If an Ollama model is used this endpoint will be queried.
    :param model_name: The name of the model that will be passed to Ollama.
    :return: LangChain LLM model
    """
    if model_name == "open-ai":
        return OpenAI()
    ollama = Ollama(base_url=base_url, model=model_name)
    return ollama


def get_chain(document_path, model_name, model_url, max_depth, start_legislation,
              metadata_path, html_dir, fixed_document_path):
    # Check if env file exists. Otherwise, copy the example and exit with an error to modify the file.
    if not os.path.exists(".env"):
        logging.error("Please copy the .env.example file to .env and configure it.")
        exit(1)

    # Check if the document path exists. Otherwise, create it.
    # Also check if the fixed document path exists. Otherwise, create it.
    # Also check if any documents are in the document path
    if not os.path.exists(document_path) or not os.listdir(document_path):
        if not os.path.exists(document_path):
            logging.warning(f"Document path {document_path} does not exist. Creating it.")
            os.makedirs(document_path)
        logging.warning(f"Document path {document_path} is empty. Crawling documents.")
        crawl_and_store(
            start_legislation=start_legislation,
            metadata=metadata_path,
            html_dir=html_dir,
            max_depth=max_depth
        )
        logging.info(f"Finished crawling documents. Crawled {len(os.listdir(document_path))} documents.")

    # get the LLM
    model = get_model(model_url, model_name)
    logging.info(f"Starting LegalLLM using {os.getenv('MODEL')} as LLM.")

    # choose task
    tasks_retriever = Legal_Template()
    task_template = tasks_retriever.task1_template()

    system = """
    You are an AI assistant for answering questions about EU taxonomy law.
    Use the given pieces of context and question-answer examples to answer the user's question.
    If you don't know the answer, just say "I don't know the answer." Don't make up an answer.
    """

    human = """
    Use the provided context extracted from relevant documents and the following three
    question-answer examples about the EU taxonomy Laws to help you answer the fourth question at the end.
    If you don't know the answer based on the context, just say "I don't know the answer." Don't make up an answer.
    ----------------
    CONTEXT:
    {context}
    ----------------
    QUESTION: What is a Taxonomy-eligible economic activity?
    ANSWER:
    {{
        "answer": "Article 1(5) of the Disclosures Delegated Act defines an eligible economic activity as an activity that is described in the delegated acts adopted under Article 10(3), Article 11(3), Article 12(2), Article 13(2), Article 14(2) and Article 15(2) of the Taxonomy Regulation. More specifically, according to Article 1(5) of the Disclosures Delegated Act, an economic activity is eligible irrespective of whether it meets any or all of the technical screening criteria laid down in the Climate Delegated Act (and future delegated acts). Therefore, the fact that an economic activity is Taxonomy-eligible does not give any indication of the environmental performance and sustainability of that activity.",
        "articles": ["Article 1.5", "Article 10.3", "Article 11.3", "Article 12.2", "Article 13.2", "Article 14.2", "Article 15.2"]
    }}
    QUESTION: What are 'enabling' and 'transitional' economic activities in the context of Taxonomy-eligibility reporting?
    ANSWER:
    {{
        "answer": "Articles 16 and 10(2) of the Taxonomy Regulation define enabling and transitional economic activities",
        "articles": ["Article 16", "Article 10.2"]
    }}
    QUESTION: How to identify and report eligibility for adaptation-related economic activities?
    ANSWER:
    {{
        "answer": "Adaptation activities, i.e. activities including adaptation solutions in accordance with Article 11 (1)(a) of the Taxonomy Regulation. Enabling economic activities, i.e. activities providing adaptation solutions in accordance with Article 11 (1)(b) of the Taxonomy Regulation.",
        "articles": ["Article 11 1.a", "Article 11 1.b"]
    }}
    QUESTION: {question}
    ANSWER:
    Let's think step by step and only answer based on the context; if you don't have an answer based on the context, just say "I don't know the answer."
    Stop right there with no additional information to the unrelated question.
    You must format your output as a JSON value that adheres to a given "JSON Schema" instance.
    The format has an answer and articles key, based on the answer, where articles has only the type(str).
    """

    # init task
    prompt = ChatPromptTemplate.from_messages([
        ("system", system),
        ("user", human)
    ])

    retriever = get_faiss(
        model_name=model_name,
        model_url=model_url,
        document_path=document_path,
        fixed_document_path=fixed_document_path
    ).as_retriever(search_kwargs={"k": 4})

    chain = (
        {"context": retriever, "question": RunnablePassthrough()}
        | prompt
        | model
        | StrOutputParser()
    )
    return chain
[ "langchain_core.runnables.RunnablePassthrough", "langchain_core.output_parsers.StrOutputParser", "langchain_community.llms.Ollama", "langchain_core.prompts.ChatPromptTemplate.from_messages", "langchain_openai.OpenAI" ]
[((430, 450), 'os.getenv', 'os.getenv', (['"""VERBOSE"""'], {}), "('VERBOSE')\n", (439, 450), False, 'import os\n'), ((1029, 1072), 'langchain_community.llms.Ollama', 'Ollama', ([], {'base_url': 'base_url', 'model': 'model_name'}), '(base_url=base_url, model=model_name)\n', (1035, 1072), False, 'from langchain_community.llms import Ollama\n'), ((2481, 2497), 'template_prompt.Legal_Template', 'Legal_Template', ([], {}), '()\n', (2495, 2497), False, 'from template_prompt import Legal_Template\n'), ((5632, 5703), 'langchain_core.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (["[('system', system), ('user', human)]"], {}), "([('system', system), ('user', human)])\n", (5664, 5703), False, 'from langchain_core.prompts import ChatPromptTemplate\n'), ((1006, 1014), 'langchain_openai.OpenAI', 'OpenAI', ([], {}), '()\n', (1012, 1014), False, 'from langchain_openai import OpenAI\n'), ((1350, 1372), 'os.path.exists', 'os.path.exists', (['""".env"""'], {}), "('.env')\n", (1364, 1372), False, 'import os\n'), ((1382, 1458), 'logging.error', 'logging.error', (['"""Please copy the .env.example file to .env and configure it."""'], {}), "('Please copy the .env.example file to .env and configure it.')\n", (1395, 1458), False, 'import logging\n'), ((1932, 2011), 'logging.warning', 'logging.warning', (['f"""Document path {document_path} is empty. Crawling documents."""'], {}), "(f'Document path {document_path} is empty. Crawling documents.')\n", (1947, 2011), False, 'import logging\n'), ((2020, 2140), 'crawler.crawl_and_store', 'crawl_and_store', ([], {'start_legislation': 'start_legislation', 'metadata': 'metadata_path', 'html_dir': 'html_dir', 'max_depth': 'max_depth'}), '(start_legislation=start_legislation, metadata=metadata_path,\n html_dir=html_dir, max_depth=max_depth)\n', (2035, 2140), False, 'from crawler import crawl_and_store\n'), ((6082, 6099), 'langchain_core.output_parsers.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (6097, 6099), False, 'from langchain_core.output_parsers import StrOutputParser\n'), ((1683, 1712), 'os.path.exists', 'os.path.exists', (['document_path'], {}), '(document_path)\n', (1697, 1712), False, 'import os\n'), ((1720, 1745), 'os.listdir', 'os.listdir', (['document_path'], {}), '(document_path)\n', (1730, 1745), False, 'import os\n'), ((1762, 1791), 'os.path.exists', 'os.path.exists', (['document_path'], {}), '(document_path)\n', (1776, 1791), False, 'import os\n'), ((1805, 1883), 'logging.warning', 'logging.warning', (['f"""Document path {document_path} does not exist. Creating it."""'], {}), "(f'Document path {document_path} does not exist. 
Creating it.')\n", (1820, 1883), False, 'import logging\n'), ((1896, 1922), 'os.makedirs', 'os.makedirs', (['document_path'], {}), '(document_path)\n', (1907, 1922), False, 'import os\n'), ((5743, 5871), 'document_store.get_faiss', 'get_faiss', ([], {'model_name': 'model_name', 'model_url': 'model_url', 'document_path': 'document_path', 'fixed_document_path': 'fixed_document_path'}), '(model_name=model_name, model_url=model_url, document_path=\n document_path, fixed_document_path=fixed_document_path)\n', (5752, 5871), False, 'from document_store import get_faiss\n'), ((2410, 2428), 'os.getenv', 'os.getenv', (['"""MODEL"""'], {}), "('MODEL')\n", (2419, 2428), False, 'import os\n'), ((6004, 6025), 'langchain_core.runnables.RunnablePassthrough', 'RunnablePassthrough', ([], {}), '()\n', (6023, 6025), False, 'from langchain_core.runnables import RunnablePassthrough\n'), ((2261, 2286), 'os.listdir', 'os.listdir', (['document_path'], {}), '(document_path)\n', (2271, 2286), False, 'import os\n')]
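For reference, here is a minimal sketch of the LCEL composition the chain above uses: an input map of {"context", "question"} piped through ChatPromptTemplate, the model, and StrOutputParser. The stand-in retriever, the short prompt text, and the local Ollama endpoint are assumptions for illustration; the real chain gets its retriever from get_faiss() and its prompt from the templates above, and this sketch assumes an Ollama server is running locally with the "mistral" model pulled.

# Illustrative sketch only; not part of the record above.
from langchain_community.llms import Ollama
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda, RunnablePassthrough

prompt = ChatPromptTemplate.from_messages([
    ("system", "Answer using only the provided context."),
    ("user", "CONTEXT:\n{context}\n\nQUESTION: {question}"),
])

# Stand-in for the FAISS retriever: always returns a fixed context string.
fake_retriever = RunnableLambda(lambda question: "Article 8 covers disclosure duties.")

model = Ollama(base_url="http://localhost:11434", model="mistral")

chain = (
    {"context": fake_retriever, "question": RunnablePassthrough()}
    | prompt
    | model
    | StrOutputParser()
)

print(chain.invoke("Which article covers disclosure duties?"))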
# Standard Library Imports import ast import json import os import re # Third-Party Imports import textwrap from typing import Any, Dict, List, Optional, Type import langchain import streamlit as st from langchain.chains import LLMChain from langchain.prompts import PromptTemplate from langchain.tools import BaseTool from openmm import ( AndersenThermostat, BrownianIntegrator, LangevinIntegrator, LangevinMiddleIntegrator, MonteCarloBarostat, OpenMMException, Platform, VerletIntegrator, app, unit, ) from openmm.app import ( PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter, ) from openmm.unit import bar, femtoseconds, kelvin, nanometers, picosecond, picoseconds from pydantic import BaseModel, Field from mdagent.tools.base_tools.preprocess_tools import CleaningTools # Local Library/Application Imports from mdagent.utils import FileType, PathRegistry # TODO delete files created from the simulation if not needed. FORCEFIELD_LIST = [ "amber14/DNA.OL15.xml", "amber14/DNA.bsc1.xml", "amber14/RNA.OL3.xml", "amber14/lipid17.xml", "amber14/protein.ff14SB.xml", "amber14/protein.ff15ipq.xml", "amber14/spce.xml", "amber14/tip3p.xml", "amber14/tip3pfb.xml", "amber14/tip4pew.xml", "amber14/tip4pfb.xml", "charmm36/spce.xml", "charmm36/tip3p-pme-b.xml", "charmm36/tip3p-pme-f.xml", "charmm36/tip4p2005.xml", "charmm36/tip4pew.xml", "charmm36/tip5p.xml", "charmm36/tip5pew.xml", "charmm36/water.xml", "absinth.xml", "amber03.xml", "amber03_obc.xml", "amber10.xml", "amber10_obc.xml", "amber14-all", "amber14-all.xml", "amber96.xml", "amber96_obc.xml", "amber99Test.xml", "amber99_obc.xml", "amber99sb.xml", "amber99sbildn.xml", "amber99sbnmr.xml", "amberfb15.xml", "amoeba2009.xml", "amoeba2009_gk.xml", "amoeba2013.xml", "amoeba2013_gk.xml", "charmm36.xml", "charmm_polar_2013.xml", "hydrogens.xml", "iamoeba.xml", "pdbNames.xml", "residues.xml", "spce.xml", "swm4ndp.xml", "tip3p.xml", "tip3pfb.xml", "tip4pew.xml", "tip4pfb.xml", "tip5p.xml", ] class SimulationFunctions: def __init__( self, path_registry, temperature: float = 0.05, model_name: str = "gpt-4", request_timeout: int = 1000, max_tokens: int = 2000, ): self.path_registry = path_registry self.temperature = temperature self.model_name = model_name self.request_timeout = request_timeout self.max_tokens = max_tokens self.llm = langchain.chat_models.ChatOpenAI( temperature=self.temperature, model_name=self.model_name, request_timeout=self.request_timeout, max_tokens=self.request_timeout, ) #######==================System Congifuration==================######## # System Configuration initialization. 
def _create_system( pdb, forcefield, nonbondedMethod="NoCutoff", nonbondedCutoff=None, ewaldErrorTolerance=None, constraints="None", rigidWater=False, constraintTolerance=None, **kwargs, ): # Create a dictionary to hold system parameters system_params = { "nonbondedMethod": nonbondedMethod, "constraints": constraints, "rigidWater": rigidWater, } # Set nonbondedCutoff if applicable if ( nonbondedMethod in ["PME", "CutoffNonPeriodic", "CutoffPeriodic"] and nonbondedCutoff is not None ): system_params["nonbondedCutoff"] = nonbondedCutoff # Set ewaldErrorTolerance if PME is used if nonbondedMethod == "PME" and ewaldErrorTolerance is not None: system_params["ewaldErrorTolerance"] = ewaldErrorTolerance # Set constraintTolerance if constraints are used if constraints in ["HBonds", " AllBonds"] and constraintTolerance is not None: system_params["constraintTolerance"] = constraintTolerance elif system_params["rigidWater"] and constraintTolerance is not None: system_params["constraintTolerance"] = constraintTolerance # Update system_params with any additional parameters provided system_params.update(kwargs) system = forcefield.createSystem(pdb.topology, **system_params) return system ########==================Integrator==================######## # Integrator def _define_integrator( integrator_type="LangevinMiddle", temperature=300 * kelvin, friction=1.0 / picoseconds, timestep=0.004 * picoseconds, **kwargs, ): # Create a dictionary to hold integrator parameters integrator_params = { "temperature": temperature, "friction": friction, "timestep": timestep, } # Update integrator_params with any additional parameters provided integrator_params.update(kwargs) # Create the integrator if integrator_type == "LangevinMiddle": integrator = LangevinMiddleIntegrator(**integrator_params) elif integrator_type == "Verlet": integrator = VerletIntegrator(**integrator_params) elif integrator_type == "Brownian": integrator = BrownianIntegrator(**integrator_params) else: raise Exception("Integrator type not recognized") return integrator def _prompt_summary(self, query: str): prompt_template = """Your input is the original query. Your task is to parse through the user query. and provide a summary of the file path input, the type of preprocessing needed (this is the same as cleaning the file), the forcefield used for the simulation, the ensemble of the simulation, the integrator needed, the number of steps, the timestep, the temperature, and other instructions. and follow the format "name: description. File Path: what is the file path of the file you are using? it must include a .cif or .pdb extension. Preprocessing: what preprocessing is needed? you can choose from the following: standard cleaning, remove water, add hydrogens, add hydrogens and remove water. The default is add hydrogens and remove water. Forcefield: what forcefields are you using? you can choose from the following: AMBER, CHARMM, OPLS, GROMACS. Default --> "amber14-all.xml, tip3p.xml". Ensemble: what ensemble are you using? you can choose from the following: NPT, NVT, NVE. Default --> "NVT". Integrator: what integrator are you using? you can choose from the following: Langevin, Verlet, Brownian. The default depends on the ensemble (NPT -> Langevin, NVT -> Langevin, NVE -> Verlet). Number of Steps: how many steps are you using? The default is 10000. Timestep: what is the timestep? Default --> "1 fs". Temperature: what is the temperature? Default --> "300 K". Pressure: What is the pressure? If NPT ensemble, the default is 1.0 bar, otherwise None. 
Friction: what is the friction coefficient? Default --> "1.0" record_params: what parameters do you want to record? you can choose from the following: step, time, potentialEnergy, kineticEnergy, totalEnergy, temperature, volume, density, progress, remainingTime, speed, elapsedTime, separator, systemMass, totalSteps, append. Default --> ["step", "potentialEnergy", "temperature"]. Other Instructions: what other instructions do you have? The default is none. Example of the final output: File Path: 1a1p.pdb Preprocessing: standard cleaning Forcefield: amber14-all.xml, tip3p.xml Ensemble: NPT Integrator: Langevin Number of Steps: 10000 Timestep: 1 fs Temperature: 300 K Pressure: 1.0 bar Friction: 1.0 record_params: ["step", "potentialEnergy", "temperature"] Other Instructions: none If there is not enough information in a category, you may fill in with the default, but explicitly state so. Here is the information:{query}""" prompt = PromptTemplate(template=prompt_template, input_variables=["query"]) llm_chain = LLMChain(prompt=prompt, llm=self.llm) return llm_chain.run(" ".join(query)) def _save_to_file(self, summary: str, filename: str): """Parse the summary string and save it to a file in JSON format.""" # Split the summary into lines lines = summary.strip().split("\n") # Parse each line into a key and a value summary_dict = {} for line in lines: key, value = line.split(":") summary_dict[key.strip()] = value.strip() # Save the dictionary to a file with open(filename, "w") as f: json.dump(summary_dict, f) # add filename to registry file_description = "Simulation Parameters" self.path_registry.map_path(filename, filename, file_description) def _instruction_summary(self, query: str): summary = self._prompt_summary(query) self._save_to_file(summary, "simulation_parameters.json") return summary def _setup_simulation_from_json(self, file_name): # Open the json file and load the parameters with open(file_name, "r") as f: params = json.load(f) return params def _setup_and_run_simulation(self, query): # Load the force field # ask for inputs from the user params = self._setup_simulation_from_json(query) # forcefield key can be forcefield_files or Forcefield if "forcefield_files" in params: params["forcefield_files"] = ( params["forcefield_files"] .replace("(default)", "") .replace(" and ", ",") .strip() ) Forcefield_files = [ file.strip() for file in params["forcefield_files"].split(",") ] Forcefield = Forcefield_files[0] Water_model = Forcefield_files[1] else: params["Forcefield"] = ( params["Forcefield"] .replace("(default)", "") .replace(" and ", ",") .strip() ) Forcefield_files = [ file.strip() for file in params["Forcefield"].split(",") ] Forcefield = Forcefield_files[0] Water_model = Forcefield_files[1] print("Setting up forcefields :", Forcefield, Water_model) st.markdown("Setting up forcefields", unsafe_allow_html=True) # check if forcefields end in .xml if Forcefield.endswith(".xml") and Water_model.endswith(".xml"): forcefield = ForceField(Forcefield, Water_model) # adding forcefield to registry # Load the PDB file CleaningTools(self.path_registry) pdbfile = self.path_registry.get_mapped_path(params["File Path"]) name = pdbfile.split(".")[0] end = pdbfile.split(".")[1] if end == "pdb": pdb = PDBFile(pdbfile) elif end == "cif": pdb = PDBxFile(pdbfile) modeller = Modeller(pdb.topology, pdb.positions) system = forcefield.createSystem( modeller.topology, nonbondedMethod=app.PME, nonbondedCutoff=1.0 * nanometers, constraints=app.PME, ) _integrator = params["Integrator"].split(" ")[0].strip() _temp 
= params["Temperature"].split(" ")[0].strip() _friction_coef = params["Friction"].split(" ")[0].strip() _timestep = params["Timestep"].split(" ")[0].strip() if _integrator == "Langevin": print( "Setting up Langevin integrator with Parameters:", _temp, "K", _friction_coef, "1/ps", _timestep, "fs", ) st.markdown("Setting up Langevin integrator", unsafe_allow_html=True) if params["Ensemble"] == "NPT": _pressure = params["Pressure"].split(" ")[0].strip() system.addForce(MonteCarloBarostat(_pressure * bar, _temp * kelvin)) integrator = LangevinIntegrator( float(_temp) * kelvin, float(_friction_coef) / picosecond, float(_timestep) * femtoseconds, ) elif _integrator == "Verlet": if params["Ensemble"] == "NPT": _pressure = params["Pressure"].split(" ")[0].strip() system.addForce(AndersenThermostat(_temp * kelvin, 1 / picosecond)) system.addForce(MonteCarloBarostat(_pressure * bar, _temp * kelvin)) print( "Setting up Verlet integrator with Parameters:", _timestep, "fs", _temp, "K", _pressure, "bar", ) print("Setting up Verlet integrator with Parameters:", _timestep, "fs") st.markdown("Setting up Verlet integrator", unsafe_allow_html=True) integrator = VerletIntegrator(float(_timestep) * picoseconds) simulation = Simulation(modeller.topology, system, integrator) simulation.context.setPositions(modeller.positions) simulation.minimizeEnergy() # save initial positions to registry file_name = "initial_positions.pdb" with open(file_name, "w") as f: PDBFile.writeFile( simulation.topology, simulation.context.getState(getPositions=True).getPositions(), f, ) print("Initial Positions saved to initial_positions.pdb") simulation.reporters.append(PDBReporter(f"{name}.pdb", 1000)) # reporter_args = {"reportInterval": 1000} reporter_args = {} params["record_params"] = ast.literal_eval(params["record_params"]) for param in params["record_params"]: if param in [ "step", "time", "potentialEnergy", "kineticEnergy", "totalEnergy", "temperature", "volume", "density", "progress", "remainingTime", "speed", "elapsedTime", "separator", "systemMass", "totalSteps", "append", ]: # The params from the json file should be booleans reporter_args[param] = True simulation.reporters.append( StateDataReporter(f"{name}.csv", 1000, **reporter_args) ) simulation.step(int(params["Number of Steps"].split(" ")[0].strip())) # add filenames to registry file_name1 = "simulation_trajectory.pdb" file_description1 = "Simulation PDB, containing the simulation trajectory" self.path_registry.map_path(file_name1, f"{name}.pdb", file_description1) file_name2 = "simulation_data.csv" file_description2 = ( "Simulation Data, containing step, potential energy, and temperature" ) self.path_registry.map_path(file_name2, f"{name}.csv", file_description2) return simulation def _extract_parameters_path(self): """Check directory for parameters.json file.""" # Check if there is a parameters.json file in the directory. if os.path.exists("simulation_parameters_summary.json"): return "simulation_parameters_summary.json" # If there's no exact match, check for # any JSON file that contains 'parameters' in its name. else: for file in os.listdir("."): if "parameters" in file and file.endswith(".json"): return file # If no matching file is found, raise an exception. raise ValueError("No parameters.json file found in directory.") class SetUpAndRunTool(BaseTool): name = "SetUpAndRunTool" description = """This tool will set up the simulation objects and run the simulation. It will ask for the parameters path. 
input: json file """ path_registry: Optional[PathRegistry] def __init__( self, path_registry: Optional[PathRegistry], ): super().__init__() self.path_registry = path_registry def _run(self, query: str) -> str: """Use the tool""" # find the parameters in the directory try: if self.path_registry is None: # this should not happen return "Registry not initialized" sim_fxns = SimulationFunctions(path_registry=self.path_registry) parameters = sim_fxns._extract_parameters_path() except ValueError as e: return ( str(e) + """\nPlease use the Instruction summary tool with the to create a parameters.json file in the directory.""" ) self.log("This are the parameters:") self.log(parameters) # print the parameters in json file with open(parameters) as f: params = json.load(f) for key, value in params.items(): print(key, ":", value) self.log("Are you sure you want to run the simulation? (y/n)") response = input("yes or no: ") if response.lower() in ["yes", "y"]: sim_fxns._setup_and_run_simulation(parameters) else: return "Simulation interrupted due to human input" return "Simulation Completed, simulation trajectory and data files saved." def log(self, text, color="blue"): if color == "blue": print("\033[1;34m\t{}\033[00m".format(text)) if color == "red": print("\033[31m\t{}\033[00m".format(text)) async def _arun(self, query: str) -> str: """Use the tool asynchronously.""" raise NotImplementedError("custom_search does not support async") #######==================System Configuration==================######## # System Configuration class SetUpandRunFunctionInput(BaseModel): pdb_id: str forcefield_files: List[str] save: bool = Field( True, description=( ( "Set to 'True' (default) to save the log files and trajectories " "of the simulation. " "If set to 'False', " "the simulation is considered as being in a testing " "or preliminary scripting stage, utilizing default parameters and " "results are not saved. " "This second setting is ideal for initial experimentation or " "basic script development before customizing the " "script for final use." ) ), ) system_params: Dict[str, Any] = Field( { "nonbondedMethod": "NoCutoff", "nonbondedCutoff": "1 * nanometers", "ewaldErrorTolerance": None, "constraints": "None", "rigidWater": False, "constraintTolerance": None, "solvate": False, }, description=( "Parameters for the openmm system. " "For nonbondedMethod, you can choose from the following:\n" "NoCutoff, CutoffNonPeriodic, CutoffPeriodic, Ewald, PME. " "If anything but NoCutoff is chosen," "you have to include a nonbondedCutoff" "and a constrainTolerance.\n" "If PME is chosen," "you have to include an ewaldErrorTolerance too." "For constraints, you can choose from the following:\n" "None, HBonds, AllBonds or OnlyWater." 
"For rigidWater, you can choose from the following:\n" "True, False.\n" "Finally, if you want to solvate the system, before the simulation," "you can set solvate to True.\n" "Example1:\n" "{'nonbondedMethod': 'NoCutoff',\n" "'constraints': 'None',\n" "'rigidWater': False}\n" "Example2:\n" "{'nonbondedMethod': 'CutoffPeriodic',\n" "'nonbondedCutoff': 1.0,\n" "'constraints': 'HBonds',\n" "'rigidWater': True,\n" "'constraintTolerance': 0.00001,\n" "'solvate': True} " ), ) integrator_params: Dict[str, Any] = Field( { "integrator_type": "LangevinMiddle", "Temperature": "300 * kelvin", "Friction": "1.0 / picoseconds", "Timestep": "0.002 * picoseconds", "Pressure": "1.0 * bar", }, description="""Parameters for the openmm integrator.""", ) simulation_params: Dict[str, Any] = Field( { "Ensemble": "NVT", "Number of Steps": 5000, "record_interval_steps": 100, "record_params": ["step", "potentialEnergy", "temperature"], }, description="""Parameters for the openmm simulation. The ensemble can be NPT, NVT or NVE. The number of steps is the number of steps the simulation will run for. record_interval_steps is the number of steps between each record: hould be the number of steps divided by 100. The record_params is a list of parameters that will be recorded during the simulation The options are: [Step,Time,Speed,Progress,RemainingTime,ElapsedTime, PotentialEnergy,KineticEnergy,TotalEnergy, Temperature,Volume,Density]""", ) #########===================================================================############ class OpenMMSimulation: def __init__( self, input_params: SetUpandRunFunctionInput, path_registry: PathRegistry, save: bool, sim_id: str, pdb_id: str, ): self.params = input_params self.save = save self.sim_id = sim_id self.pdb_id = pdb_id self.int_params = ( self.params.integrator_params if self.params.integrator_params is not None else { "integrator_type": "LangevinMiddle", "Temperature": 300 * kelvin, "Friction": 1.0 / picoseconds, "Timestep": 0.002 * picoseconds, "Pressure": 1.0 * bar, } ) self.sys_params = ( self.params.system_params if self.params.system_params is not None else { "nonbondedMethod": NoCutoff, "nonbondedCutoff": 1 * nanometers, "ewaldErrorTolerance": None, "constraints": AllBonds, "rigidWater": True, "constraintTolerance": 0.000001, "solvate": False, } ) self.sim_params = ( self.params.simulation_params if self.params.simulation_params is not None else { "Ensemble": "NVT", "Number of Steps": 5000, "record_interval_steps": 100, "record_params": ["step", "potentialEnergy", "temperature"], } ) self.path_registry = path_registry def setup_system(self): print("Building system...") st.markdown("Building system", unsafe_allow_html=True) self.pdb_id = self.params.pdb_id self.pdb_path = self.path_registry.get_mapped_path(self.pdb_id) self.pdb = PDBFile(self.pdb_path) self.forcefield = ForceField(*self.params.forcefield_files) self.system = self._create_system(self.pdb, self.forcefield, **self.sys_params) if self.sys_params.get("nonbondedMethod", None) in [ CutoffPeriodic, PME, ]: if self.sim_params["Ensemble"] == "NPT": self.system.addForce( MonteCarloBarostat( self.int_params["Pressure"], self.int_params["Temperature"], self.sim_params.get("barostatInterval", 25), ) ) def setup_integrator(self): print("Setting up integrator...") st.markdown("Setting up integrator", unsafe_allow_html=True) int_params = self.int_params integrator_type = int_params.get("integrator_type", "LangevinMiddle") if integrator_type == "LangevinMiddle": self.integrator = LangevinMiddleIntegrator( 
int_params["Temperature"], int_params["Friction"], int_params["Timestep"], ) elif integrator_type == "LangevinIntegrator": self.integrator = LangevinIntegrator( int_params["Temperature"], int_params["Friction"], int_params["Timestep"], ) else: raise ValueError("Invalid integrator type") self.integrator.setConstraintTolerance( self.sys_params.get("constraintTolerance", 0.000001) ) def create_simulation(self): print("Creating simulation...") st.markdown("Creating simulation", unsafe_allow_html=True) self.simulation = Simulation( self.modeller.topology, self.system, self.integrator, Platform.getPlatformByName("CPU"), ) self.simulation.context.setPositions(self.modeller.positions) # TEMPORARY FILE MANAGEMENT OR PATH REGISTRY MAPPING if self.save: trajectory_name = self.path_registry.write_file_name( type=FileType.RECORD, record_type="TRAJ", protein_file_id=self.pdb_id, Sim_id=self.sim_id, term="dcd", ) topology_name = self.path_registry.write_file_name( type=FileType.RECORD, record_type="TOP", protein_file_id=self.pdb_id, Sim_id=self.sim_id, term="pdb", ) log_name = self.path_registry.write_file_name( type=FileType.RECORD, record_type="LOG", protein_file_id=self.pdb_id, Sim_id=self.sim_id, term="txt", ) traj_desc = ( f"Simulation trajectory for protein {self.pdb_id}" f" and simulation {self.sim_id}" ) top_desc = ( f"Simulation topology for protein" f"{self.pdb_id} and simulation {self.sim_id}" ) log_desc = ( f"Simulation state log for protein {self.pdb_id} " f"and simulation {self.sim_id}" ) self.simulation.reporters.append( DCDReporter( f"{trajectory_name}", self.sim_params["record_interval_steps"], ) ) self.simulation.reporters.append( PDBReporter( f"{topology_name}", self.sim_params["record_interval_steps"], ) ) self.simulation.reporters.append( StateDataReporter( f"{log_name}", self.sim_params["record_interval_steps"], step=True, potentialEnergy=True, temperature=True, separator="\t", ) ) self.registry_records = [ ("holder", f"files/records/{trajectory_name}", traj_desc), ("holder", f"files/records/{log_name}", log_desc), ("holder", f"files/records/{topology_name}", top_desc), ] # TODO add checkpoint too? 
else: self.simulation.reporters.append( DCDReporter( "temp_trajectory.dcd", self.sim_params["record_interval_steps"], ) ) self.simulation.reporters.append( PDBReporter( "temp_topology.pdb", self.sim_params["record_interval_steps"], ) ) self.simulation.reporters.append( StateDataReporter( "temp_log.txt", self.sim_params["record_interval_steps"], step=True, potentialEnergy=True, temperature=True, separator="\t", ) ) def _create_system( self, pdb, forcefield, nonbondedMethod="NoCutoff", nonbondedCutoff=None, ewaldErrorTolerance=None, constraints="None", rigidWater=False, constraintTolerance=None, solvate=False, **kwargs, ): # Create a dictionary to hold system parameters system_params = { "nonbondedMethod": nonbondedMethod, "constraints": constraints, "rigidWater": rigidWater, } # Set nonbondedCutoff if applicable Had to double if pre-commit if nonbondedMethod in ["PME", "CutoffNonPeriodic", "CutoffPeriodic"]: if nonbondedCutoff is not None: system_params["nonbondedCutoff"] = nonbondedCutoff # Set ewaldErrorTolerance if PME is used if nonbondedMethod == "PME" and ewaldErrorTolerance is not None: system_params["ewaldErrorTolerance"] = ewaldErrorTolerance # Set constraintTolerance if constraints are used if constraints in ["HBonds", "AllBonds"] and constraintTolerance is not None: pass elif system_params["rigidWater"] and constraintTolerance is not None: pass # Update system_params with any additional parameters provided system_params.update(kwargs) # if use_constraint_tolerance: # constraintTolerance = system_params.pop('constraintTolerance') self.modeller = Modeller(pdb.topology, pdb.positions) if solvate: try: self.modeller.addSolvent(forcefield) except ValueError as e: print("Error adding solvent", type(e).__name__, "–", e) if "No Template for" in str(e): raise ValueError(str(e)) except AttributeError as e: print("Error adding solvent: ", type(e).__name__, "–", e) print("Trying to add solvent with 1 nm padding") if "NoneType" and "value_in_unit" in str(e): try: self.modeller.addSolvent(forcefield, padding=1 * nanometers) except Exception as e: print("Error adding solvent", type(e).__name__, "–", e) raise (e) system = forcefield.createSystem(self.modeller.topology, **system_params) else: system = forcefield.createSystem(self.modeller.topology, **system_params) return system def unit_to_string(self, unit): """Needed to convert units to strings for the script Otherwise internal __str()__ method makes the script not runnable""" return f"{unit.value_in_unit(unit.unit)}*{unit.unit.get_name()}" def _construct_script_content( self, pdb_path, forcefield_files, nonbonded_method, constraints, rigid_water, constraint_tolerance, nonbonded_cutoff, ewald_error_tolerance, hydrogen_mass, time_step, temperature, friction, ensemble, pressure, record_interval_steps, solvate, integrator_type, ): script_content = f""" # This script was generated by MDagent-Setup. 
from openmm import * from openmm.app import * from openmm.unit import * # Input Files pdb = PDBFile('{pdb_path}') forcefield = ForceField({forcefield_files}) # System Configuration nonbondedMethod = {nonbonded_method} constraints = {constraints} rigidWater = {rigid_water} """ if rigid_water and constraint_tolerance is not None: script_content += f"constraintTolerance = {constraint_tolerance}\n" # Conditionally add nonbondedCutoff if nonbonded_method != NoCutoff: script_content += f"nonbondedCutoff = {nonbonded_cutoff}\n" if nonbonded_method == PME: script_content += f"ewaldErrorTolerance = {ewald_error_tolerance}\n" if hydrogen_mass: script_content += f"hydrogenMass = {hydrogen_mass}\n" # ... other configurations ... script_content += f""" # Integration Options dt = {time_step} temperature = {temperature} friction = {friction} """ if ensemble == "NPT": script_content += f""" pressure = {pressure} barostatInterval = {self.sim_params.get("barostatInterval", 25)} """ # ... other integration options ... script_content += f""" # Simulation Options steps = {self.sim_params.get("Number of Steps", record_interval_steps)} equilibrationSteps = 1000 platform = Platform.getPlatformByName('CPU') dcdReporter = DCDReporter('trajectory.dcd', 1000) pdbReporter = PDBReporter('trajectory.pdb', 1000) dataReporter = StateDataReporter('log.txt', {record_interval_steps}, totalSteps=steps, step=True, speed=True, progress=True, elapsedTime=True, remainingTime=True, potentialEnergy=True, temperature=True, volume=True, density=True, separator='\t') checkpointReporter = CheckpointReporter('checkpoint.chk', 5000) # Minimize and Equilibrate # ... code for minimization and equilibration ... # Simulate print('Building system...') modeller = Modeller(pdb.topology, pdb.positions) """ if solvate: script_content += ( """modeller.addSolvent(forcefield, padding=1*nanometers)""" ) if nonbonded_method == NoCutoff: if hydrogen_mass: script_content += """ system = forcefield.createSystem(modeller.topology, nonbondedMethod=nonbondedMethod, constraints=constraints, rigidWater=rigidWater, hydrogenMass=hydrogenMass) """ else: script_content += """ system = forcefield.createSystem(modeller.topology, nonbondedMethod=nonbondedMethod, constraints=constraints, rigidWater=rigidWater) """ if nonbonded_method == CutoffNonPeriodic or nonbonded_method == CutoffPeriodic: if hydrogen_mass: script_content += """ system = forcefield.createSystem(modeller.topology, nonbondedMethod=nonbondedMethod, nonbondedCutoff=nonbondedCutoff, constraints=constraints, rigidWater=rigidWater, hydrogenMass=hydrogenMass) """ else: script_content += """ system = forcefield.createSystem(modeller.topology, nonbondedMethod=nonbondedMethod, nonbondedCutoff=nonbondedCutoff, constraints=constraints, rigidWater=rigidWater) """ if nonbonded_method == PME: if hydrogen_mass: script_content += """ system = forcefield.createSystem(modeller.topology, nonbondedMethod=nonbondedMethod, nonbondedCutoff=nonbondedCutoff, ewaldErrorTolerance=ewaldErrorTolerance, constraints=constraints, rigidWater=rigidWater, hydrogenMass=hydrogenMass) """ else: script_content += """ system = forcefield.createSystem(modeller.topology, nonbondedMethod=nonbondedMethod, nonbondedCutoff=nonbondedCutoff, ewaldErrorTolerance=ewaldErrorTolerance, constraints=constraints, rigidWater=rigidWater) """ if ensemble == "NPT": script_content += """ system.addForce(MonteCarloBarostat(pressure, temperature, barostatInterval)) """ if integrator_type == "LangevinMiddle" and constraints != "None": script_content += """ 
integrator = LangevinMiddleIntegrator(temperature, friction, dt) integrator.setConstraintTolerance(constraintTolerance) simulation = Simulation(modeller.topology, system, integrator, platform) simulation.context.setPositions(modeller.positions) """ if integrator_type == "LangevinMiddle" and constraints == "None": script_content += """ integrator = LangevinMiddleIntegrator(temperature, friction, dt) simulation = Simulation(modeller.topology, system, integrator, platform) simulation.context.setPositions(modeller.positions) """ script_content += """ # Minimize and Equilibrate print('Performing energy minimization...') simulation.minimizeEnergy() print('Equilibrating...') simulation.context.setVelocitiesToTemperature(temperature) simulation.step(equilibrationSteps) # Simulate print('Simulating...') simulation.reporters.append(dcdReporter) simulation.reporters.append(pdbReporter) simulation.reporters.append(dataReporter) simulation.reporters.append(checkpointReporter) simulation.currentStep = 0 simulation.step(steps) """ return script_content def write_standalone_script(self, filename="reproduce_simulation.py"): """Extracting parameters from the class instance Inspired by the code snippet provided from openmm-setup https://github.com/openmm/openmm-setup """ pdb_path = self.pdb_path forcefield_files = ", ".join( f"'{file}'" for file in self.params["forcefield_files"] ) nonbonded_method = self.sys_params.get("nonbondedMethod", NoCutoff) nbCo = self.sys_params.get("nonbondedCutoff", 1 * nanometers) nonbonded_cutoff = self.unit_to_string(nbCo) constraints = self.sys_params.get("constraints", "None") rigid_water = self.sys_params.get("rigidWater", False) ewald_error_tolerance = self.sys_params.get("ewaldErrorTolerance", 0.0005) constraint_tolerance = self.sys_params.get("constraintTolerance", None) hydrogen_mass = self.sys_params.get("hydrogenMass", None) solvate = self.sys_params.get("solvate", False) integrator_type = self.int_params.get("integrator_type", "LangevinMiddle") friction = self.int_params.get("Friction", 1.0 / picoseconds) friction = f"{friction.value_in_unit(friction.unit)}{friction.unit.get_name()}" _temp = self.int_params.get("Temperature", 300 * kelvin) temperature = self.unit_to_string(_temp) t_step = self.int_params.get("Timestep", 0.004 * picoseconds) time_step = self.unit_to_string(t_step) press = self.int_params.get("Pressure", 1.0 * bar) pressure = self.unit_to_string(press) ensemble = self.sim_params.get("Ensemble", "NVT") self.sim_params.get("Number of Steps", 10000) record_interval_steps = self.sim_params.get("record_interval_steps", 1000) script_content = self._construct_script_content( pdb_path, forcefield_files, nonbonded_method, constraints, rigid_water, constraint_tolerance, nonbonded_cutoff, ewald_error_tolerance, hydrogen_mass, time_step, temperature, friction, ensemble, pressure, record_interval_steps, solvate, integrator_type, ) # Remove leading spaces for proper formatting def remove_leading_spaces(text): lines = text.split("\n") stripped_lines = [line.lstrip() for line in lines] return "\n".join(stripped_lines) script_content = remove_leading_spaces(script_content) script_content = textwrap.dedent(script_content).strip() # Write to file directory = "files/simulations" if not os.path.exists(directory): os.makedirs(directory) with open(f"{directory}/{filename}", "w") as file: file.write(script_content) print(f"Standalone simulation script written to {directory}/{filename}") st.markdown("Standalone simulation script written", unsafe_allow_html=True) def run(self): 
# Minimize and Equilibrate print("Performing energy minimization...") st.markdown("Performing energy minimization", unsafe_allow_html=True) self.simulation.minimizeEnergy() print("Minimization complete!") top_name = f"files/pdb/{self.sim_id}_initial_positions.pdb" top_description = f"Initial positions for simulation {self.sim_id}" with open(top_name, "w") as f: PDBFile.writeFile( self.simulation.topology, self.simulation.context.getState(getPositions=True).getPositions(), f, ) self.path_registry.map_path(f"top_{self.sim_id}", top_name, top_description) print("Initial Positions saved to initial_positions.pdb") st.markdown("Minimization complete! Equilibrating...", unsafe_allow_html=True) print("Equilibrating...") _temp = self.int_params["Temperature"] self.simulation.context.setVelocitiesToTemperature(_temp) _eq_steps = self.sim_params.get("equilibrationSteps", 1000) self.simulation.step(_eq_steps) # Simulate print("Simulating...") st.markdown("Simulating...", unsafe_allow_html=True) self.simulation.currentStep = 0 self.simulation.step(self.sim_params["Number of Steps"]) print("Done!") st.markdown("Done!", unsafe_allow_html=True) if not self.save: if os.path.exists("temp_trajectory.dcd"): os.remove("temp_trajectory.dcd") if os.path.exists("temp_log.txt"): os.remove("temp_log.txt") if os.path.exists("temp_checkpoint.chk"): os.remove("temp_checkpoint.chk") return "Simulation done!" class SetUpandRunFunction(BaseTool): name: str = "SetUpandRunFunction" description: str = ( "This tool will set up and run a short simulation of a protein. " "Then will write a standalone script that can be used " "to reproduce the simulation or change accordingly for " "a more elaborate simulation. It only runs short simulations because, " "if there are errors, you can try again changing the input" ) args_schema: Type[BaseModel] = SetUpandRunFunctionInput path_registry: Optional[PathRegistry] def __init__(self, path_registry: Optional[PathRegistry]): super().__init__() self.path_registry = path_registry def _run(self, **input_args): if self.path_registry is None: return "Path registry not initialized" input = self.check_system_params(input_args) error = input.get("error", None) if error: print(f"error found: {error}") return error try: pdb_id = input["pdb_id"] # check if pdb_id is in the registry or as 1XYZ_112233 format if pdb_id not in self.path_registry.list_path_names(): return "No pdb_id found in input, use the file id not the file name" except KeyError: return "No pdb_id found in input" try: save = input["save"] # either this simulation # to save or not the output files from this simulation except KeyError: save = True print( "No 'save' key found in input, setting to True. " "Record files will be deleted after script is written." ) try: file_name = self.path_registry.write_file_name( type=FileType.SIMULATION, type_of_sim=input["simulation_params"]["Ensemble"], protein_file_id=pdb_id, ) sim_id = self.path_registry.get_fileid(file_name, FileType.SIMULATION) except Exception as e: print(f"An exception was found: {str(e)}.") return f"An exception was found trying to write the filenames: {str(e)}." 
try: openmmsim = OpenMMSimulation( input, self.path_registry, save, sim_id, pdb_id ) openmmsim.setup_system() openmmsim.setup_integrator() openmmsim.create_simulation() print("simulation set!") st.markdown("simulation set!", unsafe_allow_html=True) except ValueError as e: msg = str(e) + f"This were the inputs {input_args}" if "No template for" in msg: msg += ( "This error is likely due to non standard residues " "in the protein, if you havent done it yet, try " "cleaning the pdb file using the cleaning tool" ) return msg except FileNotFoundError: return f"File not found, check File id. This were the inputs {input_args}" except OpenMMException as e: return f"OpenMM Exception: {str(e)}. This were the inputs {input_args}" try: openmmsim.run() except Exception as e: return ( f"An exception was found: {str(e)}. Not a problem, thats one " "purpose of this tool: to run a short simulation to check for correct " "initialization. " "" "Try a) with different parameters like " "nonbondedMethod, constraints, etc \n or\n" "b) clean file inputs depending on error " ) try: openmmsim.write_standalone_script(filename=file_name) self.path_registry.map_path( sim_id, f"files/simulations/{file_name}", f"Basic Simulation of Protein {pdb_id}", ) if save: records = openmmsim.registry_records # move record files to files/records/ print(os.listdir(".")) if not os.path.exists("files/records"): os.makedirs("files/records") for record in records: os.rename(record[1].split("/")[-1], f"{record[1]}") for record in records: record_list = list(record) record_list[0] = self.path_registry.get_fileid( record_list[1].split("/")[-1], FileType.RECORD ) record = tuple(record_list) self.path_registry.map_path(*record) return ( "Simulation done! \n Summary: \n" "Record files written to files/records/ with IDs and descriptions: " f"{[(record[0],record[2]) for record in records]}\n" "Standalone script written to files/simulations/ with ID: " f"{sim_id}.\n" f"The initial topology file ID is top_{sim_id} saved in files/pdb/" ) except Exception as e: print(f"An exception was found: {str(e)}.") return f"An exception was found trying to write the filenames: {str(e)}." def _parse_cutoff(self, cutoff): # Check if cutoff is already an OpenMM Quantity (has a unit) possible_units = ["nm", "nanometer", "nanometers", "angstrom", "angstroms", "a"] if isinstance(cutoff, unit.Quantity): return cutoff # Convert to string in case it's not (e.g., int or float) cutoff = str(cutoff) if cutoff[-1] == "s": cutoff = cutoff[:-1] # Remove spaces and convert to lowercase for easier parsing cutoff = cutoff.replace(" ", "").lower() if cutoff.endswith("s"): cutoff = cutoff[:-1] # Check for multiplication symbol and split if necessary if "*" in cutoff: # Split on the '*' and extract the numerical part and the unit part num_part, unit_part = cutoff.split("*") # Convert the numerical part to a float num_value = float(num_part) else: # If there is no '*', it's either a number or a string like "1nm" # Attempt to convert directly to float; if it fails, # it must have a unit like "nm" or "angstrom" try: num_value = float(cutoff) unit_part = "nm" except ValueError: for possible_unit in possible_units: if possible_unit in cutoff: num_value = float(cutoff.replace(possible_unit, "")) unit_part = possible_unit break else: # Use regular expression to extract the # numerical part and the unit part match = re.match(r"([+-]?[0-9]*\.?[0-9]+)([a-zA-Z]*)", cutoff) if match: num_part, unit_part = match.groups() raise ValueError( f"""Unknown unit for nonbondedCutoff got {unit_part}. 
Try using nm or angstroms as value * unit.""" ) # Now convert the unit part to an OpenMM unit if unit_part in ["nm", "nanometer", "nanometers"]: return num_value * unit.nanometers elif unit_part in ["angstrom", "angstroms", "a"]: return num_value * unit.angstroms else: # If the unit is not recognized, raise an error raise ValueError( f"""Unknown unit for nonbondedCutoff got {unit_part}. Try using nm or angstroms as value * unit.""" ) def _parse_parameter(self, parameter, default_unit, possible_units): """ Parse a parameter and return it as an OpenMM Quantity with the correct unit. Args: parameter (float, str, or unit.Quantity): The input parameter value default_unit (unit.Unit): The default unit to use if none is provided possible_units (dict): A mapping of strings to their respective unit objects Returns: unit.Quantity: The parameter as an OpenMM Quantity with the correct unit. """ error_msg = "" if isinstance(parameter, unit.Quantity): return parameter, error_msg # Convert to string in case it's not (e.g., int or float) parameter_str = str(parameter) # Remove spaces and convert to lowercase for easier parsing parameter_str = parameter_str.replace(" ", "").lower() # Check for multiplication symbol and split if necessary # e.g. "1*kelvin" or "1*ps^-1" if "*" in parameter_str: num_part, unit_part = parameter_str.split("*") num_value = float(num_part) elif "poundforce/inch^2" in parameter_str: num_value = float(parameter_str.replace("poundforce/inch^2", "")) unit_part = "poundforce/inch^2" # Check for division symbol and split if necessary # e.g. "1/ps" or "1/ps^-1" elif "/" in parameter_str: num_part, unit_part = parameter_str.split("/") num_value = float(num_part) unit_part = "/" + unit_part elif "^-1" in parameter_str: parameter_str = parameter_str.replace("^-1", "") match = re.match(r"^(\d+(?:\.\d+)?)([a-zA-Z]+)$", parameter_str) num_value = float(match.group(1)) unit_part = "/" + match.group(2) else: # Attempt to convert directly to float; if it fails, # it must have a unit like "K", "ps", etc. try: num_value = float(parameter_str) unit_part = default_unit except ValueError: match = re.match(r"([+-]?[0-9]*\.?[0-9]+)([a-zA-Z]*)", parameter_str) if match: num_part, unit_part = match.groups() num_value = float(num_part) else: error_msg += f"Invalid format for parameter: '{parameter_str}'." # Convert the unit part to an OpenMM unit if unit_part.lower() in possible_units: return num_value * possible_units[unit_part.lower()], error_msg else: # If the unit is not recognized, raise an error error_msg += f"""Unknown unit '{unit_part}' for parameter. 
Valid units include: {list(possible_units.keys())}.""" return parameter, error_msg def parse_temperature(self, temperature): possible_units = { "k": unit.kelvin, "kelvin": unit.kelvin, } return self._parse_parameter(temperature, "k", possible_units) def parse_friction(self, friction): possible_units = { "/ps": (1 / unit.picoseconds), "/picosecond": (1 / unit.picoseconds), "/picoseconds": (1 / unit.picoseconds), "picosecond^-1": (1 / unit.picoseconds), "picoseconds^-1": (1 / unit.picoseconds), "/ps^-1": (1 / unit.picoseconds), "ps^-1": (1 / unit.picoseconds), "1*ps^-1": (1 / unit.picoseconds), } return self._parse_parameter(friction, "1/ps", possible_units) def parse_timestep(self, timestep): possible_units = { "ps": unit.picoseconds, "picosecond": unit.picoseconds, "picoseconds": unit.picoseconds, "fs": unit.femtoseconds, "femtosecond": unit.femtoseconds, "femtoseconds": unit.femtoseconds, "ns": unit.nanoseconds, "nanosecond": unit.nanoseconds, "nanoseconds": unit.nanoseconds, } return self._parse_parameter(timestep, "ps", possible_units) def parse_pressure(self, pressure): possible_units = { "bar": unit.bar, "atm": unit.atmospheres, "atmosphere": unit.atmospheres, "pascal": unit.pascals, "pascals": unit.pascals, "pa": unit.pascals, "poundforce/inch^2": unit.psi, "psi": unit.psi, } return self._parse_parameter(pressure, "bar", possible_units) def _process_parameters(self, user_params, param_type="system_params"): """ Process user provided parameters, converting strings to openmm objects if necessary. """ error_msg = "" processed_params = {} if param_type == "system_params": for key, value in user_params.items(): if key == "nonbondedMethod" or key == "nonbondedmethod": if value == "NoCutoff": processed_params[key] = NoCutoff elif value == "PME": processed_params[key] = PME elif value == "CutoffPeriodic": processed_params[key] = CutoffPeriodic elif value == "CutoffNonPeriodic": processed_params[key] = CutoffNonPeriodic elif value == "Ewald": processed_params[key] = Ewald else: # Assume it's already an openmm object processed_params[key] = value if key == "nonbondedCutoff" or key == "nonbondedcutoff": try: processed_params[key] = self._parse_cutoff(value) except ValueError as e: error_msg += f"Invalid nonbondedCutoff: {e}. \n" if key == "ewaldErrorTolerance" or key == "ewalderrortolerance": try: processed_params[key] = float(value) except TypeError as e: error_msg += ( f"Invalid ewaldErrorTolerance: {e}. " "If you are using null or None, " "just dont include it " "as part of the parameters.\n" ) if key == "constraints": try: if type(value) == str: if value == "None": processed_params[key] = None elif value == "HBonds": processed_params[key] = HBonds elif value == "AllBonds": processed_params[key] = AllBonds elif value == "HAngles": processed_params[key] = HAngles else: error_msg += ( f"Invalid constraints: Got {value}. " "Try using None, HBonds, AllBonds or " "HAngles\n" ) else: processed_params[key] = value except TypeError as e: error_msg += ( f"Invalid constraints: {e}. If you are using " "null or None, just dont include as " "part of the parameters.\n" ) if key == "rigidWater" or key == "rigidwater": if type(value) == bool: processed_params[key] = value elif value == "True": processed_params[key] = True elif value == "False": processed_params[key] = False else: error_msg += ( f"Invalid rigidWater: got {value}. 
" "Try using True or False.\n" ) if key == "constraintTolerance" or key == "constrainttolerance": try: processed_params[key] = float(value) except ValueError as e: error_msg += f"Invalid constraintTolerance: {e}." except TypeError as e: error_msg += ( f"Invalid constraintTolerance: {e}. If " "constraintTolerance is null or None, " "just dont include as part of " "the parameters.\n" ) if key == "solvate": try: if type(value) == bool: processed_params[key] = value elif value == "True": processed_params[key] = True elif value == "False": processed_params[key] = False else: error_msg += ( f"Invalid solvate: got {value}. " "Use either True or False.\n" ) except TypeError as e: error_msg += ( f"Invalid solvate: {e}. If solvate is null or " "None, just dont include as part of " "the parameters.\n" ) return processed_params, error_msg if param_type == "integrator_params": for key, value in user_params.items(): if key == "integrator_type" or key == "integratortype": if value == "LangevinMiddle" or value == LangevinMiddleIntegrator: processed_params[key] = "LangevinMiddle" elif value == "Langevin" or value == LangevinIntegrator: processed_params[key] = "Langevin" elif value == "Verlet" or value == VerletIntegrator: processed_params[key] = "Verlet" elif value == "Brownian" or value == BrownianIntegrator: processed_params[key] = "Brownian" else: error_msg += ( f"Invalid integrator_type: got {value}. " "Try using LangevinMiddle, Langevin, " "Verlet, or Brownian.\n" ) if key == "Temperature" or key == "temperature": temperature, msg = self.parse_temperature(value) processed_params[key] = temperature error_msg += msg if key == "Friction" or key == "friction": friction, msg = self.parse_friction(value) processed_params[key] = friction error_msg += msg if key == "Timestep" or key == "timestep": timestep, msg = self.parse_timestep(value) processed_params[key] = timestep error_msg += msg if key == "Pressure" or key == "pressure": pressure, msg = self.parse_pressure(value) processed_params[key] = pressure error_msg += msg return processed_params, error_msg if param_type == "simulation_params": for key, value in user_params.items(): if key == "Ensemble" or key == "ensemble": if value == "NPT": processed_params[key] = "NPT" elif value == "NVT": processed_params[key] = "NVT" elif value == "NVE": processed_params[key] = "NVE" else: error_msg += ( f"Invalid Ensemble. got {value}. 
" "Try using NPT, NVT, or NVE.\n" ) if key == "Number of Steps" or key == "number of steps": processed_params[key] = int(value) if key == "record_interval_steps" or key == "record interval steps": processed_params[key] = int(value) if key == "record_params" or key == "record params": processed_params[key] = value return processed_params, error_msg def check_system_params(cls, values): """Check that the system parameters are valid.""" # lowercase all keys in the dictionary error_msg = "" values = {k.lower(): v for k, v in values.items()} system_params = values.get("system_params") if system_params: system_params, msg = cls._process_parameters( system_params, param_type="system_params" ) if msg != "": error_msg += msg else: system_params = { "nonbondedMethod": NoCutoff, "nonbondedCutoff": 1 * nanometers, "ewaldErrorTolerance": None, "constraints": AllBonds, "rigidWater": True, "constraintTolerance": 0.00001, "solvate": False, } integrator_params = values.get("integrator_params") if integrator_params: integrator_params, msg = cls._process_parameters( integrator_params, param_type="integrator_params" ) if msg != "": error_msg += msg else: integrator_params = { "integrator_type": "LangevinMiddle", "Temperature": 300 * kelvin, "Friction": 1.0 / picoseconds, "Timestep": 0.004 * picoseconds, "Pressure": 1.0 * bar, } simulation_params = values.get("simulation_params") if simulation_params is None: simulation_params = { "Ensemble": "NVT", "Number of Steps": 10000, "record_interval_steps": 100, "record_params": ["step", "potentialEnergy", "temperature"], } # lowercase all keys in the dictionary # system_params = {k.lower(): v for k, v in system_params.items()} # integrator_params = {k.lower(): v for k, v in integrator_params.items()} # simulation_params = {k.lower(): v for k, v in simulation_params.items()} nonbondedMethod = system_params.get("nonbondedMethod") nonbondedCutoff = system_params.get("nonbondedCutoff") ewaldErrorTolerance = system_params.get("ewaldErrorTolerance") constraints = system_params.get("constraints") rigidWater = system_params.get("rigidWater") constraintTolerance = system_params.get("constraintTolerance") methods_with_cutoff = { "PME", "CutoffNonPeriodic", "CutoffPeriodic", "Ewald", PME, CutoffNonPeriodic, CutoffPeriodic, Ewald, } constraints_with_tolerance = { "HBonds", "AllBonds", "OnlyWater", HBonds, AllBonds, } if nonbondedMethod in methods_with_cutoff and nonbondedCutoff is None: error_msg += """nonbondedCutoff must be specified if nonbondedMethod is not NoCutoff\n""" if nonbondedMethod in {"PME", PME} and ewaldErrorTolerance is None: error_msg += """ewaldErrorTolerance must be specified when nonbondedMethod is PME\n""" if constraints in constraints_with_tolerance and constraintTolerance is None: error_msg += """constraintTolerance must be specified when constraints is HBonds or AllBonds""" if rigidWater and constraintTolerance is None: error_msg = "constraintTolerance must be specified if rigidWater is True" """Checking if the file is in the path""" pdb_id = values.get("pdb_id") if not pdb_id: error_msg += "The pdb id is not present in the inputs" """Validating the forcefield files and Integrator""" integrator_type = integrator_params.get("integrator_type") if integrator_type not in ["LangevinMiddle", "Verlet", "Brownian"]: error_msg += """integrator_type must be one of the following: LangevinMiddle, Verlet, Brownian\n""" if integrator_type == "LangevinMiddle": friction = integrator_params.get("Friction") if friction is None: error_msg += """friction must be specified 
when integrator_type is LangevinMiddle\n""" timestep = integrator_params.get("Timestep") if timestep is None: error_msg += """timestep must be specified when integrator_type is LangevinMiddle\n""" temp = integrator_params.get("Temperature") if temp is None: error_msg += """temperature must be specified when integrator_type is LangevinMiddle\n""" if integrator_type == "Verlet": timestep = integrator_params.get("Timestep") if timestep is None: error_msg += """timestep must be specified when integrator_type is Verlet\n""" if integrator_type == "Brownian": temperature = integrator_params.get("Temperature") if temperature is None: error_msg += """temperature must be specified when integrator_type is Brownian\n""" # forcefield forcefield_files = values.get("forcefield_files") if forcefield_files is None or forcefield_files is []: print("Setting default forcefields") st.markdown("Setting default forcefields", unsafe_allow_html=True) forcefield_files = ["amber14-all.xml", "amber14/tip3pfb.xml"] elif len(forcefield_files) == 0: print("Setting default forcefields v2") st.markdown("Setting default forcefields", unsafe_allow_html=True) forcefield_files = ["amber14-all.xml", "amber14/tip3pfb.xml"] else: for file in forcefield_files: if file not in FORCEFIELD_LIST: error_msg += "The forcefield file is not present" save = values.get("save", True) if type(save) != bool: error_msg += "save must be a boolean value" if error_msg != "": return { "error": error_msg + "\n Correct this and try again. \n Everthing else is fine" } values = { "pdb_id": pdb_id, "forcefield_files": forcefield_files, "save": save, "system_params": system_params, "integrator_params": integrator_params, "simulation_params": simulation_params, } # if no error, return the values return values async def _arun(self, query: str) -> str: """Use the tool asynchronously.""" raise NotImplementedError("custom_search does not support async") def create_simulation_input(pdb_path, forcefield_files): """ This function takes a PDB file path and a list of forcefield files. It creates and returns a PDBFile and ForceField object. The forcefield_files list can contain one or more files. If only one file is provided, it assumes that the file includes both the forcefield and the water model if needed. Parameters: pdb_path (str): The file path to the PDB file. forcefield_files (list of str): A list of file paths to the forcefield XML files. Returns: tuple: A tuple containing the PDBFile and ForceField objects. """ # Load the PDB file pdb_path.split(".")[0] end = pdb_path.split(".")[1] if end == "pdb": pdb = PDBFile(pdb_path) elif end == "cif": pdb = PDBxFile(pdb_path) # Clean up forcefield files list and remove any empty strings forcefield_files = ( forcefield_files.replace("(default)", "").replace(" and ", ",").strip() ) Forcefield_files = [file.strip() for file in forcefield_files.split(",")] Forcefield = Forcefield_files[0] Water_model = Forcefield_files[1] # check if they are part of the list if Forcefield not in FORCEFIELD_LIST: raise Exception("Forcefield not recognized") if Water_model not in FORCEFIELD_LIST: raise Exception("Water model not recognized") forcefield = ForceField(Forcefield, Water_model) # TODO Not all forcefields require water model return pdb, forcefield
[ "langchain.chains.LLMChain", "langchain.prompts.PromptTemplate", "langchain.chat_models.ChatOpenAI" ]
[((20314, 20720), 'pydantic.Field', 'Field', (['(True)'], {'description': '"""Set to \'True\' (default) to save the log files and trajectories of the simulation. If set to \'False\', the simulation is considered as being in a testing or preliminary scripting stage, utilizing default parameters and results are not saved. This second setting is ideal for initial experimentation or basic script development before customizing the script for final use."""'}), '(True, description=\n "Set to \'True\' (default) to save the log files and trajectories of the simulation. If set to \'False\', the simulation is considered as being in a testing or preliminary scripting stage, utilizing default parameters and results are not saved. This second setting is ideal for initial experimentation or basic script development before customizing the script for final use."\n )\n', (20319, 20720), False, 'from pydantic import BaseModel, Field\n'), ((20979, 22020), 'pydantic.Field', 'Field', (["{'nonbondedMethod': 'NoCutoff', 'nonbondedCutoff': '1 * nanometers',\n 'ewaldErrorTolerance': None, 'constraints': 'None', 'rigidWater': False,\n 'constraintTolerance': None, 'solvate': False}"], {'description': '"""Parameters for the openmm system. For nonbondedMethod, you can choose from the following:\nNoCutoff, CutoffNonPeriodic, CutoffPeriodic, Ewald, PME. If anything but NoCutoff is chosen,you have to include a nonbondedCutoffand a constrainTolerance.\nIf PME is chosen,you have to include an ewaldErrorTolerance too.For constraints, you can choose from the following:\nNone, HBonds, AllBonds or OnlyWater.For rigidWater, you can choose from the following:\nTrue, False.\nFinally, if you want to solvate the system, before the simulation,you can set solvate to True.\nExample1:\n{\'nonbondedMethod\': \'NoCutoff\',\n\'constraints\': \'None\',\n\'rigidWater\': False}\nExample2:\n{\'nonbondedMethod\': \'CutoffPeriodic\',\n\'nonbondedCutoff\': 1.0,\n\'constraints\': \'HBonds\',\n\'rigidWater\': True,\n\'constraintTolerance\': 0.00001,\n\'solvate\': True} """'}), '({\'nonbondedMethod\': \'NoCutoff\', \'nonbondedCutoff\': \'1 * nanometers\',\n \'ewaldErrorTolerance\': None, \'constraints\': \'None\', \'rigidWater\': False,\n \'constraintTolerance\': None, \'solvate\': False}, description=\n """Parameters for the openmm system. For nonbondedMethod, you can choose from the following:\nNoCutoff, CutoffNonPeriodic, CutoffPeriodic, Ewald, PME. 
If anything but NoCutoff is chosen,you have to include a nonbondedCutoffand a constrainTolerance.\nIf PME is chosen,you have to include an ewaldErrorTolerance too.For constraints, you can choose from the following:\nNone, HBonds, AllBonds or OnlyWater.For rigidWater, you can choose from the following:\nTrue, False.\nFinally, if you want to solvate the system, before the simulation,you can set solvate to True.\nExample1:\n{\'nonbondedMethod\': \'NoCutoff\',\n\'constraints\': \'None\',\n\'rigidWater\': False}\nExample2:\n{\'nonbondedMethod\': \'CutoffPeriodic\',\n\'nonbondedCutoff\': 1.0,\n\'constraints\': \'HBonds\',\n\'rigidWater\': True,\n\'constraintTolerance\': 0.00001,\n\'solvate\': True} """\n )\n', (20984, 22020), False, 'from pydantic import BaseModel, Field\n'), ((22557, 22791), 'pydantic.Field', 'Field', (["{'integrator_type': 'LangevinMiddle', 'Temperature': '300 * kelvin',\n 'Friction': '1.0 / picoseconds', 'Timestep': '0.002 * picoseconds',\n 'Pressure': '1.0 * bar'}"], {'description': '"""Parameters for the openmm integrator."""'}), "({'integrator_type': 'LangevinMiddle', 'Temperature': '300 * kelvin',\n 'Friction': '1.0 / picoseconds', 'Timestep': '0.002 * picoseconds',\n 'Pressure': '1.0 * bar'}, description=\n 'Parameters for the openmm integrator.')\n", (22562, 22791), False, 'from pydantic import BaseModel, Field\n'), ((22917, 23653), 'pydantic.Field', 'Field', (["{'Ensemble': 'NVT', 'Number of Steps': 5000, 'record_interval_steps': 100,\n 'record_params': ['step', 'potentialEnergy', 'temperature']}"], {'description': '"""Parameters for the openmm simulation.\n The ensemble can be NPT, NVT or NVE.\n The number of steps is the number of steps the simulation will run for.\n record_interval_steps is the number of steps between each record:\n hould be the number of steps divided by 100.\n The record_params is a list of parameters that will\n be recorded during the simulation The options are:\n [Step,Time,Speed,Progress,RemainingTime,ElapsedTime,\n PotentialEnergy,KineticEnergy,TotalEnergy,\n Temperature,Volume,Density]"""'}), '({\'Ensemble\': \'NVT\', \'Number of Steps\': 5000, \'record_interval_steps\':\n 100, \'record_params\': [\'step\', \'potentialEnergy\', \'temperature\']},\n description=\n """Parameters for the openmm simulation.\n The ensemble can be NPT, NVT or NVE.\n The number of steps is the number of steps the simulation will run for.\n record_interval_steps is the number of steps between each record:\n hould be the number of steps divided by 100.\n The record_params is a list of parameters that will\n be recorded during the simulation The options are:\n [Step,Time,Speed,Progress,RemainingTime,ElapsedTime,\n PotentialEnergy,KineticEnergy,TotalEnergy,\n Temperature,Volume,Density]"""\n )\n', (22922, 23653), False, 'from pydantic import BaseModel, Field\n'), ((74259, 74294), 'openmm.app.ForceField', 'ForceField', (['Forcefield', 'Water_model'], {}), '(Forcefield, Water_model)\n', (74269, 74294), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((2848, 3019), 'langchain.chat_models.ChatOpenAI', 'langchain.chat_models.ChatOpenAI', ([], {'temperature': 'self.temperature', 'model_name': 'self.model_name', 'request_timeout': 'self.request_timeout', 'max_tokens': 'self.request_timeout'}), '(temperature=self.temperature, model_name=\n self.model_name, request_timeout=self.request_timeout, max_tokens=self.\n 
request_timeout)\n', (2880, 3019), False, 'import langchain\n'), ((9952, 10019), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'prompt_template', 'input_variables': "['query']"}), "(template=prompt_template, input_variables=['query'])\n", (9966, 10019), False, 'from langchain.prompts import PromptTemplate\n'), ((10040, 10077), 'langchain.chains.LLMChain', 'LLMChain', ([], {'prompt': 'prompt', 'llm': 'self.llm'}), '(prompt=prompt, llm=self.llm)\n', (10048, 10077), False, 'from langchain.chains import LLMChain\n'), ((12416, 12477), 'streamlit.markdown', 'st.markdown', (['"""Setting up forcefields"""'], {'unsafe_allow_html': '(True)'}), "('Setting up forcefields', unsafe_allow_html=True)\n", (12427, 12477), True, 'import streamlit as st\n'), ((12732, 12765), 'mdagent.tools.base_tools.preprocess_tools.CleaningTools', 'CleaningTools', (['self.path_registry'], {}), '(self.path_registry)\n', (12745, 12765), False, 'from mdagent.tools.base_tools.preprocess_tools import CleaningTools\n'), ((13056, 13093), 'openmm.app.Modeller', 'Modeller', (['pdb.topology', 'pdb.positions'], {}), '(pdb.topology, pdb.positions)\n', (13064, 13093), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((15170, 15219), 'openmm.app.Simulation', 'Simulation', (['modeller.topology', 'system', 'integrator'], {}), '(modeller.topology, system, integrator)\n', (15180, 15219), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((15873, 15914), 'ast.literal_eval', 'ast.literal_eval', (["params['record_params']"], {}), "(params['record_params'])\n", (15889, 15914), False, 'import ast\n'), ((17472, 17524), 'os.path.exists', 'os.path.exists', (['"""simulation_parameters_summary.json"""'], {}), "('simulation_parameters_summary.json')\n", (17486, 17524), False, 'import os\n'), ((25493, 25547), 'streamlit.markdown', 'st.markdown', (['"""Building system"""'], {'unsafe_allow_html': '(True)'}), "('Building system', unsafe_allow_html=True)\n", (25504, 25547), True, 'import streamlit as st\n'), ((25680, 25702), 'openmm.app.PDBFile', 'PDBFile', (['self.pdb_path'], {}), '(self.pdb_path)\n', (25687, 25702), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((25729, 25770), 'openmm.app.ForceField', 'ForceField', (['*self.params.forcefield_files'], {}), '(*self.params.forcefield_files)\n', (25739, 25770), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((26409, 26469), 'streamlit.markdown', 'st.markdown', (['"""Setting up integrator"""'], {'unsafe_allow_html': '(True)'}), "('Setting up integrator', unsafe_allow_html=True)\n", (26420, 26469), True, 'import streamlit as st\n'), ((27344, 27402), 'streamlit.markdown', 'st.markdown', (['"""Creating simulation"""'], {'unsafe_allow_html': '(True)'}), "('Creating simulation', unsafe_allow_html=True)\n", (27355, 27402), True, 'import streamlit as st\n'), ((32351, 32388), 'openmm.app.Modeller', 'Modeller', (['pdb.topology', 
'pdb.positions'], {}), '(pdb.topology, pdb.positions)\n', (32359, 32388), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((43173, 43248), 'streamlit.markdown', 'st.markdown', (['"""Standalone simulation script written"""'], {'unsafe_allow_html': '(True)'}), "('Standalone simulation script written', unsafe_allow_html=True)\n", (43184, 43248), True, 'import streamlit as st\n'), ((43363, 43432), 'streamlit.markdown', 'st.markdown', (['"""Performing energy minimization"""'], {'unsafe_allow_html': '(True)'}), "('Performing energy minimization', unsafe_allow_html=True)\n", (43374, 43432), True, 'import streamlit as st\n'), ((44047, 44125), 'streamlit.markdown', 'st.markdown', (['"""Minimization complete! Equilibrating..."""'], {'unsafe_allow_html': '(True)'}), "('Minimization complete! Equilibrating...', unsafe_allow_html=True)\n", (44058, 44125), True, 'import streamlit as st\n'), ((44439, 44491), 'streamlit.markdown', 'st.markdown', (['"""Simulating..."""'], {'unsafe_allow_html': '(True)'}), "('Simulating...', unsafe_allow_html=True)\n", (44450, 44491), True, 'import streamlit as st\n'), ((44628, 44672), 'streamlit.markdown', 'st.markdown', (['"""Done!"""'], {'unsafe_allow_html': '(True)'}), "('Done!', unsafe_allow_html=True)\n", (44639, 44672), True, 'import streamlit as st\n'), ((73603, 73620), 'openmm.app.PDBFile', 'PDBFile', (['pdb_path'], {}), '(pdb_path)\n', (73610, 73620), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((5414, 5459), 'openmm.LangevinMiddleIntegrator', 'LangevinMiddleIntegrator', ([], {}), '(**integrator_params)\n', (5438, 5459), False, 'from openmm import AndersenThermostat, BrownianIntegrator, LangevinIntegrator, LangevinMiddleIntegrator, MonteCarloBarostat, OpenMMException, Platform, VerletIntegrator, app, unit\n'), ((10642, 10668), 'json.dump', 'json.dump', (['summary_dict', 'f'], {}), '(summary_dict, f)\n', (10651, 10668), False, 'import json\n'), ((11183, 11195), 'json.load', 'json.load', (['f'], {}), '(f)\n', (11192, 11195), False, 'import json\n'), ((12619, 12654), 'openmm.app.ForceField', 'ForceField', (['Forcefield', 'Water_model'], {}), '(Forcefield, Water_model)\n', (12629, 12654), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((12956, 12972), 'openmm.app.PDBFile', 'PDBFile', (['pdbfile'], {}), '(pdbfile)\n', (12963, 12972), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((13846, 13915), 'streamlit.markdown', 'st.markdown', (['"""Setting up Langevin integrator"""'], {'unsafe_allow_html': '(True)'}), "('Setting up Langevin integrator', unsafe_allow_html=True)\n", (13857, 13915), True, 'import streamlit as st\n'), ((15727, 15759), 'openmm.app.PDBReporter', 'PDBReporter', (['f"""{name}.pdb"""', '(1000)'], {}), "(f'{name}.pdb', 1000)\n", (15738, 15759), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, 
NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((16625, 16680), 'openmm.app.StateDataReporter', 'StateDataReporter', (['f"""{name}.csv"""', '(1000)'], {}), "(f'{name}.csv', 1000, **reporter_args)\n", (16642, 16680), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((17731, 17746), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (17741, 17746), False, 'import os\n'), ((19272, 19284), 'json.load', 'json.load', (['f'], {}), '(f)\n', (19281, 19284), False, 'import json\n'), ((26664, 26767), 'openmm.LangevinMiddleIntegrator', 'LangevinMiddleIntegrator', (["int_params['Temperature']", "int_params['Friction']", "int_params['Timestep']"], {}), "(int_params['Temperature'], int_params['Friction'],\n int_params['Timestep'])\n", (26688, 26767), False, 'from openmm import AndersenThermostat, BrownianIntegrator, LangevinIntegrator, LangevinMiddleIntegrator, MonteCarloBarostat, OpenMMException, Platform, VerletIntegrator, app, unit\n'), ((27543, 27576), 'openmm.Platform.getPlatformByName', 'Platform.getPlatformByName', (['"""CPU"""'], {}), "('CPU')\n", (27569, 27576), False, 'from openmm import AndersenThermostat, BrownianIntegrator, LangevinIntegrator, LangevinMiddleIntegrator, MonteCarloBarostat, OpenMMException, Platform, VerletIntegrator, app, unit\n'), ((42922, 42947), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (42936, 42947), False, 'import os\n'), ((42961, 42983), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (42972, 42983), False, 'import os\n'), ((44714, 44751), 'os.path.exists', 'os.path.exists', (['"""temp_trajectory.dcd"""'], {}), "('temp_trajectory.dcd')\n", (44728, 44751), False, 'import os\n'), ((44817, 44847), 'os.path.exists', 'os.path.exists', (['"""temp_log.txt"""'], {}), "('temp_log.txt')\n", (44831, 44847), False, 'import os\n'), ((44906, 44943), 'os.path.exists', 'os.path.exists', (['"""temp_checkpoint.chk"""'], {}), "('temp_checkpoint.chk')\n", (44920, 44943), False, 'import os\n'), ((47535, 47589), 'streamlit.markdown', 'st.markdown', (['"""simulation set!"""'], {'unsafe_allow_html': '(True)'}), "('simulation set!', unsafe_allow_html=True)\n", (47546, 47589), True, 'import streamlit as st\n'), ((71482, 71548), 'streamlit.markdown', 'st.markdown', (['"""Setting default forcefields"""'], {'unsafe_allow_html': '(True)'}), "('Setting default forcefields', unsafe_allow_html=True)\n", (71493, 71548), True, 'import streamlit as st\n'), ((73658, 73676), 'openmm.app.PDBxFile', 'PDBxFile', (['pdb_path'], {}), '(pdb_path)\n', (73666, 73676), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((5527, 5564), 'openmm.VerletIntegrator', 'VerletIntegrator', ([], {}), '(**integrator_params)\n', (5543, 5564), False, 'from openmm import AndersenThermostat, BrownianIntegrator, LangevinIntegrator, LangevinMiddleIntegrator, MonteCarloBarostat, OpenMMException, Platform, VerletIntegrator, app, unit\n'), ((13018, 13035), 'openmm.app.PDBxFile', 'PDBxFile', (['pdbfile'], {}), '(pdbfile)\n', (13026, 13035), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, 
Simulation, StateDataReporter\n'), ((15006, 15073), 'streamlit.markdown', 'st.markdown', (['"""Setting up Verlet integrator"""'], {'unsafe_allow_html': '(True)'}), "('Setting up Verlet integrator', unsafe_allow_html=True)\n", (15017, 15073), True, 'import streamlit as st\n'), ((26911, 27008), 'openmm.LangevinIntegrator', 'LangevinIntegrator', (["int_params['Temperature']", "int_params['Friction']", "int_params['Timestep']"], {}), "(int_params['Temperature'], int_params['Friction'],\n int_params['Timestep'])\n", (26929, 27008), False, 'from openmm import AndersenThermostat, BrownianIntegrator, LangevinIntegrator, LangevinMiddleIntegrator, MonteCarloBarostat, OpenMMException, Platform, VerletIntegrator, app, unit\n'), ((29047, 29122), 'openmm.app.DCDReporter', 'DCDReporter', (['f"""{trajectory_name}"""', "self.sim_params['record_interval_steps']"], {}), "(f'{trajectory_name}', self.sim_params['record_interval_steps'])\n", (29058, 29122), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((29258, 29331), 'openmm.app.PDBReporter', 'PDBReporter', (['f"""{topology_name}"""', "self.sim_params['record_interval_steps']"], {}), "(f'{topology_name}', self.sim_params['record_interval_steps'])\n", (29269, 29331), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((29467, 29612), 'openmm.app.StateDataReporter', 'StateDataReporter', (['f"""{log_name}"""', "self.sim_params['record_interval_steps']"], {'step': '(True)', 'potentialEnergy': '(True)', 'temperature': '(True)', 'separator': '"""\t"""'}), "(f'{log_name}', self.sim_params['record_interval_steps'],\n step=True, potentialEnergy=True, temperature=True, separator='\\t')\n", (29484, 29612), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((30145, 30221), 'openmm.app.DCDReporter', 'DCDReporter', (['"""temp_trajectory.dcd"""', "self.sim_params['record_interval_steps']"], {}), "('temp_trajectory.dcd', self.sim_params['record_interval_steps'])\n", (30156, 30221), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((30357, 30431), 'openmm.app.PDBReporter', 'PDBReporter', (['"""temp_topology.pdb"""', "self.sim_params['record_interval_steps']"], {}), "('temp_topology.pdb', self.sim_params['record_interval_steps'])\n", (30368, 30431), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((30567, 30713), 'openmm.app.StateDataReporter', 'StateDataReporter', (['"""temp_log.txt"""', "self.sim_params['record_interval_steps']"], {'step': '(True)', 'potentialEnergy': '(True)', 'temperature': '(True)', 'separator': '"""\t"""'}), "('temp_log.txt', self.sim_params['record_interval_steps'],\n step=True, potentialEnergy=True, temperature=True, separator='\\t')\n", (30584, 30713), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, 
CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((33597, 33626), 'openmm.unit.value_in_unit', 'unit.value_in_unit', (['unit.unit'], {}), '(unit.unit)\n', (33615, 33626), False, 'from openmm import AndersenThermostat, BrownianIntegrator, LangevinIntegrator, LangevinMiddleIntegrator, MonteCarloBarostat, OpenMMException, Platform, VerletIntegrator, app, unit\n'), ((33629, 33649), 'openmm.unit.unit.get_name', 'unit.unit.get_name', ([], {}), '()\n', (33647, 33649), False, 'from openmm import AndersenThermostat, BrownianIntegrator, LangevinIntegrator, LangevinMiddleIntegrator, MonteCarloBarostat, OpenMMException, Platform, VerletIntegrator, app, unit\n'), ((42802, 42833), 'textwrap.dedent', 'textwrap.dedent', (['script_content'], {}), '(script_content)\n', (42817, 42833), False, 'import textwrap\n'), ((44769, 44801), 'os.remove', 'os.remove', (['"""temp_trajectory.dcd"""'], {}), "('temp_trajectory.dcd')\n", (44778, 44801), False, 'import os\n'), ((44865, 44890), 'os.remove', 'os.remove', (['"""temp_log.txt"""'], {}), "('temp_log.txt')\n", (44874, 44890), False, 'import os\n'), ((44961, 44993), 'os.remove', 'os.remove', (['"""temp_checkpoint.chk"""'], {}), "('temp_checkpoint.chk')\n", (44970, 44993), False, 'import os\n'), ((71728, 71794), 'streamlit.markdown', 'st.markdown', (['"""Setting default forcefields"""'], {'unsafe_allow_html': '(True)'}), "('Setting default forcefields', unsafe_allow_html=True)\n", (71739, 71794), True, 'import streamlit as st\n'), ((5634, 5673), 'openmm.BrownianIntegrator', 'BrownianIntegrator', ([], {}), '(**integrator_params)\n', (5652, 5673), False, 'from openmm import AndersenThermostat, BrownianIntegrator, LangevinIntegrator, LangevinMiddleIntegrator, MonteCarloBarostat, OpenMMException, Platform, VerletIntegrator, app, unit\n'), ((14061, 14112), 'openmm.MonteCarloBarostat', 'MonteCarloBarostat', (['(_pressure * bar)', '(_temp * kelvin)'], {}), '(_pressure * bar, _temp * kelvin)\n', (14079, 14112), False, 'from openmm import AndersenThermostat, BrownianIntegrator, LangevinIntegrator, LangevinMiddleIntegrator, MonteCarloBarostat, OpenMMException, Platform, VerletIntegrator, app, unit\n'), ((49165, 49180), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (49175, 49180), False, 'import os\n'), ((49205, 49236), 'os.path.exists', 'os.path.exists', (['"""files/records"""'], {}), "('files/records')\n", (49219, 49236), False, 'import os\n'), ((49258, 49286), 'os.makedirs', 'os.makedirs', (['"""files/records"""'], {}), "('files/records')\n", (49269, 49286), False, 'import os\n'), ((14496, 14546), 'openmm.AndersenThermostat', 'AndersenThermostat', (['(_temp * kelvin)', '(1 / picosecond)'], {}), '(_temp * kelvin, 1 / picosecond)\n', (14514, 14546), False, 'from openmm import AndersenThermostat, BrownianIntegrator, LangevinIntegrator, LangevinMiddleIntegrator, MonteCarloBarostat, OpenMMException, Platform, VerletIntegrator, app, unit\n'), ((14580, 14631), 'openmm.MonteCarloBarostat', 'MonteCarloBarostat', (['(_pressure * bar)', '(_temp * kelvin)'], {}), '(_pressure * bar, _temp * kelvin)\n', (14598, 14631), False, 'from openmm import AndersenThermostat, BrownianIntegrator, LangevinIntegrator, LangevinMiddleIntegrator, MonteCarloBarostat, OpenMMException, Platform, VerletIntegrator, app, unit\n'), ((52042, 52096), 're.match', 're.match', (['"""([+-]?[0-9]*\\\\.?[0-9]+)([a-zA-Z]*)"""', 'cutoff'], {}), "('([+-]?[0-9]*\\\\.?[0-9]+)([a-zA-Z]*)', cutoff)\n", (52050, 
52096), False, 'import re\n'), ((54705, 54763), 're.match', 're.match', (['"""^(\\\\d+(?:\\\\.\\\\d+)?)([a-zA-Z]+)$"""', 'parameter_str'], {}), "('^(\\\\d+(?:\\\\.\\\\d+)?)([a-zA-Z]+)$', parameter_str)\n", (54713, 54763), False, 'import re\n'), ((55149, 55210), 're.match', 're.match', (['"""([+-]?[0-9]*\\\\.?[0-9]+)([a-zA-Z]*)"""', 'parameter_str'], {}), "('([+-]?[0-9]*\\\\.?[0-9]+)([a-zA-Z]*)', parameter_str)\n", (55157, 55210), False, 'import re\n')]
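The create_simulation_input function defined in the code above strips the "(default)" marker and the " and " separator from the forcefield string before splitting it on commas, so a two-entry forcefield string is expected. A minimal usage sketch, assuming the function is in scope and using placeholder file names:

pdb, forcefield = create_simulation_input(
    "protein.pdb",
    "amber14-all.xml and amber14/tip3pfb.xml",
)
# pdb is an openmm.app.PDBFile, forcefield an openmm.app.ForceField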
import langchain
from langchain_openai import AzureChatOpenAI
from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory
from langchain.prompts.chat import MessagesPlaceholder

from tech_agents.command import Command, check_command
from tech_agents.dispatcher import MainDispatcherAgent
from tech_agents.template import default_value


class MainAgent:
    llm: AzureChatOpenAI
    memory: ConversationBufferMemory
    chat_history: MessagesPlaceholder
    verbose: bool
    """
    The MainAgent class represents the main agent.
    It holds attributes such as AzureChatOpenAI, ConversationBufferMemory, and MessagesPlaceholder.
    The main agent runs the Agent classes on the given input.
    """

    def __init__(
        self,
        llm: AzureChatOpenAI = default_value.default_llm,
        memory: ConversationBufferMemory = default_value.default_memory,
        chat_history: MessagesPlaceholder = default_value.default_chat_history,
        verbose: bool = False,
    ):
        """
        Constructor of the MainAgent class.
        Initializes AzureChatOpenAI, ConversationBufferMemory, MessagesPlaceholder, and verbose
        with the default arguments.

        Instantiation
        ------------
        main_agent = MainAgent(
            llm=the LLM you want to use,
            memory=the memory you want to use,
            chat_history=the chat history you want to use,
            verbose=whether to enable debug mode
        )

        Execution
        ------------
        message = "こんにちは"
        output = main_agent.run(message)
        print(output)
        """
        # Initialize the arguments
        self.llm = llm
        self.memory = memory
        self.chat_history = chat_history
        self.verbose = verbose
        # Make the memory read-only
        self.readonly_memory = ReadOnlySharedMemory(memory=self.memory)
        # Configure debug mode
        langchain.debug = self.verbose

    def run(self, user_message: str) -> str:
        """
        Runs the main agent.
        Creates the Agent class and executes it with the given input.
        """
        param = check_command(user_message)
        if param.check_command_bool:
            CommandAgent = Command(
                llm=self.llm,
                memory=self.memory,
                readonly_memory=self.readonly_memory,
                chat_history=self.chat_history,
                verbose=self.verbose
            )
            return CommandAgent.run(param.command, user_message)

        main_agent = MainDispatcherAgent(
            llm=self.llm,
            memory=self.memory,
            readonly_memory=self.readonly_memory,
            chat_history=self.chat_history,
            verbose=self.verbose
        )
        return main_agent.run(user_message)
[ "langchain.memory.ReadOnlySharedMemory" ]
[((1669, 1709), 'langchain.memory.ReadOnlySharedMemory', 'ReadOnlySharedMemory', ([], {'memory': 'self.memory'}), '(memory=self.memory)\n', (1689, 1709), False, 'from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory\n'), ((1926, 1953), 'tech_agents.command.check_command', 'check_command', (['user_message'], {}), '(user_message)\n', (1939, 1953), False, 'from tech_agents.command import Command, check_command\n'), ((2341, 2491), 'tech_agents.dispatcher.MainDispatcherAgent', 'MainDispatcherAgent', ([], {'llm': 'self.llm', 'memory': 'self.memory', 'readonly_memory': 'self.readonly_memory', 'chat_history': 'self.chat_history', 'verbose': 'self.verbose'}), '(llm=self.llm, memory=self.memory, readonly_memory=self.\n readonly_memory, chat_history=self.chat_history, verbose=self.verbose)\n', (2360, 2491), False, 'from tech_agents.dispatcher import MainDispatcherAgent\n'), ((2018, 2156), 'tech_agents.command.Command', 'Command', ([], {'llm': 'self.llm', 'memory': 'self.memory', 'readonly_memory': 'self.readonly_memory', 'chat_history': 'self.chat_history', 'verbose': 'self.verbose'}), '(llm=self.llm, memory=self.memory, readonly_memory=self.\n readonly_memory, chat_history=self.chat_history, verbose=self.verbose)\n', (2025, 2156), False, 'from tech_agents.command import Command, check_command\n')]
from typing import List, TypedDict import tiktoken from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage from langchain_openai import ChatOpenAI from app.enums.langchain_enums import LangchainRole from config import langchain_config, settings class MessagesType(TypedDict): role: str content: str class LangchainSendChatService: def __init__(self, model_name: str, messages: List[MessagesType]): if self._check_model_name() is False: raise ValueError("model_name is invalid.") self.model_name = model_name self.messages = messages def _check_model_name(self) -> bool: if self.model_name not in langchain_config.USEABLE_MODEL_NAME: return False return True def send_message( self, temperature: float = 0.7, max_tokens: int = 1000, ) -> str: langchain_messages = self._create_messages() if self._check_tokens(langchain_messages, max_tokens) is False: raise ValueError("model_name is invalid.") parameters = { "max_tokens": max_tokens, "model_name": self.model_name, "openai_api_key": settings.OPENAI_API_KEY, "temperature": temperature, } chat = ChatOpenAI(**parameters) result = chat.invoke(langchain_messages) return result.content def _check_tokens( self, langchain_messages: List[BaseMessage], max_tokens: int, ) -> bool: """ token数のチェック """ if len(langchain_messages) == 0: return False encode_name = langchain_config.ENCODE_NAME[self.model_name] tiktoken_enc = tiktoken.get_encoding(encode_name) total_tokens = 0 for langchain_message in langchain_messages: tokens = tiktoken_enc.encode(langchain_message.content) total_tokens += len(tokens) return langchain_config.MAX_TOKEN[self.model_name] > (total_tokens + max_tokens) def _create_messages(self) -> List[BaseMessage]: langchain_messages: List[BaseMessage] = [] for message in self.messages: if message["role"] == LangchainRole.AI.value: langchain_messages.append(AIMessage(content=message["content"])) continue if message["role"] == LangchainRole.HUMAN.value: langchain_messages.append(HumanMessage(content=message["content"])) continue if message["role"] == LangchainRole.SYSTEM.value: langchain_messages.append(SystemMessage(content=message["content"])) continue return langchain_messages
[ "langchain.schema.AIMessage", "langchain_openai.ChatOpenAI", "langchain.schema.SystemMessage", "langchain.schema.HumanMessage" ]
[((1294, 1318), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {}), '(**parameters)\n', (1304, 1318), False, 'from langchain_openai import ChatOpenAI\n'), ((1726, 1760), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['encode_name'], {}), '(encode_name)\n', (1747, 1760), False, 'import tiktoken\n'), ((2281, 2318), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': "message['content']"}), "(content=message['content'])\n", (2290, 2318), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((2449, 2489), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': "message['content']"}), "(content=message['content'])\n", (2461, 2489), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((2621, 2662), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': "message['content']"}), "(content=message['content'])\n", (2634, 2662), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n')]
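A usage sketch for the LangchainSendChatService shown above. The accepted model names and the exact role strings come from langchain_config.USEABLE_MODEL_NAME and the LangchainRole enum, which are not shown here, so the values below are assumptions:

service = LangchainSendChatService(
    model_name="gpt-3.5-turbo",  # assumed to be listed in USEABLE_MODEL_NAME
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},  # role strings assumed
        {"role": "human", "content": "Say hello."},
    ],
)
reply = service.send_message(temperature=0.7, max_tokens=1000)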
# import modules import telebot from telebot import * import logging import sqlite3 import os import langchain from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter from langchain.embeddings.openai import OpenAIEmbeddings from langchain.document_loaders import TextLoader from langchain.document_loaders import DirectoryLoader from langchain.vectorstores import Chroma from langchain.prompts import PromptTemplate from langchain.chat_models import ChatOpenAI from langchain.chains import RetrievalQA # connect to the database conn = sqlite3.connect(r"main.db", check_same_thread=False) cur = conn.cursor() # start logging logging.basicConfig(level=logging.INFO, filename="../info.log", filemode='w') # init a bot with token from file bot_token_file = open("bot_token.txt", "r") API_KEY = bot_token_file.readline() bot_token_file.close() os.environ["API_KEY"] = API_KEY bot = telebot.TeleBot(API_KEY) # set the openai token token_file = open("openai_token.txt", "r") token = token_file.readline() token_file.close() os.environ["OPENAI_API_KEY"] = token docs_k = 65 # const number_of_goods = 6 # const goods = ["Philips EP2231/40", "Nivona CafeRomatica NICR 550", # list of goods "Delonghi ECAM 370.70.B", "Polaris PACM 2065AC", "Philips EP2030/10", "REDMOND RCM-1517"] langchain.debug = False # debug is off # read the vector databases vectordb_list = [] embedding = OpenAIEmbeddings() for i in range(number_of_goods): vectordb_list.append(Chroma(embedding_function=embedding, persist_directory="../output/"+str(i))) for vectordb in vectordb_list: print(vectordb._collection.count()) def get_info(itemID): question = "Расскажи об этой кофемашине" template = """Ты - полезный ИИ консультант для нашего магазина бытовой техники по продаже кофемашин. Твое задание - описать данную кофемашину. Говори только о достоинствах. Используйте следующие фрагменты контекста (Context), чтобы ответить на вопрос в конце (Question). Если вы не знаете ответа, просто скажите, что не знаете, не пытайтесь придумывать ответ. Сначала убедитесь, что прикрепленный текст имеет отношение к вопросу. Если вопрос не имеет отшения к тексту, ответьте, что вы не можете ответить на данный вопрос. Используйте максимум 15 предложений. Дайте ответ как можно более понятным, рассказывая кратко про все достинства именно данной кофемашины. Context: {context} Question: {question}""" QA_CHAIN_PROMPT = PromptTemplate.from_template(template) vectordb = vectordb_list[itemID] retriever = vectordb.as_retriever(search_type="similarity", search_kwargs={"k": docs_k}) llm = ChatOpenAI( model_name="gpt-3.5-turbo", temperature=0, max_tokens = 250) qa_chain = RetrievalQA.from_chain_type( llm, retriever=retriever, return_source_documents=True, chain_type_kwargs={"prompt": QA_CHAIN_PROMPT}) result = qa_chain({"query": question}) return result["result"] def get_answer(itemID, question): template = """Ты - полезный ИИ консультант для нашего магазина бытовой техники по продаже кофемашин. Твое задание - понятно ответить на вопрос покупателя. Используйте следующие фрагменты контекста (Context), чтобы ответить на вопрос в конце (Question). Если вы не знаете ответа, просто скажите, что не знаете, не пытайтесь придумывать ответ. Сначала убедитесь, что прикрепленный текст имеет отношение к вопросу. Если вопрос не имеет отшения к тексту, ответьте, что вы не можете ответить на данный вопрос. Используйте максимум 15 предложений. Дайте ответ как можно более понятным. Говорите грамотно. 
Context: {context} Question: {question}""" QA_CHAIN_PROMPT = PromptTemplate.from_template(template) vectordb = vectordb_list[itemID] retriever = vectordb.as_retriever(search_type="similarity", search_kwargs={"k": docs_k}) llm = ChatOpenAI( model_name="gpt-3.5-turbo", temperature=0, max_tokens = 250) qa_chain = RetrievalQA.from_chain_type( llm, retriever=retriever, return_source_documents=True, chain_type_kwargs={"prompt": QA_CHAIN_PROMPT}) result = qa_chain({"query": question}) return result["result"] def check_step(step, id): cur.execute("SELECT status FROM user WHERE userID = ?", (id,)) fetch_result = cur.fetchone() if fetch_result == None: return False elif step in fetch_result: return True else: return False def get_itemID(userID): cur.execute("SELECT itemID FROM user WHERE userID = ?", (userID,)) fetch_result = cur.fetchone() return fetch_result[0] @bot.message_handler(commands=["start"]) def start_message(message): keyboard = types.ReplyKeyboardMarkup( resize_keyboard = True, one_time_keyboard=True ) zero_machine = types.KeyboardButton(text="Philips EP2231/40") first_machine = types.KeyboardButton(text="Nivona CafeRomatica NICR 550") second_machine = types.KeyboardButton(text="Delonghi ECAM 370.70.B") third_machine = types.KeyboardButton(text="Polaris PACM 2065AC") fourth_machine = types.KeyboardButton(text="Philips EP2030/10") fifth_machine = types.KeyboardButton(text="REDMOND RCM-1517") keyboard.row(zero_machine, first_machine) keyboard.row(second_machine, third_machine) keyboard.row(fourth_machine, fifth_machine) bot.send_message(message.chat.id, "📂 Главное меню") bot.send_message(message.chat.id, "📝 Выберите интересующий Вас товар или напишите его имя вручную", reply_markup=keyboard) try: cur.execute("INSERT INTO user VALUES (?, ?, ?);", (message.chat.id, "menu", 0)) except: cur.execute("UPDATE user SET status = ? WHERE userID = ?;", ("menu", message.chat.id)) conn.commit() @bot.message_handler(content_types="text", func=lambda message: check_step("menu", message.chat.id)) def machine_description(message): if message.text in goods: keyboard = types.ReplyKeyboardMarkup( resize_keyboard=True, one_time_keyboard=True ) back_to_menu_button = types.KeyboardButton(text="🗃️ Назад в меню") keyboard.add(back_to_menu_button) bot.send_message(message.chat.id, """⚙️ Запрос принят. Ожидайте ответа...\nВы выбрали -> {}""".format(message.text)) description = get_info(goods.index(message.text)) bot.send_message(message.chat.id, description) bot.send_message(message.chat.id, """🔎 Сейчас Вы можете задать вопросы об этом товаре или вернуться в главное меню.""", reply_markup=keyboard) # change user status in db cur.execute("UPDATE user SET status = ?, itemID = ? WHERE userID = ?;", ("chat", goods.index(message.text), message.chat.id)) conn.commit() else: bot.send_message(message.chat.id, "❌ Запрос отклонён. Такого товара не существует!") @bot.message_handler(content_types="text", func= lambda message: check_step("chat", message.chat.id)) def chat_with_ai(message): keyboard = types.ReplyKeyboardMarkup( resize_keyboard=True, one_time_keyboard=True ) back_to_menu_button = types.KeyboardButton(text="🗃️ Назад в меню") keyboard.add(back_to_menu_button) if message.text == back_to_menu_button.text: bot.send_message(message.chat.id, "⛓️ Возврат в главное меню") cur.execute("UPDATE user SET status = ? 
WHERE userID = ?;", ("menu", message.chat.id)) conn.commit() keyboard = types.ReplyKeyboardMarkup( resize_keyboard = True, one_time_keyboard=True ) zero_machine = types.KeyboardButton(text="Philips EP2231/40") first_machine = types.KeyboardButton(text="Nivona CafeRomatica NICR 550") second_machine = types.KeyboardButton(text="Delonghi ECAM 370.70.B") third_machine = types.KeyboardButton(text="Polaris PACM 2065AC") fourth_machine = types.KeyboardButton(text="Philips EP2030/10") fifth_machine = types.KeyboardButton(text="REDMOND RCM-1517") keyboard.row(zero_machine, first_machine) keyboard.row(second_machine, third_machine) keyboard.row(fourth_machine, fifth_machine) bot.send_message(message.chat.id, "📂 Главное меню") bot.send_message(message.chat.id, "📝 Выберите интересующий Вас товар или напишите его имя вручную", reply_markup=keyboard) else: itemID = get_itemID(message.chat.id) answer = get_answer(itemID, message.text) bot.send_message(message.chat.id, answer, reply_markup=keyboard) bot.infinity_polling(timeout=10, long_polling_timeout = 5)
[ "langchain.prompts.PromptTemplate.from_template", "langchain.chains.RetrievalQA.from_chain_type", "langchain.chat_models.ChatOpenAI", "langchain.embeddings.openai.OpenAIEmbeddings" ]
[((571, 622), 'sqlite3.connect', 'sqlite3.connect', (['"""main.db"""'], {'check_same_thread': '(False)'}), "('main.db', check_same_thread=False)\n", (586, 622), False, 'import sqlite3\n'), ((661, 738), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'filename': '"""../info.log"""', 'filemode': '"""w"""'}), "(level=logging.INFO, filename='../info.log', filemode='w')\n", (680, 738), False, 'import logging\n'), ((915, 939), 'telebot.TeleBot', 'telebot.TeleBot', (['API_KEY'], {}), '(API_KEY)\n', (930, 939), False, 'import telebot\n'), ((1427, 1445), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1443, 1445), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((2561, 2599), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['template'], {}), '(template)\n', (2589, 2599), False, 'from langchain.prompts import PromptTemplate\n'), ((2745, 2814), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)', 'max_tokens': '(250)'}), "(model_name='gpt-3.5-turbo', temperature=0, max_tokens=250)\n", (2755, 2814), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2857, 2996), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', (['llm'], {'retriever': 'retriever', 'return_source_documents': '(True)', 'chain_type_kwargs': "{'prompt': QA_CHAIN_PROMPT}"}), "(llm, retriever=retriever,\n return_source_documents=True, chain_type_kwargs={'prompt': QA_CHAIN_PROMPT}\n )\n", (2884, 2996), False, 'from langchain.chains import RetrievalQA\n'), ((3869, 3907), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['template'], {}), '(template)\n', (3897, 3907), False, 'from langchain.prompts import PromptTemplate\n'), ((4053, 4122), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)', 'max_tokens': '(250)'}), "(model_name='gpt-3.5-turbo', temperature=0, max_tokens=250)\n", (4063, 4122), False, 'from langchain.chat_models import ChatOpenAI\n'), ((4165, 4304), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', (['llm'], {'retriever': 'retriever', 'return_source_documents': '(True)', 'chain_type_kwargs': "{'prompt': QA_CHAIN_PROMPT}"}), "(llm, retriever=retriever,\n return_source_documents=True, chain_type_kwargs={'prompt': QA_CHAIN_PROMPT}\n )\n", (4192, 4304), False, 'from langchain.chains import RetrievalQA\n')]
import langchain as lc
import openai as ai
import datasets as ds
import tiktoken as tk
import os

from langchain_openai import ChatOpenAI
from dotenv import load_dotenv

# Load environment variables from .env file
load_dotenv()

# Get the OpenAI API key from the environment variable
openai_api_key = os.getenv("OPENAI_API_KEY")
if openai_api_key is None:
    raise ValueError("No OpenAI API key found. Please set it in the .env file.")

# Initialize the ChatOpenAI with the API key
chat = ChatOpenAI(openai_api_key=openai_api_key, model="gpt-3.5-turbo")

from langchain.schema import (
    SystemMessage,
    HumanMessage,
    AIMessage
)

messages = [
    SystemMessage(content="You are a helpful assistant."),
    HumanMessage(content="Hi AI, how are you today?"),
    AIMessage(content="I am great, thank you. How can I help you?"),
    HumanMessage(content="I am looking for a restaurant in the center of Berlin."),
]
[ "langchain.schema.AIMessage", "langchain_openai.ChatOpenAI", "langchain.schema.SystemMessage", "langchain.schema.HumanMessage" ]
[((224, 237), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (235, 237), False, 'from dotenv import load_dotenv\n'), ((311, 338), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (320, 338), False, 'import os\n'), ((501, 563), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'open_api_key': 'openai_api_key', 'model': '"""gpt-3.5-turbo"""'}), "(open_api_key=openai_api_key, model='gpt-3.5-turbo')\n", (511, 563), False, 'from langchain_openai import ChatOpenAI\n'), ((667, 720), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': '"""You are a helpful assistant."""'}), "(content='You are a helpful assistant.')\n", (680, 720), False, 'from langchain.schema import SystemMessage, HumanMessage, AIMessage\n'), ((726, 776), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""Hi AI, how are you today?."""'}), "(content='Hi AI, how are you today?.')\n", (738, 776), False, 'from langchain.schema import SystemMessage, HumanMessage, AIMessage\n'), ((782, 845), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': '"""I am great, thank you. How can I help you?"""'}), "(content='I am great, thank you. How can I help you?')\n", (791, 845), False, 'from langchain.schema import SystemMessage, HumanMessage, AIMessage\n'), ((851, 929), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""I am looking for a restaurant in the center of Berlin."""'}), "(content='I am looking for a restaurant in the center of Berlin.')\n", (863, 929), False, 'from langchain.schema import SystemMessage, HumanMessage, AIMessage\n')]
"""Push and pull to the LangChain Hub.""" from __future__ import annotations from typing import TYPE_CHECKING, Any, Optional from langchain.load.dump import dumps from langchain.load.load import loads from langchain.utils import get_from_env if TYPE_CHECKING: from langchainhub import Client def _get_client(api_url: Optional[str] = None, api_key: Optional[str] = None) -> Client: try: from langchainhub import Client except ImportError as e: raise ImportError( "Could not import langchainhub, please install with `pip install " "langchainhub`." ) from e api_url = api_url or get_from_env("api_url", "LANGCHAIN_HUB_API_URL") api_key = api_key or get_from_env("api_key", "LANGCHAIN_HUB_API_KEY", default="") api_key = api_key or get_from_env("api_key", "LANGCHAIN_API_KEY") return Client(api_url, api_key=api_key) def push( repo_full_name: str, object: Any, *, api_url: Optional[str] = None, api_key: Optional[str] = None, parent_commit_hash: Optional[str] = "latest", ) -> str: """ Pushes an object to the hub and returns the URL. """ client = _get_client(api_url=api_url, api_key=api_key) manifest_json = dumps(object) resp = client.push( repo_full_name, manifest_json, parent_commit_hash=parent_commit_hash ) commit_hash: str = resp["commit"]["commit_hash"] return commit_hash def pull( owner_repo_commit: str, *, api_url: Optional[str] = None, api_key: Optional[str] = None, ) -> Any: """ Pulls an object from the hub and returns it. """ client = _get_client(api_url=api_url, api_key=api_key) resp: str = client.pull(owner_repo_commit) return loads(resp)
[ "langchain.load.load.loads", "langchainhub.Client", "langchain.load.dump.dumps", "langchain.utils.get_from_env" ]
[((862, 894), 'langchainhub.Client', 'Client', (['api_url'], {'api_key': 'api_key'}), '(api_url, api_key=api_key)\n', (868, 894), False, 'from langchainhub import Client\n'), ((1234, 1247), 'langchain.load.dump.dumps', 'dumps', (['object'], {}), '(object)\n', (1239, 1247), False, 'from langchain.load.dump import dumps\n'), ((1740, 1751), 'langchain.load.load.loads', 'loads', (['resp'], {}), '(resp)\n', (1745, 1751), False, 'from langchain.load.load import loads\n'), ((646, 694), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_url"""', '"""LANGCHAIN_HUB_API_URL"""'], {}), "('api_url', 'LANGCHAIN_HUB_API_URL')\n", (658, 694), False, 'from langchain.utils import get_from_env\n'), ((720, 780), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_key"""', '"""LANGCHAIN_HUB_API_KEY"""'], {'default': '""""""'}), "('api_key', 'LANGCHAIN_HUB_API_KEY', default='')\n", (732, 780), False, 'from langchain.utils import get_from_env\n'), ((806, 850), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_key"""', '"""LANGCHAIN_API_KEY"""'], {}), "('api_key', 'LANGCHAIN_API_KEY')\n", (818, 850), False, 'from langchain.utils import get_from_env\n')]
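A usage sketch for the push and pull helpers above, using a placeholder repo handle and a serializable PromptTemplate as the pushed object; an API key must be available via the api_key argument or the LANGCHAIN_HUB_API_KEY / LANGCHAIN_API_KEY environment variables:

from langchain.prompts import PromptTemplate

prompt = PromptTemplate.from_template("Summarize the following text:\n{text}")
commit_hash = push("my-handle/summarize-prompt", prompt)  # repo handle is a placeholder
restored = pull("my-handle/summarize-prompt")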
from datetime import timedelta import os import subprocess import whisper import tempfile import argparse import langchain from langchain.chat_models import ChatOpenAI, ChatGooglePalm from langchain.schema import HumanMessage, SystemMessage, AIMessage from langchain.prompts import ( ChatPromptTemplate, PromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate, ) from langchain.chains import LLMChain from langchain.callbacks import get_openai_callback from tqdm import tqdm def get_translate_chain(from_lang, to_lang): template=f"You are a helpful assistant that translates {from_lang} to {to_lang}." system_message_prompt = SystemMessagePromptTemplate.from_template(template) human_template="Please translate \"{text}\""+f" from {from_lang} to {to_lang}. Give me the translated {to_lang} directly without saying anything else, do not use \"." human_message_prompt = HumanMessagePromptTemplate.from_template(human_template) chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt]) # get a chat completion from the formatted messages chat = ChatOpenAI() chain = LLMChain(llm=chat, prompt=chat_prompt, verbose=True) return chain def gen_srt(video_path, model_name="medium", from_language="English", to_language="Chinese", embed=False, translate=True): with tempfile.TemporaryDirectory() as temp_dir: # 1. use ffmpeg to extract audio from video and save it to Temp folder # Path to the temporary audio file temp_audio_path = os.path.join(temp_dir, "extracted_audio.wav") # Use ffmpeg to extract audio from video print("Extracting audio from video...") command = f"ffmpeg -i {video_path} -vn -ar 44100 -ac 2 -b:a 192k {temp_audio_path}" # Execute the command subprocess.call(command, shell=True) model = whisper.load_model(model_name) transcribe = model.transcribe(audio=temp_audio_path, language=from_language) segments = transcribe['segments'] # 2. Use whisper to transcribe audio and save segments to srt file if translate: with get_openai_callback() as cb: chain = get_translate_chain(from_language, to_language) for segment in tqdm(segments): segment['text'] = chain(segment['text'])['text'] print(cb) # 3. Generate the SRT file srtFilename = video_path.split(".")[0] + ".srt" # overwrite the file if it already exists if os.path.exists(srtFilename): os.remove(srtFilename) for segment in segments: startTime = str(0)+str(timedelta(seconds=int(segment['start'])))+',000' endTime = str(0)+str(timedelta(seconds=int(segment['end'])))+',000' text = segment['text'] segmentId = segment['id']+1 segment = f"{segmentId}\n{startTime} --> {endTime}\n{text[1:] if text[0] == ' ' else text}\n\n" with open(srtFilename, 'a', encoding='utf-8') as srtFile: srtFile.write(segment) # 4. 
Use FFMPEG to embed srt file into video if not embed: return output_filename = video_path.split(".")[0] + "_subtitled.mp4" if os.path.exists(output_filename): os.remove(output_filename) embed_command = f"ffmpeg -i {video_path} -vf subtitles={srtFilename} {output_filename}" subprocess.call(embed_command, shell=True) if __name__ == "__main__": parser = argparse.ArgumentParser(description='Process some arguments') # Add the arguments parser.add_argument('-i', type=str, required=True, dest='input_file', help='Input file name') parser.add_argument('-m', type=str, default='medium', dest='model_name', help='Model type, default is "medium"') parser.add_argument('-f', type=str, default='English', dest='from_lang', help='Translate from language, default is "English"') parser.add_argument('-t', type=str, default='Chinese', dest='to_lang', help='Translate to language, default is "Chinese"') parser.add_argument('--embed', dest='embed', action='store_true', help='Whether to Embed subtitles, default is False') parser.add_argument('--translate', dest='translate', action='store_true', help='Whether to Translate, default is False') args = parser.parse_args() gen_srt(args.input_file, model_name=args.model_name, embed=args.embed, translate=args.translate, from_language=args.from_lang, to_language=args.to_lang)
[ "langchain.prompts.HumanMessagePromptTemplate.from_template", "langchain.chat_models.ChatOpenAI", "langchain.prompts.ChatPromptTemplate.from_messages", "langchain.callbacks.get_openai_callback", "langchain.chains.LLMChain", "langchain.prompts.SystemMessagePromptTemplate.from_template" ]
[((696, 747), 'langchain.prompts.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['template'], {}), '(template)\n', (737, 747), False, 'from langchain.prompts import ChatPromptTemplate, PromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((946, 1002), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['human_template'], {}), '(human_template)\n', (986, 1002), False, 'from langchain.prompts import ChatPromptTemplate, PromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((1021, 1100), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['[system_message_prompt, human_message_prompt]'], {}), '([system_message_prompt, human_message_prompt])\n', (1053, 1100), False, 'from langchain.prompts import ChatPromptTemplate, PromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((1169, 1181), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (1179, 1181), False, 'from langchain.chat_models import ChatOpenAI, ChatGooglePalm\n'), ((1194, 1246), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'chat', 'prompt': 'chat_prompt', 'verbose': '(True)'}), '(llm=chat, prompt=chat_prompt, verbose=True)\n', (1202, 1246), False, 'from langchain.chains import LLMChain\n'), ((3618, 3679), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process some arguments"""'}), "(description='Process some arguments')\n", (3641, 3679), False, 'import argparse\n'), ((1398, 1427), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1425, 1427), False, 'import tempfile\n'), ((1589, 1634), 'os.path.join', 'os.path.join', (['temp_dir', '"""extracted_audio.wav"""'], {}), "(temp_dir, 'extracted_audio.wav')\n", (1601, 1634), False, 'import os\n'), ((1880, 1916), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (1895, 1916), False, 'import subprocess\n'), ((1934, 1964), 'whisper.load_model', 'whisper.load_model', (['model_name'], {}), '(model_name)\n', (1952, 1964), False, 'import whisper\n'), ((2619, 2646), 'os.path.exists', 'os.path.exists', (['srtFilename'], {}), '(srtFilename)\n', (2633, 2646), False, 'import os\n'), ((3357, 3388), 'os.path.exists', 'os.path.exists', (['output_filename'], {}), '(output_filename)\n', (3371, 3388), False, 'import os\n'), ((3533, 3575), 'subprocess.call', 'subprocess.call', (['embed_command'], {'shell': '(True)'}), '(embed_command, shell=True)\n', (3548, 3575), False, 'import subprocess\n'), ((2660, 2682), 'os.remove', 'os.remove', (['srtFilename'], {}), '(srtFilename)\n', (2669, 2682), False, 'import os\n'), ((3402, 3428), 'os.remove', 'os.remove', (['output_filename'], {}), '(output_filename)\n', (3411, 3428), False, 'import os\n'), ((2215, 2236), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (2234, 2236), False, 'from langchain.callbacks import get_openai_callback\n'), ((2347, 2361), 'tqdm.tqdm', 'tqdm', (['segments'], {}), '(segments)\n', (2351, 2361), False, 'from tqdm import tqdm\n')]
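A direct-call sketch for gen_srt defined above; the video path is a placeholder, ffmpeg must be available on PATH, and an OpenAI key is needed when translate=True:

gen_srt(
    "talk.mp4",  # placeholder video path
    model_name="medium",
    from_language="English",
    to_language="Chinese",
    embed=True,
    translate=True,
)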
import langchain_helper as lch
import streamlit as st

st.title('Pets name generator')

animal_type = st.sidebar.selectbox("What is your pet?", ("cat", "dog", "bird", "fish"))

if animal_type == 'cat':
    pet_color = st.sidebar.text_area(label="What color is your cat?", max_chars=15)
if animal_type == 'dog':
    pet_color = st.sidebar.text_area(label="What color is your dog?", max_chars=15)
if animal_type == 'bird':
    pet_color = st.sidebar.text_area(label="What color is your bird?", max_chars=15)

if pet_color:
    response = lch.generate_pet_nam(animal_type, pet_color)
    st.text(response['pet_name'])
[ "langchain_helper.generate_pet_nam" ]
[((55, 86), 'streamlit.title', 'st.title', (['"""Pets name generator"""'], {}), "('Pets name generator')\n", (63, 86), True, 'import streamlit as st\n'), ((102, 175), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""What is your pet?"""', "('cat', 'dog', 'bird', 'fish')"], {}), "('What is your pet?', ('cat', 'dog', 'bird', 'fish'))\n", (122, 175), True, 'import streamlit as st\n'), ((217, 284), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', ([], {'label': '"""What color is your cat?"""', 'max_chars': '(15)'}), "(label='What color is your cat?', max_chars=15)\n", (237, 284), True, 'import streamlit as st\n'), ((326, 393), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', ([], {'label': '"""What color is your dog?"""', 'max_chars': '(15)'}), "(label='What color is your dog?', max_chars=15)\n", (346, 393), True, 'import streamlit as st\n'), ((436, 504), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', ([], {'label': '"""What color is your bird?"""', 'max_chars': '(15)'}), "(label='What color is your bird?', max_chars=15)\n", (456, 504), True, 'import streamlit as st\n'), ((535, 579), 'langchain_helper.generate_pet_nam', 'lch.generate_pet_nam', (['animal_type', 'pet_color'], {}), '(animal_type, pet_color)\n', (555, 579), True, 'import langchain_helper as lch\n'), ((584, 613), 'streamlit.text', 'st.text', (["response['pet_name']"], {}), "(response['pet_name'])\n", (591, 613), True, 'import streamlit as st\n')]
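# The pet-name row above delegates to a local "langchain_helper" module that is
# not part of this dataset. A hedged sketch of what such a helper could look
# like, built only from APIs that appear elsewhere in these rows (PromptTemplate,
# OpenAI, LLMChain); the prompt wording and the "pet_name" output key are
# assumptions, not the original module's implementation.
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

def generate_pet_name(animal_type: str, pet_color: str) -> dict:
    llm = OpenAI(temperature=0.7)
    prompt = PromptTemplate(
        input_variables=["animal_type", "pet_color"],
        template="Suggest one name for a {pet_color} {animal_type}.",
    )
    chain = LLMChain(llm=llm, prompt=prompt, output_key="pet_name")
    # Returns a dict containing the inputs plus the generated "pet_name" key.
    return chain({"animal_type": animal_type, "pet_color": pet_color})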
from langchain import OpenAI, LLMChain from langchain.callbacks import StdOutCallbackHandler from langchain.chat_models import ChatOpenAI from src.agents.chat_chain import ChatChain from src.agents.graphdb_traversal_chain import GraphDBTraversalChain, mem_query_template, mem_system_message from src.memory.triple_modal_memory import TripleModalMemory import os from dotenv import load_dotenv # Set up the cache import langchain from langchain.cache import SQLiteCache langchain.llm_cache = SQLiteCache(database_path=".langchain.db") # initialize the memory load_dotenv() uri = os.getenv("NEO4J_URI") user = os.getenv("NEO4J_USER") password = os.getenv("NEO4J_PASSWORD") mem = TripleModalMemory(uri, user, password) # Create memory from docks or load from file if it exists ingested = os.path.exists('../data/triple_modal_memory.faiss') if not ingested: knowledge_path = r'C:\Users\colli\Documents\AIPapers' mem.ingest_docs(knowledge_path) mem.save() print("Memory initialized and saved.") else: mem.load() print("Memory loaded.") handler = StdOutCallbackHandler() llm = ChatOpenAI( model_name="gpt-4", #"gpt-3.5-turbo" temperature=0, verbose=True ) chain = ChatChain(llm=llm, prompt=mem_query_template, callbacks=[handler], system_message=mem_system_message) knowledge_base_query_agent = GraphDBTraversalChain(llm_chain=chain, graph_vector_store=mem.vector_store) # Example Research questions: # What are different methods of providing language models with additional context to better answer questions? # How can semantic search be used in conjunction with large language models in order to better answer questions? # What are some techniques for achieving better general intelligence in language models? def main_loop(): try: while True: question = input("Enter a question: ") print(knowledge_base_query_agent.run(question)) except KeyboardInterrupt: print("Shutdown: Saving...") mem.save() print("Shutdown: Complete") else: print("Completed all tasks.") if __name__ == '__main__': main_loop()
[ "langchain.callbacks.StdOutCallbackHandler", "langchain.chat_models.ChatOpenAI", "langchain.cache.SQLiteCache" ]
[((495, 537), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': '""".langchain.db"""'}), "(database_path='.langchain.db')\n", (506, 537), False, 'from langchain.cache import SQLiteCache\n'), ((563, 576), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (574, 576), False, 'from dotenv import load_dotenv\n'), ((583, 605), 'os.getenv', 'os.getenv', (['"""NEO4J_URI"""'], {}), "('NEO4J_URI')\n", (592, 605), False, 'import os\n'), ((613, 636), 'os.getenv', 'os.getenv', (['"""NEO4J_USER"""'], {}), "('NEO4J_USER')\n", (622, 636), False, 'import os\n'), ((648, 675), 'os.getenv', 'os.getenv', (['"""NEO4J_PASSWORD"""'], {}), "('NEO4J_PASSWORD')\n", (657, 675), False, 'import os\n'), ((683, 721), 'src.memory.triple_modal_memory.TripleModalMemory', 'TripleModalMemory', (['uri', 'user', 'password'], {}), '(uri, user, password)\n', (700, 721), False, 'from src.memory.triple_modal_memory import TripleModalMemory\n'), ((792, 843), 'os.path.exists', 'os.path.exists', (['"""../data/triple_modal_memory.faiss"""'], {}), "('../data/triple_modal_memory.faiss')\n", (806, 843), False, 'import os\n'), ((1074, 1097), 'langchain.callbacks.StdOutCallbackHandler', 'StdOutCallbackHandler', ([], {}), '()\n', (1095, 1097), False, 'from langchain.callbacks import StdOutCallbackHandler\n'), ((1105, 1164), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-4"""', 'temperature': '(0)', 'verbose': '(True)'}), "(model_name='gpt-4', temperature=0, verbose=True)\n", (1115, 1164), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1204, 1309), 'src.agents.chat_chain.ChatChain', 'ChatChain', ([], {'llm': 'llm', 'prompt': 'mem_query_template', 'callbacks': '[handler]', 'system_message': 'mem_system_message'}), '(llm=llm, prompt=mem_query_template, callbacks=[handler],\n system_message=mem_system_message)\n', (1213, 1309), False, 'from src.agents.chat_chain import ChatChain\n'), ((1335, 1410), 'src.agents.graphdb_traversal_chain.GraphDBTraversalChain', 'GraphDBTraversalChain', ([], {'llm_chain': 'chain', 'graph_vector_store': 'mem.vector_store'}), '(llm_chain=chain, graph_vector_store=mem.vector_store)\n', (1356, 1410), False, 'from src.agents.graphdb_traversal_chain import GraphDBTraversalChain, mem_query_template, mem_system_message\n')]
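# A minimal sketch of the LLM-cache setup used in the row above: assigning a
# SQLiteCache to langchain.llm_cache makes repeated identical prompts hit the
# local database instead of the API. The database path and the prompt are
# assumptions; temperature=0 is chosen so the cached completion is reusable.
import langchain
from langchain.cache import SQLiteCache
from langchain.llms import OpenAI

langchain.llm_cache = SQLiteCache(database_path=".langchain.db")

llm = OpenAI(temperature=0)
print(llm("Name one JVM language."))  # first call goes to the API
print(llm("Name one JVM language."))  # second call is served from the SQLite cache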
from __future__ import annotations import logging from functools import lru_cache from typing import List, Optional import langchain from langchain.agents import AgentExecutor, Tool, initialize_agent from langchain.agents.agent_types import AgentType from langchain.callbacks import get_openai_callback from langchain.chat_models import ChatOpenAI from langchain.chat_models.base import BaseChatModel from langchain.memory.chat_memory import BaseChatMemory from langchain.schema.messages import BaseMessage from langchain_experimental.plan_and_execute import ( PlanAndExecute, load_agent_executor, load_chat_planner, ) from expert_gpts.llms.agent import HUMAN_SUFFIX, SYSTEM_PREFIX, ConvoOutputCustomParser from shared.llm_manager_base import BaseLLMManager, Cost from shared.llms.openai import GPT_3_5_TURBO, GPT_4, TEXT_ADA_EMBEDDING from shared.llms.system_prompts import PLANNER_SYSTEM_PROMPT langchain.debug = True logger = logging.getLogger(__name__) COSTS = { GPT_3_5_TURBO: Cost(prompt=0.0015, completion=0.002), GPT_4: Cost(prompt=0.03, completion=0.05), TEXT_ADA_EMBEDDING: Cost(prompt=0.0001, completion=0.0001), } class OpenAIApiManager(BaseLLMManager): _agents = {} def __init__(self): super().__init__(COSTS) def get_agent_executor( self, llm, agent_type: AgentType = AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, memory: Optional[BaseChatMemory] = None, tools: Optional[List[Tool]] = None, system_message: Optional[str] = SYSTEM_PREFIX, human_message: Optional[str] = HUMAN_SUFFIX, ) -> AgentExecutor: agent_kwargs = { "output_parser": ConvoOutputCustomParser(), } if system_message: agent_kwargs["system_message"] = system_message if human_message: agent_kwargs["human_message"] = human_message return initialize_agent( tools=tools, llm=llm, agent=agent_type, memory=memory, agent_kwargs=agent_kwargs, ) def create_chat_completion( self, messages: List[BaseMessage], # type: ignore model: str | None = GPT_3_5_TURBO, temperature: float = 0, max_tokens: int | None = None, deployment_id=None, openai_api_key=None, ) -> str: llm = self.get_llm(max_tokens, model, temperature) with get_openai_callback() as cb: response = llm(messages, callbacks=[self.callbacks_handler]) self.update_cost(cb) return response.content def create_chat_completion_with_agent( self, user_input: str, # type: ignore agent_type: AgentType = AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, model: str | None = GPT_3_5_TURBO, agent_key: str = "default", temperature: float = 0, max_tokens: int | None = None, memory: Optional[BaseChatMemory] = None, tools: Optional[List[Tool]] = None, ) -> str: llm = self.get_llm(max_tokens, model, temperature) if agent_key not in self._agents: self._agents[agent_key] = self.get_agent_executor( llm, agent_type, memory, tools ) agent = self._agents[agent_key] with get_openai_callback() as cb: response = agent.run(input=user_input, callbacks=[self.callbacks_handler]) self.update_cost(cb) return response def execute_plan( self, user_input: str, # type: ignore model: str | None = GPT_3_5_TURBO, agent_key: str = "default_plan", temperature: float = 0, max_tokens: int | None = None, tools: Optional[List[Tool]] = None, ) -> str: llm = self.get_llm(max_tokens, model, temperature) if agent_key not in self._agents: planner = load_chat_planner(llm, system_prompt=PLANNER_SYSTEM_PROMPT) executor = load_agent_executor(llm, tools, verbose=True) agent = PlanAndExecute(planner=planner, executor=executor, verbose=True) self._agents[agent_key] = agent agent = self._agents[agent_key] with get_openai_callback() as cb: response = 
agent.run(input=user_input, callbacks=[self.callbacks_handler]) self.update_cost(cb) return response @lru_cache def get_llm( self, max_tokens, model, temperature, as_predictor: bool = False ) -> BaseChatModel: llm = ChatOpenAI( model_name=model, temperature=temperature, max_tokens=max_tokens, ) return llm
[ "langchain.agents.initialize_agent", "langchain_experimental.plan_and_execute.PlanAndExecute", "langchain.chat_models.ChatOpenAI", "langchain_experimental.plan_and_execute.load_chat_planner", "langchain.callbacks.get_openai_callback", "langchain_experimental.plan_and_execute.load_agent_executor" ]
[((946, 973), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (963, 973), False, 'import logging\n'), ((1004, 1041), 'shared.llm_manager_base.Cost', 'Cost', ([], {'prompt': '(0.0015)', 'completion': '(0.002)'}), '(prompt=0.0015, completion=0.002)\n', (1008, 1041), False, 'from shared.llm_manager_base import BaseLLMManager, Cost\n'), ((1054, 1088), 'shared.llm_manager_base.Cost', 'Cost', ([], {'prompt': '(0.03)', 'completion': '(0.05)'}), '(prompt=0.03, completion=0.05)\n', (1058, 1088), False, 'from shared.llm_manager_base import BaseLLMManager, Cost\n'), ((1114, 1152), 'shared.llm_manager_base.Cost', 'Cost', ([], {'prompt': '(0.0001)', 'completion': '(0.0001)'}), '(prompt=0.0001, completion=0.0001)\n', (1118, 1152), False, 'from shared.llm_manager_base import BaseLLMManager, Cost\n'), ((1906, 2008), 'langchain.agents.initialize_agent', 'initialize_agent', ([], {'tools': 'tools', 'llm': 'llm', 'agent': 'agent_type', 'memory': 'memory', 'agent_kwargs': 'agent_kwargs'}), '(tools=tools, llm=llm, agent=agent_type, memory=memory,\n agent_kwargs=agent_kwargs)\n', (1922, 2008), False, 'from langchain.agents import AgentExecutor, Tool, initialize_agent\n'), ((4514, 4590), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'model', 'temperature': 'temperature', 'max_tokens': 'max_tokens'}), '(model_name=model, temperature=temperature, max_tokens=max_tokens)\n', (4524, 4590), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1683, 1708), 'expert_gpts.llms.agent.ConvoOutputCustomParser', 'ConvoOutputCustomParser', ([], {}), '()\n', (1706, 1708), False, 'from expert_gpts.llms.agent import HUMAN_SUFFIX, SYSTEM_PREFIX, ConvoOutputCustomParser\n'), ((2434, 2455), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (2453, 2455), False, 'from langchain.callbacks import get_openai_callback\n'), ((3307, 3328), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (3326, 3328), False, 'from langchain.callbacks import get_openai_callback\n'), ((3890, 3949), 'langchain_experimental.plan_and_execute.load_chat_planner', 'load_chat_planner', (['llm'], {'system_prompt': 'PLANNER_SYSTEM_PROMPT'}), '(llm, system_prompt=PLANNER_SYSTEM_PROMPT)\n', (3907, 3949), False, 'from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner\n'), ((3973, 4018), 'langchain_experimental.plan_and_execute.load_agent_executor', 'load_agent_executor', (['llm', 'tools'], {'verbose': '(True)'}), '(llm, tools, verbose=True)\n', (3992, 4018), False, 'from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner\n'), ((4039, 4103), 'langchain_experimental.plan_and_execute.PlanAndExecute', 'PlanAndExecute', ([], {'planner': 'planner', 'executor': 'executor', 'verbose': '(True)'}), '(planner=planner, executor=executor, verbose=True)\n', (4053, 4103), False, 'from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner\n'), ((4201, 4222), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (4220, 4222), False, 'from langchain.callbacks import get_openai_callback\n')]
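# Condensed sketch of the plan-and-execute pattern wrapped by the manager above:
# load_chat_planner decomposes the request into steps and load_agent_executor
# runs each step with the supplied tools. The single llm-math tool and the
# question are illustrative assumptions.
from langchain.chat_models import ChatOpenAI
from langchain.agents import load_tools
from langchain_experimental.plan_and_execute import (
    PlanAndExecute,
    load_agent_executor,
    load_chat_planner,
)

llm = ChatOpenAI(temperature=0)
tools = load_tools(["llm-math"], llm=llm)

planner = load_chat_planner(llm)
executor = load_agent_executor(llm, tools, verbose=True)
agent = PlanAndExecute(planner=planner, executor=executor, verbose=True)

print(agent.run("What is 3 raised to the 0.5 power, rounded to two decimals?"))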
import os import utils import traceback from langchain.chains.qa_with_sources import load_qa_with_sources_chain from langchain.chains import ConversationChain from langchain.llms import OpenAI import langchain from langchain.cache import InMemoryCache from langchain.llms import OpenAI from langchain.chains.conversation.memory import ConversationSummaryBufferMemory,ConversationBufferMemory,ConversationBufferWindowMemory from langchain.prompts import PromptTemplate from embeddings import EmbeddingsManager from flask import Flask, send_from_directory import json import time import threading import secrets import string import hashlib from flask import request from langchain.cache import InMemoryCache,SQLiteCache import re import requests from waitress import serve from translator import Translator import sys from query.discoursequery import DiscourseQuery from query.embeddingsquery import EmbeddingsQuery from Summary import Summary import uuid from langchain.llms import NLPCloud from langchain.llms import AI21 from langchain.llms import Cohere from SmartCache import SmartCache CONFIG=None QUERIERS=[] args=sys.argv confiFile=args[1] if len(args)>1 else "config.json" print("Use config file", confiFile) with open(confiFile, "r") as f: CONFIG=json.load(f) EmbeddingsManager.init(CONFIG) Summary.init(CONFIG) QUERIERS=[ EmbeddingsQuery(CONFIG), DiscourseQuery( CONFIG,CONFIG["JME_HUB_URL"], searchFilter=CONFIG["JME_HUB_SEARCH_FILTER"], knowledgeCutoff=CONFIG["JME_HUB_KNOWLEDGE_CUTOFF"] ) ] Translator.init(CONFIG) def getAffineDocs(question,context,keywords,shortQuestion, wordSalad=None, unitFilter=None, maxFragmentsToReturn=3, maxFragmentsToSelect=12,merge=False): affineDocs=[] for q in QUERIERS: print("Get affine docs from",q,"using question",question,"with context",context,"and keywords",keywords) t=time.time() v=q.getAffineDocs( question, context, keywords,shortQuestion, wordSalad, unitFilter, maxFragmentsToReturn=maxFragmentsToReturn, maxFragmentsToSelect=maxFragmentsToSelect, merge=merge ) print("Completed in",time.time()-t,"seconds.") if v!=None: affineDocs.extend(v) return affineDocs def rewriteError(error): if error.startswith("Rate limit reached ") : return "Rate limit." 
def rewrite(question): # replace app, applet, game, application with simple application question=re.sub(r"\b(app|applet|game|application)\b", "simple application", question, flags=re.IGNORECASE) return question def createChain(): # Backward compatibility model_name=CONFIG.get("OPENAI_MODEL","text-davinci-003") llm_name="openai" ######## llmx=CONFIG.get("LLM_MODEL",None) # "openai:text-davinci-003" "cohere:xlarge" if llmx!=None: if ":" in llmx: llm_name,model_name=llmx.split(":") else: llm_name,model_name=llmx.split(".") template = "" template_path="prompts/"+llm_name+"."+model_name+".txt" if not os.path.exists(template_path): template_path="prompts/openai.text-davinci-003.txt" with open(template_path, "r") as f: template=f.read() prompt = PromptTemplate( input_variables=[ "history", "question", "summaries"], template=template ) llm=None history_length=700 if llm_name=="openai": max_tokens=512 temperature=0.0 if model_name=="text-davinci-003": max_tokens=512 elif model_name=="code-davinci-002": max_tokens=1024 #history_length=1024 llm=OpenAI( temperature=temperature, model_name=model_name, max_tokens=max_tokens, ) elif llm_name=="cohere": llm=Cohere( model=model_name, max_tokens=700 ) history_length=200 elif llm_name=="ai21": llm=AI21( temperature=0.7, model=model_name, ) elif llm_name=="nlpcloud": llm=NLPCloud( model_name=model_name, ) else: raise Exception("Unknown LLM "+llm_name) print("Use model ",model_name,"from",llm_name) memory=ConversationSummaryBufferMemory(llm=llm, max_token_limit=history_length,human_prefix="QUESTION",ai_prefix="ANSWER", memory_key="history", input_key="question") chain = load_qa_with_sources_chain( llm, memory=memory, prompt=prompt, verbose=True, ) return chain def extractQuestionData(question,wordSalad): shortQuestion=Summary.summarizeMarkdown(question,min_length=100,max_length=1024,withCodeBlocks=False) context=Summary.summarizeText(wordSalad,min_length=20,max_length=32) keywords=[] keywords.extend(Summary.getKeywords(shortQuestion,2)) keywords.extend(Summary.getKeywords(Summary.summarizeText(wordSalad,min_length=10,max_length=20),3)) return [question,shortQuestion,context,keywords,wordSalad] def queryChain(chain,question): wordSalad="" for h in chain.memory.buffer: wordSalad+=h+" " wordSalad+=" "+question [question,shortQuestion,context,keywords,wordSalad]=utils.enqueue(lambda :extractQuestionData(question,wordSalad)) affineDocs=utils.enqueue(lambda :getAffineDocs(question,context,keywords,shortQuestion,wordSalad)) print("Found ",len(affineDocs), " affine docs") print("Q: ", shortQuestion) output=chain({"input_documents": affineDocs, "question": shortQuestion}, return_only_outputs=True) print("A :",output) return output sessions={} langchain.llm_cache = SmartCache(CONFIG)#SQLiteCache(database_path=CONFIG["CACHE_PATH"]+"/langchain.db") def clearSessions(): while True: time.sleep(60*5) for session in sessions: if sessions[session]["timeout"] < time.time(): del sessions[session] threading.Thread(target=clearSessions).start() def createSessionSecret(): hex_chars = string.hexdigits timeHash=hashlib.sha256(str(time.time()).encode("utf-8")).hexdigest()[:12] return ''.join(secrets.choice(hex_chars) for i in range(64))+timeHash app = Flask(__name__) @app.route("/langs") def langs(): return json.dumps(Translator.getLangs()) @app.route("/session",methods = ['POST']) def session(): body=request.get_json() lang=body["lang"] if "lang" in body else "en" if lang=="auto": lang="en" if not "sessionSecret" in body or body["sessionSecret"].strip()=="": 
sessionSecret=createSessionSecret() else: sessionSecret=body["sessionSecret"] if sessionSecret not in sessions: sessions[sessionSecret]={ "chain": createChain(), "timeout": time.time()+60*30 } else: sessions[sessionSecret]["timeout"]=time.time()+60*30 welcomeText="" welcomeText+=Translator.translate("en", lang,"Hi there! I'm an AI assistant for the open source game engine jMonkeyEngine. I can help you with questions related to the jMonkeyEngine source code, documentation, and other related topics.") welcomeText+="<br><br>" welcomeText+="<footer><span class=\"material-symbols-outlined\">tips_and_updates</span><span>"+Translator.translate("en", lang,"This chat bot is intended to provide helpful information, but accuracy is not guaranteed.")+"</span></footer>" return json.dumps( { "sessionSecret": sessionSecret, "helloText":Translator.translate("en",lang,"Who are you?"), "welcomeText":welcomeText }) @app.route("/query",methods = ['POST']) def query(): try: body=request.get_json() question=rewrite(body["question"]) lang=body["lang"] if "lang" in body else "en" if lang == "auto": lang=Translator.detect(question) if lang!="en": question=Translator.translate(lang,"en",question) if len(question)==0: raise Exception("Question is empty") sessionSecret=body["sessionSecret"] if sessionSecret not in sessions: return json.dumps({"error": "Session expired"}) chain=sessions[sessionSecret]["chain"] output=queryChain(chain,question) if lang!="en": output["output_text"]=Translator.translate("en",lang,output["output_text"]) #print(chain.memory.buffer) return json.dumps(output) except Exception as e: print(e) print(traceback.format_exc()) errorStr=str(e) errorStr=rewriteError(errorStr) return json.dumps({"error": errorStr}) @app.route('/<path:filename>') def serveFrontend(filename): return send_from_directory('frontend/', filename) @app.route('/') def serveIndex(): return send_from_directory('frontend/', "index.html") @app.route('/docs', methods=['POST']) def docs(): body=request.get_json() question=body["question"] maxFragmentsToReturn=int(body.get("maxFragmentsToReturn",3)) maxFragmentsToSelect=int(body.get("maxFragmentsToReturn",6)) wordSalad=body.get("context","")+" "+question [question,shortQuestion,context,keywords,wordSalad]=utils.enqueue(lambda : extractQuestionData(question,wordSalad)) affineDocs=utils.enqueue(lambda : getAffineDocs( question,context,keywords,shortQuestion,wordSalad, maxFragmentsToReturn=maxFragmentsToReturn, maxFragmentsToSelect=maxFragmentsToSelect )) plainDocs=[ { "content":doc.page_content, "metadata":doc.metadata } for doc in affineDocs ] return json.dumps(plainDocs) serve(app, host="0.0.0.0", port=8080, connection_limit=1000)
[ "langchain.chains.conversation.memory.ConversationSummaryBufferMemory", "langchain.llms.OpenAI", "langchain.llms.AI21", "langchain.llms.Cohere", "langchain.chains.qa_with_sources.load_qa_with_sources_chain", "langchain.llms.NLPCloud", "langchain.prompts.PromptTemplate" ]
[((5785, 5803), 'SmartCache.SmartCache', 'SmartCache', (['CONFIG'], {}), '(CONFIG)\n', (5795, 5803), False, 'from SmartCache import SmartCache\n'), ((6330, 6345), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (6335, 6345), False, 'from flask import Flask, send_from_directory\n'), ((9830, 9890), 'waitress.serve', 'serve', (['app'], {'host': '"""0.0.0.0"""', 'port': '(8080)', 'connection_limit': '(1000)'}), "(app, host='0.0.0.0', port=8080, connection_limit=1000)\n", (9835, 9890), False, 'from waitress import serve\n'), ((1263, 1275), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1272, 1275), False, 'import json\n'), ((1280, 1310), 'embeddings.EmbeddingsManager.init', 'EmbeddingsManager.init', (['CONFIG'], {}), '(CONFIG)\n', (1302, 1310), False, 'from embeddings import EmbeddingsManager\n'), ((1315, 1335), 'Summary.Summary.init', 'Summary.init', (['CONFIG'], {}), '(CONFIG)\n', (1327, 1335), False, 'from Summary import Summary\n'), ((1591, 1614), 'translator.Translator.init', 'Translator.init', (['CONFIG'], {}), '(CONFIG)\n', (1606, 1614), False, 'from translator import Translator\n'), ((2557, 2659), 're.sub', 're.sub', (['"""\\\\b(app|applet|game|application)\\\\b"""', '"""simple application"""', 'question'], {'flags': 're.IGNORECASE'}), "('\\\\b(app|applet|game|application)\\\\b', 'simple application',\n question, flags=re.IGNORECASE)\n", (2563, 2659), False, 'import re\n'), ((3341, 3432), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['history', 'question', 'summaries']", 'template': 'template'}), "(input_variables=['history', 'question', 'summaries'],\n template=template)\n", (3355, 3432), False, 'from langchain.prompts import PromptTemplate\n'), ((4371, 4540), 'langchain.chains.conversation.memory.ConversationSummaryBufferMemory', 'ConversationSummaryBufferMemory', ([], {'llm': 'llm', 'max_token_limit': 'history_length', 'human_prefix': '"""QUESTION"""', 'ai_prefix': '"""ANSWER"""', 'memory_key': '"""history"""', 'input_key': '"""question"""'}), "(llm=llm, max_token_limit=history_length,\n human_prefix='QUESTION', ai_prefix='ANSWER', memory_key='history',\n input_key='question')\n", (4402, 4540), False, 'from langchain.chains.conversation.memory import ConversationSummaryBufferMemory, ConversationBufferMemory, ConversationBufferWindowMemory\n'), ((4543, 4618), 'langchain.chains.qa_with_sources.load_qa_with_sources_chain', 'load_qa_with_sources_chain', (['llm'], {'memory': 'memory', 'prompt': 'prompt', 'verbose': '(True)'}), '(llm, memory=memory, prompt=prompt, verbose=True)\n', (4569, 4618), False, 'from langchain.chains.qa_with_sources import load_qa_with_sources_chain\n'), ((4748, 4842), 'Summary.Summary.summarizeMarkdown', 'Summary.summarizeMarkdown', (['question'], {'min_length': '(100)', 'max_length': '(1024)', 'withCodeBlocks': '(False)'}), '(question, min_length=100, max_length=1024,\n withCodeBlocks=False)\n', (4773, 4842), False, 'from Summary import Summary\n'), ((4849, 4911), 'Summary.Summary.summarizeText', 'Summary.summarizeText', (['wordSalad'], {'min_length': '(20)', 'max_length': '(32)'}), '(wordSalad, min_length=20, max_length=32)\n', (4870, 4911), False, 'from Summary import Summary\n'), ((6497, 6515), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (6513, 6515), False, 'from flask import request\n'), ((7046, 7280), 'translator.Translator.translate', 'Translator.translate', (['"""en"""', 'lang', '"""Hi there! I\'m an AI assistant for the open source game engine jMonkeyEngine. 
I can help you with questions related to the jMonkeyEngine source code, documentation, and other related topics."""'], {}), '(\'en\', lang,\n "Hi there! I\'m an AI assistant for the open source game engine jMonkeyEngine. I can help you with questions related to the jMonkeyEngine source code, documentation, and other related topics."\n )\n', (7066, 7280), False, 'from translator import Translator\n'), ((8890, 8932), 'flask.send_from_directory', 'send_from_directory', (['"""frontend/"""', 'filename'], {}), "('frontend/', filename)\n", (8909, 8932), False, 'from flask import Flask, send_from_directory\n'), ((8979, 9025), 'flask.send_from_directory', 'send_from_directory', (['"""frontend/"""', '"""index.html"""'], {}), "('frontend/', 'index.html')\n", (8998, 9025), False, 'from flask import Flask, send_from_directory\n'), ((9086, 9104), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (9102, 9104), False, 'from flask import request\n'), ((9806, 9827), 'json.dumps', 'json.dumps', (['plainDocs'], {}), '(plainDocs)\n', (9816, 9827), False, 'import json\n'), ((1359, 1382), 'query.embeddingsquery.EmbeddingsQuery', 'EmbeddingsQuery', (['CONFIG'], {}), '(CONFIG)\n', (1374, 1382), False, 'from query.embeddingsquery import EmbeddingsQuery\n'), ((1392, 1545), 'query.discoursequery.DiscourseQuery', 'DiscourseQuery', (['CONFIG', "CONFIG['JME_HUB_URL']"], {'searchFilter': "CONFIG['JME_HUB_SEARCH_FILTER']", 'knowledgeCutoff': "CONFIG['JME_HUB_KNOWLEDGE_CUTOFF']"}), "(CONFIG, CONFIG['JME_HUB_URL'], searchFilter=CONFIG[\n 'JME_HUB_SEARCH_FILTER'], knowledgeCutoff=CONFIG[\n 'JME_HUB_KNOWLEDGE_CUTOFF'])\n", (1406, 1545), False, 'from query.discoursequery import DiscourseQuery\n'), ((1943, 1954), 'time.time', 'time.time', ([], {}), '()\n', (1952, 1954), False, 'import time\n'), ((3165, 3194), 'os.path.exists', 'os.path.exists', (['template_path'], {}), '(template_path)\n', (3179, 3194), False, 'import os\n'), ((3764, 3841), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': 'temperature', 'model_name': 'model_name', 'max_tokens': 'max_tokens'}), '(temperature=temperature, model_name=model_name, max_tokens=max_tokens)\n', (3770, 3841), False, 'from langchain.llms import OpenAI\n'), ((4946, 4983), 'Summary.Summary.getKeywords', 'Summary.getKeywords', (['shortQuestion', '(2)'], {}), '(shortQuestion, 2)\n', (4965, 4983), False, 'from Summary import Summary\n'), ((5914, 5932), 'time.sleep', 'time.sleep', (['(60 * 5)'], {}), '(60 * 5)\n', (5924, 5932), False, 'import time\n'), ((6061, 6099), 'threading.Thread', 'threading.Thread', ([], {'target': 'clearSessions'}), '(target=clearSessions)\n', (6077, 6099), False, 'import threading\n'), ((6407, 6428), 'translator.Translator.getLangs', 'Translator.getLangs', ([], {}), '()\n', (6426, 6428), False, 'from translator import Translator\n'), ((7801, 7819), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (7817, 7819), False, 'from flask import request\n'), ((8605, 8623), 'json.dumps', 'json.dumps', (['output'], {}), '(output)\n', (8615, 8623), False, 'import json\n'), ((3930, 3970), 'langchain.llms.Cohere', 'Cohere', ([], {'model': 'model_name', 'max_tokens': '(700)'}), '(model=model_name, max_tokens=700)\n', (3936, 3970), False, 'from langchain.llms import Cohere\n'), ((5024, 5086), 'Summary.Summary.summarizeText', 'Summary.summarizeText', (['wordSalad'], {'min_length': '(10)', 'max_length': '(20)'}), '(wordSalad, min_length=10, max_length=20)\n', (5045, 5086), False, 'from Summary import Summary\n'), ((6992, 7003), 'time.time', 
'time.time', ([], {}), '()\n', (7001, 7003), False, 'import time\n'), ((7398, 7532), 'translator.Translator.translate', 'Translator.translate', (['"""en"""', 'lang', '"""This chat bot is intended to provide helpful information, but accuracy is not guaranteed."""'], {}), "('en', lang,\n 'This chat bot is intended to provide helpful information, but accuracy is not guaranteed.'\n )\n", (7418, 7532), False, 'from translator import Translator\n'), ((7636, 7684), 'translator.Translator.translate', 'Translator.translate', (['"""en"""', 'lang', '"""Who are you?"""'], {}), "('en', lang, 'Who are you?')\n", (7656, 7684), False, 'from translator import Translator\n'), ((7971, 7998), 'translator.Translator.detect', 'Translator.detect', (['question'], {}), '(question)\n', (7988, 7998), False, 'from translator import Translator\n'), ((8044, 8086), 'translator.Translator.translate', 'Translator.translate', (['lang', '"""en"""', 'question'], {}), "(lang, 'en', question)\n", (8064, 8086), False, 'from translator import Translator\n'), ((8290, 8330), 'json.dumps', 'json.dumps', (["{'error': 'Session expired'}"], {}), "({'error': 'Session expired'})\n", (8300, 8330), False, 'import json\n'), ((8499, 8554), 'translator.Translator.translate', 'Translator.translate', (['"""en"""', 'lang', "output['output_text']"], {}), "('en', lang, output['output_text'])\n", (8519, 8554), False, 'from translator import Translator\n'), ((8785, 8816), 'json.dumps', 'json.dumps', (["{'error': errorStr}"], {}), "({'error': errorStr})\n", (8795, 8816), False, 'import json\n'), ((2241, 2252), 'time.time', 'time.time', ([], {}), '()\n', (2250, 2252), False, 'import time\n'), ((4072, 4111), 'langchain.llms.AI21', 'AI21', ([], {'temperature': '(0.7)', 'model': 'model_name'}), '(temperature=0.7, model=model_name)\n', (4076, 4111), False, 'from langchain.llms import AI21\n'), ((6010, 6021), 'time.time', 'time.time', ([], {}), '()\n', (6019, 6021), False, 'import time\n'), ((6267, 6292), 'secrets.choice', 'secrets.choice', (['hex_chars'], {}), '(hex_chars)\n', (6281, 6292), False, 'import secrets\n'), ((6911, 6922), 'time.time', 'time.time', ([], {}), '()\n', (6920, 6922), False, 'import time\n'), ((8682, 8704), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (8702, 8704), False, 'import traceback\n'), ((4193, 4224), 'langchain.llms.NLPCloud', 'NLPCloud', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (4201, 4224), False, 'from langchain.llms import NLPCloud\n'), ((6201, 6212), 'time.time', 'time.time', ([], {}), '()\n', (6210, 6212), False, 'import time\n')]
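# Condensed sketch of the question-answering core of the Flask app above:
# load_qa_with_sources_chain stuffs retrieved documents plus the question into
# one prompt. The two hard-coded Documents stand in for the app's embedding and
# Discourse retrieval step, and their contents are assumptions.
from langchain.llms import OpenAI
from langchain.docstore.document import Document
from langchain.chains.qa_with_sources import load_qa_with_sources_chain

chain = load_qa_with_sources_chain(OpenAI(temperature=0), chain_type="stuff")

docs = [
    Document(page_content="jMonkeyEngine is an open source Java game engine.",
             metadata={"source": "doc-1"}),
    Document(page_content="Scenes are organised as a graph of spatials.",
             metadata={"source": "doc-2"}),
]
output = chain({"input_documents": docs, "question": "What is jMonkeyEngine?"},
               return_only_outputs=True)
print(output["output_text"])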
#!/usr/bin/env python # coding: utf-8 # #### Document summarization application Falcon LLM using Sagemaker Jumpstart ### Author : Dipjyoti Das ### Last Edited : Jan 19, 2024 ### This script provides an example for how to use Sagemaker Jumpstart -for text summarization use case. It used Falcon 7B open source model ### from Jumsptart model hub with Langchain. # #### Prerequisites #### AWS Innovation Sandbox should be installed and Domain created in Sagemaker # #### Deploy Falcon 7B using Sagemaker Jumpstart # To deploy your model, complete the following steps: # # 1. Navigate to your SageMaker Studio environment from the SageMaker console. # 2. Within the IDE, under SageMaker JumpStart in the navigation pane, choose Models, notebooks, solutions. # 3. Deploy the Falcon 7B Instruct model to an endpoint for inference # 4. Choose Open notebook. # 5. Run the first four cells of the notebook to deploy the Falcon 7B Instruct endpoint.You can see your deployed JumpStart models on the Launched JumpStart assets page. # 6. In the navigation pane, under SageMaker Jumpstart, choose Launched JumpStart assets. # 7. Choose the Model endpoints tab to view the status of your endpoint. ## If you get ResouceLimitExceeded error for deplying the model endpoint for a particular instance (ex : 'ml.g5.2xlarge' etc), ## please contatct AWS admin to raise a ticket and raise the account level service limit Instances. # In[41]: # Import the Boto3 and JSON modules import json import boto3 import warnings warnings.filterwarnings('ignore') # In[40]: # define a function that will call the endpoint. This function takes a dictionary payload and uses it to invoke the SageMaker runtime client. # Then it deserializes the response and prints the input and generated text. newline, bold, unbold = '\n', '\033[1m', '\033[0m' # get the endpoint after the model is deployed in Jumpstart endpoint_name ='hf-llm-falcon-7b-instruct-bf16-2024-01-19-14-57-58-026' def query_endpoint(payload): client = boto3.client('runtime.sagemaker') response = client.invoke_endpoint(EndpointName=endpoint_name, ContentType='application/json', Body=json.dumps(payload).encode('utf-8')) model_predictions = json.loads(response['Body'].read()) generated_text = model_predictions[0]['generated_text'] print ( f"Input Text: {payload['inputs']}{newline}" f"Generated Text: {bold}{generated_text}{unbold}{newline}") # In[24]: # payload includes the prompt as inputs, together with the inference parameters that will be passed to the model. # We can use these parameters with the prompt to tune the output of the model for your use case: payload = { "inputs": "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:", "parameters":{ "max_new_tokens": 50, "return_full_text": False, "do_sample": True, "top_k":10 } } # #### Query with a summarization prompt # In[ ]: # Create a sample text document (document to summarize) in your present directory - document.txt # In[25]: # create a function that uses prompt engineering techniques to summarize document.txt def summarize(text_to_summarize): summarization_prompt = """Process the following text and then perform the instructions that follow: {text_to_summarize} Provide a short summary of the preceeding text. 
Summary:""" payload = { "inputs": summarization_prompt, "parameters":{ "max_new_tokens": 150, "return_full_text": False, "do_sample": True, "top_k":10 } } response = query_endpoint(payload) # * For longer documents, an error might appear. Falcon and all other LLMs, has a limit on the number of tokens passed as input. We can get around this limit using LangChain’s enhanced summarization capabilities, which allows for a much larger input to be passed to the LLM. # In[27]: def summarize(text_to_summarize): summarization_prompt = """Process the following text and then perform the instructions that follow: {text_to_summarize} Provide a short summary of the preceeding text. Summary:""" payload = { "inputs": summarization_prompt, "parameters":{ "max_new_tokens": 150, "return_full_text": False, "do_sample": True, "top_k":10 } } response = query_endpoint(payload) print(response) with open("document.txt") as f: text_to_summarize = f.read() # In[28]: summarize(text_to_summarize) # * For longer documents, an error might appear. Falcon and all other LLMs, has a limit on the number of tokens passed as input. We can get around this limit using LangChain’s enhanced summarization capabilities, which allows for a much larger input to be passed to the LLM. # #### Import and run a summarization chain # In[42]: get_ipython().system('pip install langchain') get_ipython().system('pip install transformers') # In[26]: ls # In[30]: # Import the relevant modules and break down the long document into chunks: import langchain from langchain import SagemakerEndpoint, PromptTemplate from langchain.llms.sagemaker_endpoint import LLMContentHandler from langchain.chains.summarize import load_summarize_chain from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.docstore.document import Document text_splitter = RecursiveCharacterTextSplitter( chunk_size = 500, chunk_overlap = 20, separators = [" "], length_function = len ) input_documents = text_splitter.create_documents([text_to_summarize]) # In[31]: # To make LangChain work effectively with Falcon, we need to define the default content handler classes for valid input and output: class ContentHandlerTextSummarization(LLMContentHandler): content_type = "application/json" accepts = "application/json" def transform_input(self, prompt: str, model_kwargs={}) -> bytes: input_str = json.dumps({"inputs": prompt, **model_kwargs}) return input_str.encode("utf-8") def transform_output(self, output: bytes) -> json: response_json = json.loads(output.read().decode("utf-8")) generated_text = response_json[0]['generated_text'] return generated_text.split("summary:")[-1] content_handler = ContentHandlerTextSummarization() # In[32]: # We can define custom prompts as PromptTemplate objects, the main vehicle for prompting with LangChain, for the map-reduce summarization approach. # This is an optional step because mapping and combine prompts are provided by default if the parameters within the call to load the summarization chain # (load_summarize_chain) are undefined. 
map_prompt = """Write a concise summary of this text in a few complete sentences: {text} Concise summary:""" map_prompt_template = PromptTemplate( template=map_prompt, input_variables=["text"] ) combine_prompt = """Combine all these following summaries and generate a final summary of them in a few complete sentences: {text} Final summary:""" combine_prompt_template = PromptTemplate( template=combine_prompt, input_variables=["text"] ) # In[33]: # LangChain supports LLMs hosted on SageMaker inference endpoints, so instead of using the AWS Python SDK, we can initialize the # connection through LangChain for greater accessibility: summary_model = SagemakerEndpoint( endpoint_name = endpoint_name, region_name= "us-east-1", model_kwargs= {}, content_handler=content_handler ) # In[39]: # load in a summarization chain and run a summary on the input documents using the following code: summary_chain = load_summarize_chain(llm=summary_model, chain_type="map_reduce", map_prompt=map_prompt_template, combine_prompt=combine_prompt_template, verbose=False ) summary = summary_chain({"input_documents": input_documents, 'token_max': 700}, return_only_outputs=True) print(summary["output_text"]) # In[37]: endpoint_name # In[ ]: # delete the inference endpoint to avoid incurring unnecessary costs client = boto3.client('runtime.sagemaker') client.delete_endpoint(EndpointName=endpoint_name)
[ "langchain.chains.summarize.load_summarize_chain", "langchain.SagemakerEndpoint", "langchain.PromptTemplate", "langchain.text_splitter.RecursiveCharacterTextSplitter" ]
[((1513, 1546), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1536, 1546), False, 'import warnings\n'), ((5586, 5694), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(20)', 'separators': "[' ']", 'length_function': 'len'}), "(chunk_size=500, chunk_overlap=20, separators\n =[' '], length_function=len)\n", (5616, 5694), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((7103, 7164), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'map_prompt', 'input_variables': "['text']"}), "(template=map_prompt, input_variables=['text'])\n", (7117, 7164), False, 'from langchain import SagemakerEndpoint, PromptTemplate\n'), ((7418, 7483), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'combine_prompt', 'input_variables': "['text']"}), "(template=combine_prompt, input_variables=['text'])\n", (7432, 7483), False, 'from langchain import SagemakerEndpoint, PromptTemplate\n'), ((7787, 7912), 'langchain.SagemakerEndpoint', 'SagemakerEndpoint', ([], {'endpoint_name': 'endpoint_name', 'region_name': '"""us-east-1"""', 'model_kwargs': '{}', 'content_handler': 'content_handler'}), "(endpoint_name=endpoint_name, region_name='us-east-1',\n model_kwargs={}, content_handler=content_handler)\n", (7804, 7912), False, 'from langchain import SagemakerEndpoint, PromptTemplate\n'), ((8140, 8301), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', ([], {'llm': 'summary_model', 'chain_type': '"""map_reduce"""', 'map_prompt': 'map_prompt_template', 'combine_prompt': 'combine_prompt_template', 'verbose': '(False)'}), "(llm=summary_model, chain_type='map_reduce', map_prompt\n =map_prompt_template, combine_prompt=combine_prompt_template, verbose=False\n )\n", (8160, 8301), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((8737, 8770), 'boto3.client', 'boto3.client', (['"""runtime.sagemaker"""'], {}), "('runtime.sagemaker')\n", (8749, 8770), False, 'import boto3\n'), ((2009, 2042), 'boto3.client', 'boto3.client', (['"""runtime.sagemaker"""'], {}), "('runtime.sagemaker')\n", (2021, 2042), False, 'import boto3\n'), ((6234, 6280), 'json.dumps', 'json.dumps', (["{'inputs': prompt, **model_kwargs}"], {}), "({'inputs': prompt, **model_kwargs})\n", (6244, 6280), False, 'import json\n'), ((2146, 2165), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (2156, 2165), False, 'import json\n')]
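# Stand-alone sketch of the map-reduce summarisation flow from the notebook
# above, with the SageMaker-hosted Falcon endpoint swapped for a plain OpenAI
# LLM so the snippet runs without AWS credentials. The prompts mirror the
# originals in spirit; the input text is a placeholder assumption.
from langchain.llms import OpenAI
from langchain import PromptTemplate
from langchain.chains.summarize import load_summarize_chain
from langchain.text_splitter import RecursiveCharacterTextSplitter

text_to_summarize = "Replace this with the long document to be summarised."

splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=20)
input_documents = splitter.create_documents([text_to_summarize])

map_prompt = PromptTemplate(
    template="Write a concise summary of this text:\n{text}\nConcise summary:",
    input_variables=["text"],
)
combine_prompt = PromptTemplate(
    template="Combine these summaries into a final summary:\n{text}\nFinal summary:",
    input_variables=["text"],
)

chain = load_summarize_chain(
    llm=OpenAI(temperature=0),
    chain_type="map_reduce",
    map_prompt=map_prompt,
    combine_prompt=combine_prompt,
)
print(chain({"input_documents": input_documents}, return_only_outputs=True)["output_text"])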
import os import streamlit as st from PyPDF2 import PdfReader import langchain langchain.verbose = False from langchain.text_splitter import CharacterTextSplitter from langchain.embeddings.openai import OpenAIEmbeddings from langchain.vectorstores import FAISS from langchain.chains.question_answering import load_qa_chain from langchain.llms import OpenAI from langchain.callbacks import get_openai_callback import requests from bs4 import BeautifulSoup def webscrap(name): # Replace this URL with the one you want to scrape url = f'https://www.{name}.com' response = requests.get(url) if response.status_code == 200: soup = BeautifulSoup(response.text, 'html.parser') page_text = soup.get_text() return page_text else: return None def main(): print(os.getenv('OPENAI_API_KEY')) st.set_page_config(page_title="Webscrap chatbot") st.header("Webscrap chatbot") name = st.text_input("enter website name") web_data= webscrap(name) if web_data is not None: text = web_data # for page in pdf_reader.pages: # text += page.extract_text() max_length = 1800 original_string = text temp_string = "" strings_list = [] for character in original_string: if len(temp_string) < max_length: temp_string += character else: strings_list.append(temp_string) temp_string = "" if temp_string: strings_list.append(temp_string) #split into chunks # create embeddings embeddings = OpenAIEmbeddings() knowledge_base = FAISS.from_texts(strings_list, embedding=embeddings) user_question = st.text_input("Ask a question about your PDF") if user_question: docs = knowledge_base.similarity_search(user_question) llm = OpenAI(model_name="gpt-3.5-turbo", temperature=0.9) chain = load_qa_chain(llm, chain_type="stuff") with get_openai_callback() as cb: response = chain.run(input_documents = docs, question = user_question) print(cb) st.write(response) if __name__ == '__main__': main()
[ "langchain.chains.question_answering.load_qa_chain", "langchain.llms.OpenAI", "langchain.callbacks.get_openai_callback", "langchain.vectorstores.FAISS.from_texts", "langchain.embeddings.openai.OpenAIEmbeddings" ]
[((583, 600), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (595, 600), False, 'import requests\n'), ((853, 902), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Webscrap chatbot"""'}), "(page_title='Webscrap chatbot')\n", (871, 902), True, 'import streamlit as st\n'), ((907, 936), 'streamlit.header', 'st.header', (['"""Webscrap chatbot"""'], {}), "('Webscrap chatbot')\n", (916, 936), True, 'import streamlit as st\n'), ((949, 984), 'streamlit.text_input', 'st.text_input', (['"""enter website name"""'], {}), "('enter website name')\n", (962, 984), True, 'import streamlit as st\n'), ((653, 696), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""html.parser"""'], {}), "(response.text, 'html.parser')\n", (666, 696), False, 'from bs4 import BeautifulSoup\n'), ((815, 842), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (824, 842), False, 'import os\n'), ((1660, 1678), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1676, 1678), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1704, 1756), 'langchain.vectorstores.FAISS.from_texts', 'FAISS.from_texts', (['strings_list'], {'embedding': 'embeddings'}), '(strings_list, embedding=embeddings)\n', (1720, 1756), False, 'from langchain.vectorstores import FAISS\n'), ((1782, 1828), 'streamlit.text_input', 'st.text_input', (['"""Ask a question about your PDF"""'], {}), "('Ask a question about your PDF')\n", (1795, 1828), True, 'import streamlit as st\n'), ((1954, 2005), 'langchain.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0.9)'}), "(model_name='gpt-3.5-turbo', temperature=0.9)\n", (1960, 2005), False, 'from langchain.llms import OpenAI\n'), ((2027, 2065), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['llm'], {'chain_type': '"""stuff"""'}), "(llm, chain_type='stuff')\n", (2040, 2065), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((2238, 2256), 'streamlit.write', 'st.write', (['response'], {}), '(response)\n', (2246, 2256), True, 'import streamlit as st\n'), ((2083, 2104), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (2102, 2104), False, 'from langchain.callbacks import get_openai_callback\n')]
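# The chatbot above imports CharacterTextSplitter but chunks the scraped page
# with a hand-rolled loop. A hedged sketch of the same retrieval flow using the
# imported splitter instead; the chunk sizes, placeholder page text and question
# are assumptions.
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI

page_text = "...text returned by webscrap()..."
question = "What does this site sell?"

splitter = CharacterTextSplitter(separator="\n", chunk_size=1800, chunk_overlap=200, length_function=len)
chunks = splitter.split_text(page_text)

knowledge_base = FAISS.from_texts(chunks, embedding=OpenAIEmbeddings())
docs = knowledge_base.similarity_search(question)

chain = load_qa_chain(OpenAI(temperature=0), chain_type="stuff")
print(chain.run(input_documents=docs, question=question))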
# Wrapper for Hugging Face APIs for llmlib from llmlib.base_model_wrapper import BaseModelWrapper from llama_index import ListIndex, SimpleDirectoryReader from langchain.embeddings.huggingface import HuggingFaceEmbeddings from llama_index import LangchainEmbedding, ServiceContext from llama_index import ListIndex, PromptHelper, SimpleDirectoryReader from transformers import pipeline import torch from langchain.llms.base import LLM from llama_index import LLMPredictor from pprint import pprint class CustomLLM(LLM): model_name = "facebook/opt-iml-1.3b" # I am not using a GPU, but you can add device="cuda:0" # to the pipeline call if you have a local GPU or # are running this on Google Colab: pipeline = pipeline("text-generation", model=model_name, model_kwargs={"torch_dtype":torch.bfloat16}) def _call(self, prompt, stop = None): prompt_length = len(prompt) response = self.pipeline(prompt, max_new_tokens=200) pprint(response) first_response = response[0]["generated_text"] # only return newly generated tokens returned_text = first_response[prompt_length:] return returned_text @property def _identifying_params(self): return {"name_of_model": self.model_name} @property def _llm_type(self): return "custom" class HuggingFaceAiWrapper(BaseModelWrapper): def __init__(self, key=None, embeddings_dir="./db_embeddings"): super().__init__(embeddings_dir=embeddings_dir) self.llm_predictor = LLMPredictor(llm=CustomLLM()) self.embed_model = LangchainEmbedding(HuggingFaceEmbeddings()) self.service_context = \ ServiceContext.from_defaults(llm_predictor=self.llm_predictor, embed_model=self.embed_model) max_input_size = 512 num_output = 64 max_chunk_overlap = 0 # 10 self.prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap) self.pipeline = None # complete text: def get_completion(self, prompt, max_tokens=64): if self.pipeline is None: self.pipeline = pipeline("text-generation", model="facebook/opt-iml-1.3b", model_kwargs={"torch_dtype":torch.bfloat16}) c = self.pipeline(prompt, max_new_tokens=max_tokens) pprint(c) try: return c[0]["generated_text"] except Exception as e: print(e) return "" def create_local_embeddings_files_in_dir(self, path): " path is a directory " self.documents = SimpleDirectoryReader(path).load_data() self.index = ListIndex.from_documents(documents=self.documents, llm_predictor=self.llm_predictor, prompt_helper=self.prompt_helper) self.index = self.index.as_query_engine(llm_predictor=self.llm_predictor) # query local embeddings: def query_local_embeddings(self, query, n=10): answer = self.index.query(query) return answer
[ "langchain.embeddings.huggingface.HuggingFaceEmbeddings" ]
[((735, 830), 'transformers.pipeline', 'pipeline', (['"""text-generation"""'], {'model': 'model_name', 'model_kwargs': "{'torch_dtype': torch.bfloat16}"}), "('text-generation', model=model_name, model_kwargs={'torch_dtype':\n torch.bfloat16})\n", (743, 830), False, 'from transformers import pipeline\n'), ((1022, 1038), 'pprint.pprint', 'pprint', (['response'], {}), '(response)\n', (1028, 1038), False, 'from pprint import pprint\n'), ((1731, 1828), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'self.llm_predictor', 'embed_model': 'self.embed_model'}), '(llm_predictor=self.llm_predictor, embed_model=\n self.embed_model)\n', (1759, 1828), False, 'from llama_index import LangchainEmbedding, ServiceContext\n'), ((1980, 2039), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (1992, 2039), False, 'from llama_index import ListIndex, PromptHelper, SimpleDirectoryReader\n'), ((2498, 2507), 'pprint.pprint', 'pprint', (['c'], {}), '(c)\n', (2504, 2507), False, 'from pprint import pprint\n'), ((2823, 2946), 'llama_index.ListIndex.from_documents', 'ListIndex.from_documents', ([], {'documents': 'self.documents', 'llm_predictor': 'self.llm_predictor', 'prompt_helper': 'self.prompt_helper'}), '(documents=self.documents, llm_predictor=self.\n llm_predictor, prompt_helper=self.prompt_helper)\n', (2847, 2946), False, 'from llama_index import ListIndex, PromptHelper, SimpleDirectoryReader\n'), ((1663, 1686), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {}), '()\n', (1684, 1686), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((2248, 2357), 'transformers.pipeline', 'pipeline', (['"""text-generation"""'], {'model': '"""facebook/opt-iml-1.3b"""', 'model_kwargs': "{'torch_dtype': torch.bfloat16}"}), "('text-generation', model='facebook/opt-iml-1.3b', model_kwargs={\n 'torch_dtype': torch.bfloat16})\n", (2256, 2357), False, 'from transformers import pipeline\n'), ((2762, 2789), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['path'], {}), '(path)\n', (2783, 2789), False, 'from llama_index import ListIndex, PromptHelper, SimpleDirectoryReader\n')]
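# Small usage sketch for the embedding model the wrapper above hands to
# llama_index: HuggingFaceEmbeddings can also be used on its own. The default
# sentence-transformers model is downloaded on first use; the query string is
# an assumption.
from langchain.embeddings.huggingface import HuggingFaceEmbeddings

embeddings = HuggingFaceEmbeddings()
vector = embeddings.embed_query("How do I load documents from a directory?")
print(len(vector))  # dimensionality of the sentence-transformer embedding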
import logging import ConsoleInterface import langchain.schema from langchain.agents import initialize_agent, AgentType #create_pandas_dataframe_agent logger = logging.getLogger('ConsoleInterface') ''' def PandasDataframeAgent(llm, Dataframe): """ Create a PandasDataframeAgent object. Parameters: llm (str): The llm parameter. Dataframe (pandas.DataFrame): The DataFrame parameter. Returns: PandasDataframeAgent: The created PandasDataframeAgent object. """ PandasDataframeAgent = create_pandas_dataframe_agent(llm, df=Dataframe, verbose=True) return PandasDataframeAgent ''' def RunConversationalAgent(llm, Tools, Memory): """ Run the conversational agent. Args: llm: The language model used by the agent. Tools: The tools available to the agent. Memory: The memory used by the agent. Returns: None """ initialize_agent Agent = initialize_agent(agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION, llm=llm, tools=Tools, memory=Memory, verbose=True) logger.info("Agent initialized successfully!\n") while True: query = input("Enter your query: ") if query.lower() == "exit" or query.lower() == "quit": break try: Agent.run(str(query)) except langchain.schema.OutputParserException as e: # Extract the message from the exception message = str(e) # The message is in the form "Could not parse LLM output: `...`" # So, we can split it by the backticks and take the second element answer = message.split('`')[1] logger.warning("\nError occured in retrieving answer from language model. Please check your query and try again. Answer stored in error message will be printed:\n") logger.warning("\nAnswer: ", answer)
[ "langchain.agents.initialize_agent" ]
[((165, 202), 'logging.getLogger', 'logging.getLogger', (['"""ConsoleInterface"""'], {}), "('ConsoleInterface')\n", (182, 202), False, 'import logging\n'), ((946, 1067), 'langchain.agents.initialize_agent', 'initialize_agent', ([], {'agent': 'AgentType.CONVERSATIONAL_REACT_DESCRIPTION', 'llm': 'llm', 'tools': 'Tools', 'memory': 'Memory', 'verbose': '(True)'}), '(agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION, llm=llm,\n tools=Tools, memory=Memory, verbose=True)\n', (962, 1067), False, 'from langchain.agents import initialize_agent, AgentType\n')]
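# Minimal sketch of wiring up the conversational agent used above: the caller
# is expected to supply an LLM, tools and a memory object. The llm-math tool is
# an illustrative choice; the memory_key must be "chat_history" for the
# conversational ReAct prompt.
from langchain.llms import OpenAI
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.memory import ConversationBufferMemory

llm = OpenAI(temperature=0)
tools = load_tools(["llm-math"], llm=llm)
memory = ConversationBufferMemory(memory_key="chat_history")

agent = initialize_agent(
    agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
    llm=llm,
    tools=tools,
    memory=memory,
    verbose=True,
)
print(agent.run("What is 12 * 7?"))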
import csv from ctypes import Array from typing import Any, Coroutine, List, Tuple import io import time import re import os from fastapi import UploadFile import asyncio import langchain from langchain.chat_models import ChatOpenAI from langchain.agents import create_csv_agent, load_tools, initialize_agent, AgentType, create_pandas_dataframe_agent from langchain.tools import HumanInputRun, PythonAstREPLTool from langchain.callbacks.tracers import ConsoleCallbackHandler from langchain.callbacks import HumanApprovalCallbackHandler from langchain.memory import ConversationBufferMemory, ConversationSummaryBufferMemory from langchain import PromptTemplate import pandas as pd from langchain.output_parsers import PydanticOutputParser, OutputFixingParser from util.tools import SessionHumanInputRun import util.config as config from util.model import TemplateMappingList, TemplateMapping, TemplateMappingCode, TransformValue import redis r = redis.from_url(os.environ.get("REDIS_URL")) #r = redis.from_url('redis://:password@localhost:6379') class Processor: def __init__(self, session): self.session = session async def extract_csv_description(self, df: UploadFile|str, llm, memory) -> Coroutine[Any, Any, Tuple[pd.DataFrame, str]] : df = pd.read_csv(df) agent = create_pandas_dataframe_agent(llm=llm,df=df, agent_executor_kwargs={'handle_parsing_errors':True, 'memory':memory}, early_stopping_method="generate", verbose=True, temperature=0,agent_type=AgentType.OPENAI_FUNCTIONS,) descriptions = agent.run("""Describe what is the column name of each of the column table in detail in the following format: <name of column 1>: <description of column 1>\n <name of column 2>: <description of column 2>""", callbacks=[ConsoleCallbackHandler()]) return df, descriptions async def _human_prompt(prompt, session): r.publish(f'human_prompt_{session}', prompt) async def _human_input(session): p = r.pubsub(ignore_subscribe_messages=True) p.subscribe(f'human_input_{session}') message = None while True: message = p.get_message() if message and message['type']=='message': break print("waiting for human input") await asyncio.sleep(1) return message['data'].decode('utf-8') async def process_files(self, table_file, template_file, file_guid): table_string = table_file.decode('utf-8') template_string = template_file.decode('utf-8') llm = ChatOpenAI(openai_api_key=config.OPENAI_API_KEY, temperature=0, model="gpt-3.5-turbo-0613", ) memory = ConversationSummaryBufferMemory(llm=llm,memory_key="chat_history", return_messages=True, max_token_limit=1500) table_df, table_descriptions = await self.extract_csv_description(io.StringIO(table_string), llm, memory=memory) r.publish(f'{self.session}_response', 'table_descriptions') r.publish(f'{self.session}_response', table_descriptions) template_df, template_descriptions = await self.extract_csv_description(io.StringIO(template_string), llm, memory=memory) r.publish(f'{self.session}_response', 'template_descriptions') r.publish(f'{self.session}_response', template_descriptions) dfs =[table_df, template_df] human_tool = SessionHumanInputRun(session=self.session) human_tool.description = ''' Use this tool to take human input. If the mapping is ambiguous, ask 'human' a question with options in the following format. Make the human confirm the mapping by selecting the appropriate number. - Question: The template column <template column name> should be mapped to which one of the table columns (1: <table column name 1>, 2: <table column name 2> (Recommended), 3:<table column name 3>, ...)? 
Select the appropriate number or specify the column name. ''' human_tool.prompt_func= Processor._human_prompt human_tool.input_func = Processor._human_input mappings = await self.get_mappings(llm, table_descriptions, template_descriptions, human_tool) codes = await self.get_template_formatting_code(llm, table_df, template_df, human_tool, mappings, memory) new_table_df = table_df.loc[:,[code.table_column for code in codes]] for code in codes: new_table_df[code.table_column].apply(lambda x: self.format_value(x,code=code.code)) r.set(f"{self.session}_{file_guid}", new_table_df.to_msgpack(compress='zlib')) r.publish(f'{self.session}_response', f'file_guid:{file_guid}') def format_value(self, source_value, code): value = TransformValue(source=source_value,destination=source_value) try: exec(code, {'value':value}) except Exception as e: r.publish(f'{self.session}_response',f'ERROR: \nCode: \n {code} \n Failed with error: \n{e}') print(e) return value.destination async def get_mappings(self,llm, table_descriptions, template_descriptions, human_tool): parser = PydanticOutputParser(pydantic_object=TemplateMappingList) new_parser = OutputFixingParser.from_llm(parser=parser,llm=llm) agent = initialize_agent( [human_tool], llm, agent=AgentType.OPENAI_FUNCTIONS, handle_parsing_errors=True, early_stopping_method="force", temperature=0.3, output_parser=new_parser, ) descriptions = await agent.arun("""Map all the columns of the Template descriptions to columns of the table Descriptions: - Table Descriptions: """ + table_descriptions + """ - Template Descriptions: """ + template_descriptions + """ Use the table and template descriptions above to determine the mapping based on similarity, formats and distribution. If the table column names are ambiguous take human input. """,callbacks=[ConsoleCallbackHandler()],) print(descriptions) mappings = new_parser.parse(descriptions) return mappings async def get_template_formatting_code(self, llm, table_df, template_df, human_tool, mappings: TemplateMappingList, memory): dfs = [] dfs.append(table_df) dfs.append(template_df) df_locals = {} df_locals[f"table_df"] = table_df df_locals[f"template_df"] = template_df parser = PydanticOutputParser(pydantic_object=TemplateMappingCode) new_parser = OutputFixingParser.from_llm(parser=parser,llm=llm) codes=[] #The code should be in the format of a Python function taking as input a string and returning a string. for mapping in mappings.template_mappings: human_tool.description = f''' Use this tool to get human approval. Always show the samples and code. The human can edit the code and approve it. ''' table_df_samples = table_df[mapping.table_column].sample(5).to_list() template_df_samples = template_df[mapping.template_column].sample(5).to_list() agent = initialize_agent( [PythonAstREPLTool(locals=df_locals)], llm, agent=AgentType.OPENAI_FUNCTIONS, handle_parsing_errors=True, early_stopping_method="force", temperature=0.3, output_parser=new_parser, memory = memory, memory_key = 'chat_history' ) #The AI can determine the format of the column values only after sampling. #As shown in the output below, generate the code as a Python function taking as input a string and returning a string and also include a call to the generated function. code = agent.run(f'''Provide the code to bring the format of values in table_df column '{mapping.table_column}' to the format of values in template_df column '{mapping.template_column}' based off the values, data types and formats. 
Additional samples to be used to generate the code: '{mapping.table_column}' sample values: [{table_df_samples}] '{mapping.template_column}' samples values: [{template_df_samples}] The input to the code will be a value object with the following attributes: - source: The value of the table_df column '{mapping.table_column}'. - destination: The value of the template_df column '{mapping.template_column}'. Show the sample values using which the code is generated. For example, for date columns, they may be in different formats, and it is necessary to change the format from dd.mm.yyyy to mm.dd.yyyy. Final Answer: ``` ```python def format_value(source_value): <code to transform source_value into destination_value> return destination_value value.destination = format_value(value.source) ``` ``` Final Answer should contain the samples and code. ''', callbacks=[ConsoleCallbackHandler(), ]) print(code) human_code = await human_tool.arun(code + '\nSpecify the code with ```python``` tags.') regex = r"```python((.|\n|\t)*?)```" code = human_code if re.match(regex, human_code) else code matches = re.findall(regex, code) code = '' for match in matches: code = code + '\n'+ '\n'.join(match) codes.append(TemplateMappingCode(template_column=mapping.template_column, table_column=mapping.table_column, code=code)) return codes
[ "langchain.agents.initialize_agent", "langchain.memory.ConversationSummaryBufferMemory", "langchain.output_parsers.PydanticOutputParser", "langchain.tools.PythonAstREPLTool", "langchain.agents.create_pandas_dataframe_agent", "langchain.chat_models.ChatOpenAI", "langchain.callbacks.tracers.ConsoleCallbackHandler", "langchain.output_parsers.OutputFixingParser.from_llm" ]
[((963, 990), 'os.environ.get', 'os.environ.get', (['"""REDIS_URL"""'], {}), "('REDIS_URL')\n", (977, 990), False, 'import os\n'), ((1270, 1285), 'pandas.read_csv', 'pd.read_csv', (['df'], {}), '(df)\n', (1281, 1285), True, 'import pandas as pd\n'), ((1302, 1537), 'langchain.agents.create_pandas_dataframe_agent', 'create_pandas_dataframe_agent', ([], {'llm': 'llm', 'df': 'df', 'agent_executor_kwargs': "{'handle_parsing_errors': True, 'memory': memory}", 'early_stopping_method': '"""generate"""', 'verbose': '(True)', 'temperature': '(0)', 'agent_type': 'AgentType.OPENAI_FUNCTIONS'}), "(llm=llm, df=df, agent_executor_kwargs={\n 'handle_parsing_errors': True, 'memory': memory}, early_stopping_method\n ='generate', verbose=True, temperature=0, agent_type=AgentType.\n OPENAI_FUNCTIONS)\n", (1331, 1537), False, 'from langchain.agents import create_csv_agent, load_tools, initialize_agent, AgentType, create_pandas_dataframe_agent\n'), ((2683, 2779), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'config.OPENAI_API_KEY', 'temperature': '(0)', 'model': '"""gpt-3.5-turbo-0613"""'}), "(openai_api_key=config.OPENAI_API_KEY, temperature=0, model=\n 'gpt-3.5-turbo-0613')\n", (2693, 2779), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2794, 2909), 'langchain.memory.ConversationSummaryBufferMemory', 'ConversationSummaryBufferMemory', ([], {'llm': 'llm', 'memory_key': '"""chat_history"""', 'return_messages': '(True)', 'max_token_limit': '(1500)'}), "(llm=llm, memory_key='chat_history',\n return_messages=True, max_token_limit=1500)\n", (2825, 2909), False, 'from langchain.memory import ConversationBufferMemory, ConversationSummaryBufferMemory\n'), ((3498, 3540), 'util.tools.SessionHumanInputRun', 'SessionHumanInputRun', ([], {'session': 'self.session'}), '(session=self.session)\n', (3518, 3540), False, 'from util.tools import SessionHumanInputRun\n'), ((4868, 4929), 'util.model.TransformValue', 'TransformValue', ([], {'source': 'source_value', 'destination': 'source_value'}), '(source=source_value, destination=source_value)\n', (4882, 4929), False, 'from util.model import TemplateMappingList, TemplateMapping, TemplateMappingCode, TransformValue\n'), ((5288, 5345), 'langchain.output_parsers.PydanticOutputParser', 'PydanticOutputParser', ([], {'pydantic_object': 'TemplateMappingList'}), '(pydantic_object=TemplateMappingList)\n', (5308, 5345), False, 'from langchain.output_parsers import PydanticOutputParser, OutputFixingParser\n'), ((5367, 5418), 'langchain.output_parsers.OutputFixingParser.from_llm', 'OutputFixingParser.from_llm', ([], {'parser': 'parser', 'llm': 'llm'}), '(parser=parser, llm=llm)\n', (5394, 5418), False, 'from langchain.output_parsers import PydanticOutputParser, OutputFixingParser\n'), ((5434, 5614), 'langchain.agents.initialize_agent', 'initialize_agent', (['[human_tool]', 'llm'], {'agent': 'AgentType.OPENAI_FUNCTIONS', 'handle_parsing_errors': '(True)', 'early_stopping_method': '"""force"""', 'temperature': '(0.3)', 'output_parser': 'new_parser'}), "([human_tool], llm, agent=AgentType.OPENAI_FUNCTIONS,\n handle_parsing_errors=True, early_stopping_method='force', temperature=\n 0.3, output_parser=new_parser)\n", (5450, 5614), False, 'from langchain.agents import create_csv_agent, load_tools, initialize_agent, AgentType, create_pandas_dataframe_agent\n'), ((6899, 6956), 'langchain.output_parsers.PydanticOutputParser', 'PydanticOutputParser', ([], {'pydantic_object': 'TemplateMappingCode'}), '(pydantic_object=TemplateMappingCode)\n', (6919, 6956), 
False, 'from langchain.output_parsers import PydanticOutputParser, OutputFixingParser\n'), ((6978, 7029), 'langchain.output_parsers.OutputFixingParser.from_llm', 'OutputFixingParser.from_llm', ([], {'parser': 'parser', 'llm': 'llm'}), '(parser=parser, llm=llm)\n', (7005, 7029), False, 'from langchain.output_parsers import PydanticOutputParser, OutputFixingParser\n'), ((10532, 10555), 're.findall', 're.findall', (['regex', 'code'], {}), '(regex, code)\n', (10542, 10555), False, 'import re\n'), ((2425, 2441), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (2438, 2441), False, 'import asyncio\n'), ((2988, 3013), 'io.StringIO', 'io.StringIO', (['table_string'], {}), '(table_string)\n', (2999, 3013), False, 'import io\n'), ((3250, 3278), 'io.StringIO', 'io.StringIO', (['template_string'], {}), '(template_string)\n', (3261, 3278), False, 'import io\n'), ((10459, 10486), 're.match', 're.match', (['regex', 'human_code'], {}), '(regex, human_code)\n', (10467, 10486), False, 'import re\n'), ((10690, 10801), 'util.model.TemplateMappingCode', 'TemplateMappingCode', ([], {'template_column': 'mapping.template_column', 'table_column': 'mapping.table_column', 'code': 'code'}), '(template_column=mapping.template_column, table_column=\n mapping.table_column, code=code)\n', (10709, 10801), False, 'from util.model import TemplateMappingList, TemplateMapping, TemplateMappingCode, TransformValue\n'), ((1905, 1929), 'langchain.callbacks.tracers.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (1927, 1929), False, 'from langchain.callbacks.tracers import ConsoleCallbackHandler\n'), ((7662, 7697), 'langchain.tools.PythonAstREPLTool', 'PythonAstREPLTool', ([], {'locals': 'df_locals'}), '(locals=df_locals)\n', (7679, 7697), False, 'from langchain.tools import HumanInputRun, PythonAstREPLTool\n'), ((6408, 6432), 'langchain.callbacks.tracers.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (6430, 6432), False, 'from langchain.callbacks.tracers import ConsoleCallbackHandler\n'), ((10224, 10248), 'langchain.callbacks.tracers.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (10246, 10248), False, 'from langchain.callbacks.tracers import ConsoleCallbackHandler\n')]
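A minimal, self-contained sketch of the create_pandas_dataframe_agent call used in the entry above, assuming the same legacy langchain release and an OPENAI_API_KEY in the environment; the toy DataFrame is only a stand-in for the uploaded CSV.

import pandas as pd
from langchain.chat_models import ChatOpenAI
from langchain.agents import create_pandas_dataframe_agent, AgentType

# Toy frame standing in for the CSV that the Processor class reads from the upload.
df = pd.DataFrame({"name": ["a", "b"], "amount": [1, 2]})
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
agent = create_pandas_dataframe_agent(llm=llm, df=df, verbose=True, agent_type=AgentType.OPENAI_FUNCTIONS)
# Ask the agent to describe the columns, mirroring the prompt used above.
print(agent.run("Describe each column of the table in one line."))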
from typing import Dict, List, Optional from langchain.agents.load_tools import ( _EXTRA_LLM_TOOLS, _EXTRA_OPTIONAL_TOOLS, _LLM_TOOLS, ) from langflow.custom import customs from langflow.interface.base import LangChainTypeCreator from langflow.interface.tools.constants import ( ALL_TOOLS_NAMES, CUSTOM_TOOLS, FILE_TOOLS, OTHER_TOOLS, ) from langflow.interface.tools.util import get_tool_params from langflow.settings import settings from langflow.template.field.base import TemplateField from langflow.template.template.base import Template from langflow.utils import util from langflow.utils.util import build_template_from_class TOOL_INPUTS = { "str": TemplateField( field_type="str", required=True, is_list=False, show=True, placeholder="", value="", ), "llm": TemplateField( field_type="BaseLanguageModel", required=True, is_list=False, show=True ), "func": TemplateField( field_type="function", required=True, is_list=False, show=True, multiline=True, ), "code": TemplateField( field_type="str", required=True, is_list=False, show=True, value="", multiline=True, ), "path": TemplateField( field_type="file", required=True, is_list=False, show=True, value="", suffixes=[".json", ".yaml", ".yml"], fileTypes=["json", "yaml", "yml"], ), } class ToolCreator(LangChainTypeCreator): type_name: str = "tools" tools_dict: Optional[Dict] = None @property def type_to_loader_dict(self) -> Dict: if self.tools_dict is None: all_tools = {} for tool, tool_fcn in ALL_TOOLS_NAMES.items(): tool_params = get_tool_params(tool_fcn) tool_name = tool_params.get("name") or tool if tool_name in settings.tools or settings.dev: if tool_name == "JsonSpec": tool_params["path"] = tool_params.pop("dict_") # type: ignore all_tools[tool_name] = { "type": tool, "params": tool_params, "fcn": tool_fcn, } self.tools_dict = all_tools return self.tools_dict def get_signature(self, name: str) -> Optional[Dict]: """Get the signature of a tool.""" base_classes = ["Tool", "BaseTool"] fields = [] params = [] tool_params = {} # Raise error if name is not in tools if name not in self.type_to_loader_dict.keys(): raise ValueError("Tool not found") tool_type: str = self.type_to_loader_dict[name]["type"] # type: ignore # if tool_type in _BASE_TOOLS.keys(): # params = [] if tool_type in _LLM_TOOLS.keys(): params = ["llm"] elif tool_type in _EXTRA_LLM_TOOLS.keys(): extra_keys = _EXTRA_LLM_TOOLS[tool_type][1] params = ["llm"] + extra_keys elif tool_type in _EXTRA_OPTIONAL_TOOLS.keys(): extra_keys = _EXTRA_OPTIONAL_TOOLS[tool_type][1] params = extra_keys # elif tool_type == "Tool": # params = ["name", "description", "func"] elif tool_type in CUSTOM_TOOLS: # Get custom tool params params = self.type_to_loader_dict[name]["params"] # type: ignore base_classes = ["function"] if node := customs.get_custom_nodes("tools").get(tool_type): return node elif tool_type in FILE_TOOLS: params = self.type_to_loader_dict[name]["params"] # type: ignore base_classes += [name] elif tool_type in OTHER_TOOLS: tool_dict = build_template_from_class(tool_type, OTHER_TOOLS) fields = tool_dict["template"] # Pop unnecessary fields and add name fields.pop("_type") # type: ignore fields.pop("return_direct") # type: ignore fields.pop("verbose") # type: ignore tool_params = { "name": fields.pop("name")["value"], # type: ignore "description": fields.pop("description")["value"], # type: ignore } fields = [ TemplateField(name=name, field_type=field["type"], **field) for name, field in fields.items() # type: ignore ] base_classes += tool_dict["base_classes"] # Copy the field and add the name 
for param in params: field = TOOL_INPUTS.get(param, TOOL_INPUTS["str"]).copy() field.name = param field.advanced = False if param == "aiosession": field.show = False field.required = False fields.append(field) template = Template(fields=fields, type_name=tool_type) tool_params = {**tool_params, **self.type_to_loader_dict[name]["params"]} return { "template": util.format_dict(template.to_dict()), **tool_params, "base_classes": base_classes, } def to_list(self) -> List[str]: """List all load tools""" return list(self.type_to_loader_dict.keys()) tool_creator = ToolCreator()
[ "langchain.agents.load_tools._LLM_TOOLS.keys", "langchain.agents.load_tools._EXTRA_LLM_TOOLS.keys", "langchain.agents.load_tools._EXTRA_OPTIONAL_TOOLS.keys" ]
[((690, 792), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""str"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)', 'placeholder': '""""""', 'value': '""""""'}), "(field_type='str', required=True, is_list=False, show=True,\n placeholder='', value='')\n", (703, 792), False, 'from langflow.template.field.base import TemplateField\n'), ((856, 946), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""BaseLanguageModel"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)'}), "(field_type='BaseLanguageModel', required=True, is_list=False,\n show=True)\n", (869, 946), False, 'from langflow.template.field.base import TemplateField\n'), ((970, 1068), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""function"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)', 'multiline': '(True)'}), "(field_type='function', required=True, is_list=False, show=\n True, multiline=True)\n", (983, 1068), False, 'from langflow.template.field.base import TemplateField\n'), ((1124, 1226), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""str"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)', 'value': '""""""', 'multiline': '(True)'}), "(field_type='str', required=True, is_list=False, show=True,\n value='', multiline=True)\n", (1137, 1226), False, 'from langflow.template.field.base import TemplateField\n'), ((1291, 1454), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""file"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)', 'value': '""""""', 'suffixes': "['.json', '.yaml', '.yml']", 'fileTypes': "['json', 'yaml', 'yml']"}), "(field_type='file', required=True, is_list=False, show=True,\n value='', suffixes=['.json', '.yaml', '.yml'], fileTypes=['json',\n 'yaml', 'yml'])\n", (1304, 1454), False, 'from langflow.template.field.base import TemplateField\n'), ((4975, 5019), 'langflow.template.template.base.Template', 'Template', ([], {'fields': 'fields', 'type_name': 'tool_type'}), '(fields=fields, type_name=tool_type)\n', (4983, 5019), False, 'from langflow.template.template.base import Template\n'), ((1779, 1802), 'langflow.interface.tools.constants.ALL_TOOLS_NAMES.items', 'ALL_TOOLS_NAMES.items', ([], {}), '()\n', (1800, 1802), False, 'from langflow.interface.tools.constants import ALL_TOOLS_NAMES, CUSTOM_TOOLS, FILE_TOOLS, OTHER_TOOLS\n'), ((2927, 2944), 'langchain.agents.load_tools._LLM_TOOLS.keys', '_LLM_TOOLS.keys', ([], {}), '()\n', (2942, 2944), False, 'from langchain.agents.load_tools import _EXTRA_LLM_TOOLS, _EXTRA_OPTIONAL_TOOLS, _LLM_TOOLS\n'), ((1834, 1859), 'langflow.interface.tools.util.get_tool_params', 'get_tool_params', (['tool_fcn'], {}), '(tool_fcn)\n', (1849, 1859), False, 'from langflow.interface.tools.util import get_tool_params\n'), ((3001, 3024), 'langchain.agents.load_tools._EXTRA_LLM_TOOLS.keys', '_EXTRA_LLM_TOOLS.keys', ([], {}), '()\n', (3022, 3024), False, 'from langchain.agents.load_tools import _EXTRA_LLM_TOOLS, _EXTRA_OPTIONAL_TOOLS, _LLM_TOOLS\n'), ((3150, 3178), 'langchain.agents.load_tools._EXTRA_OPTIONAL_TOOLS.keys', '_EXTRA_OPTIONAL_TOOLS.keys', ([], {}), '()\n', (3176, 3178), False, 'from langchain.agents.load_tools import _EXTRA_LLM_TOOLS, _EXTRA_OPTIONAL_TOOLS, _LLM_TOOLS\n'), ((3874, 3923), 'langflow.utils.util.build_template_from_class', 'build_template_from_class', (['tool_type', 'OTHER_TOOLS'], {}), '(tool_type, 
OTHER_TOOLS)\n', (3899, 3923), False, 'from langflow.utils.util import build_template_from_class\n'), ((3582, 3615), 'langflow.custom.customs.get_custom_nodes', 'customs.get_custom_nodes', (['"""tools"""'], {}), "('tools')\n", (3606, 3615), False, 'from langflow.custom import customs\n'), ((4407, 4466), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'name': 'name', 'field_type': "field['type']"}), "(name=name, field_type=field['type'], **field)\n", (4420, 4466), False, 'from langflow.template.field.base import TemplateField\n')]
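The _LLM_TOOLS / _EXTRA_LLM_TOOLS / _EXTRA_OPTIONAL_TOOLS registries imported above are private langchain internals and can change between releases; a small exploratory sketch, assuming the same legacy version, shows which tool names fall into each bucket.

from langchain.agents.load_tools import _LLM_TOOLS, _EXTRA_LLM_TOOLS, _EXTRA_OPTIONAL_TOOLS

print(sorted(_LLM_TOOLS.keys()))             # tools that only need an LLM
print(sorted(_EXTRA_LLM_TOOLS.keys()))       # tools that need an LLM plus extra kwargs
print(sorted(_EXTRA_OPTIONAL_TOOLS.keys()))  # tools with optional extra kwargs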
from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.chains.question_answering import load_qa_chain from langchain.embeddings.openai import OpenAIEmbeddings from streamlit_option_menu import option_menu from deep_translator import GoogleTranslator from langchain.vectorstores import Pinecone import streamlit_authenticator as stauth from yaml.loader import SafeLoader from langchain.llms import OpenAI from dotenv import load_dotenv from langchain import OpenAI from PyPDF2 import PdfReader import streamlit as st import langchain load_dotenv() import pinecone import openai import time import yaml import os # Initialization pinecone.init(api_key="db6b2a8c-d59e-48e1-8d5c-4c2704622937",environment="gcp-starter") llm=OpenAI(model_name="gpt-3.5-turbo-instruct") chain=load_qa_chain(llm,chain_type="stuff") index_name="langchainvector" # Home Page def home(): st.title("This is my Home page") # Login Page def login(): st.title("Login page") with open('./config.yaml') as file: config = yaml.load(file, Loader=SafeLoader) authenticator = stauth.Authenticate( config['credentials'], config['cookie']['name'], config['cookie']['key'], config['cookie']['expiry_days'], config['preauthorized'] ) authenticator.login('Login', location = 'main') if st.session_state["authentication_status"]: st.title(f'Welcome *{st.session_state["name"]}*') st.subheader('Click on the Chat to upload document and access AI chatbot') user_name = st.session_state["name"] parent = os.getcwd() path = os.path.join(parent, user_name) if not os.path.exists(path): os.mkdir(path) with st.sidebar: authenticator.logout("Logout", "sidebar") elif st.session_state["authentication_status"] is False: st.error('Username/password is incorrect') elif st.session_state["authentication_status"] is None: st.warning('Please enter your username and password') # Register Page def register(): st.title("Register page") with open('./config.yaml') as file: config = yaml.load(file, Loader=SafeLoader) authenticator = stauth.Authenticate( config['credentials'], config['cookie']['name'], config['cookie']['key'], config['cookie']['expiry_days'], config['preauthorized'] ) if authenticator.register_user('Register user', preauthorization=False): st.success('User registration successfully') with open('./config.yaml', 'a') as file: yaml.dump(config, file, default_flow_style=False) def forgot_pass(): with open('./config.yaml') as file: config = yaml.load(file, Loader=SafeLoader) authenticator = stauth.Authenticate( config['credentials'], config['cookie']['name'], config['cookie']['key'], config['cookie']['expiry_days'], config['preauthorized'] ) username_forgot_pw, email, random_password = authenticator.forgot_password('Forgot password') if username_forgot_pw: st.success(f'New random password is : {random_password}.. 
Change it in next login') elif username_forgot_pw == False: st.error('Username not found') with open('./config.yaml', 'w') as file: yaml.dump(config, file, default_flow_style=False) def change_pass(): with open('./config.yaml') as file: config = yaml.load(file, Loader=SafeLoader) authenticator = stauth.Authenticate( config['credentials'], config['cookie']['name'], config['cookie']['key'], config['cookie']['expiry_days'], config['preauthorized'] ) if st.session_state["authentication_status"]: if authenticator.reset_password(st.session_state["username"], 'Reset password'): st.success('New password changed') if not st.session_state["authentication_status"]: st.subheader('You need to login to change the password') with open('./config.yaml', 'w') as file: yaml.dump(config, file, default_flow_style=False) def update_profile(): with open('./config.yaml') as file: config = yaml.load(file, Loader=SafeLoader) authenticator = stauth.Authenticate( config['credentials'], config['cookie']['name'], config['cookie']['key'], config['cookie']['expiry_days'], config['preauthorized'] ) if st.session_state["authentication_status"]: if authenticator.update_user_details(st.session_state["username"], 'Update user details'): st.success('Entries updated successfully') if not st.session_state["authentication_status"]: st.subheader('You need to login to update the profile') with open('./config.yaml', 'a') as file: yaml.dump(config, file, default_flow_style=False) # Translatiton def translate_text(text, source='auto', target='hi'): return GoogleTranslator(source=source, target=target).translate(text) # Extract document and create embeddings def process_text(): text = "" if not os.path.exists(st.session_state.txt_path): os.mkdir(st.session_state.txt_path) if st.session_state.doc_type == 'PDF': for file in st.session_state.upload_folder: pdfdata = PdfReader(file) for page in pdfdata.pages: text += page.extract_text() else: for file in pdf_folder: for line in file: text += str(line, encoding = 'utf-8') file = open(st.session_state.txt_path + '/' + 'raw_text.txt' , 'w') file.write(text) text_splitter = RecursiveCharacterTextSplitter( chunk_size=1000, chunk_overlap=100, length_function=len ) chunks = text_splitter.split_text(text) embeddings = OpenAIEmbeddings() st.info('Creating OpenAI embeddings with PINECONE.... Please wait', icon="ℹ️") st.session_state.vector_db = Pinecone.from_texts(chunks,embeddings,index_name=index_name) st.success('Embeddings generated... 
Start the conversations', icon="✅") def query_answer(query): docs = st.session_state.vector_db.similarity_search(query, k=2) response = chain.run(input_documents=docs, question=query) return response def chatbox(): for message in st.session_state.messages: with st.chat_message(message['role']): st.markdown(message['content']) if prompt := st.chat_input('Ask question about PDF content'): st.session_state.messages.append({'role' : 'user', 'content' : prompt}) with st.chat_message('user'): st.markdown(prompt) with st.chat_message('assistant'): message_placeholder = st.empty() raw_prompt = translate_text(prompt, 'auto', 'en') result = query_answer(prompt) result2 = "" for chunk in result.split(): result2 += chunk + " " time.sleep(0.1) message_placeholder.markdown(result2 + "▌") st.session_state.messages.append({"role": "assistant", "content": result}) def about(key): selection = st.session_state[key] if selection == 'Home': home() if selection == 'Login': login() if selection == 'Register': register() if selection == 'Forgot Password': forgot_pass() def tasks(): st.write('Tasks') def main(): if 'vector_db' not in st.session_state: st.session_state.vector_db = None if 'txt_path' not in st.session_state: st.session_state.txt_path = None if 'doc_type' not in st.session_state: st.session_state.doc_type = None if 'upload_folder' not in st.session_state: st.session_state.upload_folder = None if 'messages' not in st.session_state: st.session_state.messages = [] st.session_state.txt_path = os.path.join(os.getcwd(), 'extract_text') with st.sidebar: selected5 = option_menu(None, ["Home", "Login", "Register", 'Forgot Passoword'], icons=['house', 'login', "register", 'gear'], on_change=about, key='menu_5', orientation="vertical") st.session_state.doc_type = st.selectbox('Document type', ('None','PDF','TXT', 'RST','MD')) st.session_state.upload_folder = st.file_uploader('Upload files', type = ['pdf', 'txt', 'rst','md'], accept_multiple_files=True) submitBtn = st.button('Submit') if submitBtn: process_text() chatbox() if __name__ == '__main__': main()
[ "langchain.chains.question_answering.load_qa_chain", "langchain.vectorstores.Pinecone.from_texts", "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.OpenAI", "langchain.embeddings.openai.OpenAIEmbeddings" ]
[((560, 573), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (571, 573), False, 'from dotenv import load_dotenv\n'), ((656, 749), 'pinecone.init', 'pinecone.init', ([], {'api_key': '"""db6b2a8c-d59e-48e1-8d5c-4c2704622937"""', 'environment': '"""gcp-starter"""'}), "(api_key='db6b2a8c-d59e-48e1-8d5c-4c2704622937', environment=\n 'gcp-starter')\n", (669, 749), False, 'import pinecone\n'), ((748, 791), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo-instruct"""'}), "(model_name='gpt-3.5-turbo-instruct')\n", (754, 791), False, 'from langchain import OpenAI\n'), ((798, 836), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['llm'], {'chain_type': '"""stuff"""'}), "(llm, chain_type='stuff')\n", (811, 836), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((895, 927), 'streamlit.title', 'st.title', (['"""This is my Home page"""'], {}), "('This is my Home page')\n", (903, 927), True, 'import streamlit as st\n'), ((960, 982), 'streamlit.title', 'st.title', (['"""Login page"""'], {}), "('Login page')\n", (968, 982), True, 'import streamlit as st\n'), ((1096, 1257), 'streamlit_authenticator.Authenticate', 'stauth.Authenticate', (["config['credentials']", "config['cookie']['name']", "config['cookie']['key']", "config['cookie']['expiry_days']", "config['preauthorized']"], {}), "(config['credentials'], config['cookie']['name'], config\n ['cookie']['key'], config['cookie']['expiry_days'], config['preauthorized']\n )\n", (1115, 1257), True, 'import streamlit_authenticator as stauth\n'), ((2076, 2101), 'streamlit.title', 'st.title', (['"""Register page"""'], {}), "('Register page')\n", (2084, 2101), True, 'import streamlit as st\n'), ((2215, 2376), 'streamlit_authenticator.Authenticate', 'stauth.Authenticate', (["config['credentials']", "config['cookie']['name']", "config['cookie']['key']", "config['cookie']['expiry_days']", "config['preauthorized']"], {}), "(config['credentials'], config['cookie']['name'], config\n ['cookie']['key'], config['cookie']['expiry_days'], config['preauthorized']\n )\n", (2234, 2376), True, 'import streamlit_authenticator as stauth\n'), ((2780, 2941), 'streamlit_authenticator.Authenticate', 'stauth.Authenticate', (["config['credentials']", "config['cookie']['name']", "config['cookie']['key']", "config['cookie']['expiry_days']", "config['preauthorized']"], {}), "(config['credentials'], config['cookie']['name'], config\n ['cookie']['key'], config['cookie']['expiry_days'], config['preauthorized']\n )\n", (2799, 2941), True, 'import streamlit_authenticator as stauth\n'), ((3509, 3670), 'streamlit_authenticator.Authenticate', 'stauth.Authenticate', (["config['credentials']", "config['cookie']['name']", "config['cookie']['key']", "config['cookie']['expiry_days']", "config['preauthorized']"], {}), "(config['credentials'], config['cookie']['name'], config\n ['cookie']['key'], config['cookie']['expiry_days'], config['preauthorized']\n )\n", (3528, 3670), True, 'import streamlit_authenticator as stauth\n'), ((4252, 4413), 'streamlit_authenticator.Authenticate', 'stauth.Authenticate', (["config['credentials']", "config['cookie']['name']", "config['cookie']['key']", "config['cookie']['expiry_days']", "config['preauthorized']"], {}), "(config['credentials'], config['cookie']['name'], config\n ['cookie']['key'], config['cookie']['expiry_days'], config['preauthorized']\n )\n", (4271, 4413), True, 'import streamlit_authenticator as stauth\n'), ((5562, 5653), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 
'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(100)', 'length_function': 'len'}), '(chunk_size=1000, chunk_overlap=100,\n length_function=len)\n', (5592, 5653), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((5715, 5733), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (5731, 5733), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((5735, 5813), 'streamlit.info', 'st.info', (['"""Creating OpenAI embeddings with PINECONE.... Please wait"""'], {'icon': '"""ℹ️"""'}), "('Creating OpenAI embeddings with PINECONE.... Please wait', icon='ℹ️')\n", (5742, 5813), True, 'import streamlit as st\n'), ((5844, 5906), 'langchain.vectorstores.Pinecone.from_texts', 'Pinecone.from_texts', (['chunks', 'embeddings'], {'index_name': 'index_name'}), '(chunks, embeddings, index_name=index_name)\n', (5863, 5906), False, 'from langchain.vectorstores import Pinecone\n'), ((5906, 5977), 'streamlit.success', 'st.success', (['"""Embeddings generated... Start the conversations"""'], {'icon': '"""✅"""'}), "('Embeddings generated... Start the conversations', icon='✅')\n", (5916, 5977), True, 'import streamlit as st\n'), ((6014, 6070), 'streamlit.session_state.vector_db.similarity_search', 'st.session_state.vector_db.similarity_search', (['query'], {'k': '(2)'}), '(query, k=2)\n', (6058, 6070), True, 'import streamlit as st\n'), ((7087, 7104), 'streamlit.write', 'st.write', (['"""Tasks"""'], {}), "('Tasks')\n", (7095, 7104), True, 'import streamlit as st\n'), ((1040, 1074), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'SafeLoader'}), '(file, Loader=SafeLoader)\n', (1049, 1074), False, 'import yaml\n'), ((1404, 1453), 'streamlit.title', 'st.title', (['f"""Welcome *{st.session_state[\'name\']}*"""'], {}), '(f"Welcome *{st.session_state[\'name\']}*")\n', (1412, 1453), True, 'import streamlit as st\n'), ((1462, 1536), 'streamlit.subheader', 'st.subheader', (['"""Click on the Chat to upload document and access AI chatbot"""'], {}), "('Click on the Chat to upload document and access AI chatbot')\n", (1474, 1536), True, 'import streamlit as st\n'), ((1599, 1610), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1608, 1610), False, 'import os\n'), ((1626, 1657), 'os.path.join', 'os.path.join', (['parent', 'user_name'], {}), '(parent, user_name)\n', (1638, 1657), False, 'import os\n'), ((2159, 2193), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'SafeLoader'}), '(file, Loader=SafeLoader)\n', (2168, 2193), False, 'import yaml\n'), ((2498, 2542), 'streamlit.success', 'st.success', (['"""User registration successfully"""'], {}), "('User registration successfully')\n", (2508, 2542), True, 'import streamlit as st\n'), ((2596, 2645), 'yaml.dump', 'yaml.dump', (['config', 'file'], {'default_flow_style': '(False)'}), '(config, file, default_flow_style=False)\n', (2605, 2645), False, 'import yaml\n'), ((2724, 2758), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'SafeLoader'}), '(file, Loader=SafeLoader)\n', (2733, 2758), False, 'import yaml\n'), ((3111, 3199), 'streamlit.success', 'st.success', (['f"""New random password is : {random_password}.. Change it in next login"""'], {}), "(\n f'New random password is : {random_password}.. 
Change it in next login')\n", (3121, 3199), True, 'import streamlit as st\n'), ((3325, 3374), 'yaml.dump', 'yaml.dump', (['config', 'file'], {'default_flow_style': '(False)'}), '(config, file, default_flow_style=False)\n', (3334, 3374), False, 'import yaml\n'), ((3453, 3487), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'SafeLoader'}), '(file, Loader=SafeLoader)\n', (3462, 3487), False, 'import yaml\n'), ((3955, 4011), 'streamlit.subheader', 'st.subheader', (['"""You need to login to change the password"""'], {}), "('You need to login to change the password')\n", (3967, 4011), True, 'import streamlit as st\n'), ((4065, 4114), 'yaml.dump', 'yaml.dump', (['config', 'file'], {'default_flow_style': '(False)'}), '(config, file, default_flow_style=False)\n', (4074, 4114), False, 'import yaml\n'), ((4196, 4230), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'SafeLoader'}), '(file, Loader=SafeLoader)\n', (4205, 4230), False, 'import yaml\n'), ((4716, 4771), 'streamlit.subheader', 'st.subheader', (['"""You need to login to update the profile"""'], {}), "('You need to login to update the profile')\n", (4728, 4771), True, 'import streamlit as st\n'), ((4825, 4874), 'yaml.dump', 'yaml.dump', (['config', 'file'], {'default_flow_style': '(False)'}), '(config, file, default_flow_style=False)\n', (4834, 4874), False, 'import yaml\n'), ((5102, 5143), 'os.path.exists', 'os.path.exists', (['st.session_state.txt_path'], {}), '(st.session_state.txt_path)\n', (5116, 5143), False, 'import os\n'), ((5147, 5182), 'os.mkdir', 'os.mkdir', (['st.session_state.txt_path'], {}), '(st.session_state.txt_path)\n', (5155, 5182), False, 'import os\n'), ((6298, 6345), 'streamlit.chat_input', 'st.chat_input', (['"""Ask question about PDF content"""'], {}), "('Ask question about PDF content')\n", (6311, 6345), True, 'import streamlit as st\n'), ((6349, 6418), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (6381, 6418), True, 'import streamlit as st\n'), ((6780, 6854), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': result}"], {}), "({'role': 'assistant', 'content': result})\n", (6812, 6854), True, 'import streamlit as st\n'), ((7545, 7556), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7554, 7556), False, 'import os\n'), ((7606, 7784), 'streamlit_option_menu.option_menu', 'option_menu', (['None', "['Home', 'Login', 'Register', 'Forgot Passoword']"], {'icons': "['house', 'login', 'register', 'gear']", 'on_change': 'about', 'key': '"""menu_5"""', 'orientation': '"""vertical"""'}), "(None, ['Home', 'Login', 'Register', 'Forgot Passoword'], icons=\n ['house', 'login', 'register', 'gear'], on_change=about, key='menu_5',\n orientation='vertical')\n", (7617, 7784), False, 'from streamlit_option_menu import option_menu\n'), ((7856, 7922), 'streamlit.selectbox', 'st.selectbox', (['"""Document type"""', "('None', 'PDF', 'TXT', 'RST', 'MD')"], {}), "('Document type', ('None', 'PDF', 'TXT', 'RST', 'MD'))\n", (7868, 7922), True, 'import streamlit as st\n'), ((7955, 8053), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload files"""'], {'type': "['pdf', 'txt', 'rst', 'md']", 'accept_multiple_files': '(True)'}), "('Upload files', type=['pdf', 'txt', 'rst', 'md'],\n accept_multiple_files=True)\n", (7971, 8053), True, 'import streamlit as st\n'), ((8065, 8084), 'streamlit.button', 'st.button', (['"""Submit"""'], {}), "('Submit')\n", (8074, 8084), 
True, 'import streamlit as st\n'), ((1673, 1693), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1687, 1693), False, 'import os\n'), ((1707, 1721), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (1715, 1721), False, 'import os\n'), ((1873, 1915), 'streamlit.error', 'st.error', (['"""Username/password is incorrect"""'], {}), "('Username/password is incorrect')\n", (1881, 1915), True, 'import streamlit as st\n'), ((3241, 3271), 'streamlit.error', 'st.error', (['"""Username not found"""'], {}), "('Username not found')\n", (3249, 3271), True, 'import streamlit as st\n'), ((3858, 3892), 'streamlit.success', 'st.success', (['"""New password changed"""'], {}), "('New password changed')\n", (3868, 3892), True, 'import streamlit as st\n'), ((4611, 4653), 'streamlit.success', 'st.success', (['"""Entries updated successfully"""'], {}), "('Entries updated successfully')\n", (4621, 4653), True, 'import streamlit as st\n'), ((4957, 5003), 'deep_translator.GoogleTranslator', 'GoogleTranslator', ([], {'source': 'source', 'target': 'target'}), '(source=source, target=target)\n', (4973, 5003), False, 'from deep_translator import GoogleTranslator\n'), ((5283, 5298), 'PyPDF2.PdfReader', 'PdfReader', (['file'], {}), '(file)\n', (5292, 5298), False, 'from PyPDF2 import PdfReader\n'), ((6215, 6247), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (6230, 6247), True, 'import streamlit as st\n'), ((6252, 6283), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (6263, 6283), True, 'import streamlit as st\n'), ((6428, 6451), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (6443, 6451), True, 'import streamlit as st\n'), ((6456, 6475), 'streamlit.markdown', 'st.markdown', (['prompt'], {}), '(prompt)\n', (6467, 6475), True, 'import streamlit as st\n'), ((6483, 6511), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (6498, 6511), True, 'import streamlit as st\n'), ((6538, 6548), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (6546, 6548), True, 'import streamlit as st\n'), ((1984, 2037), 'streamlit.warning', 'st.warning', (['"""Please enter your username and password"""'], {}), "('Please enter your username and password')\n", (1994, 2037), True, 'import streamlit as st\n'), ((6714, 6729), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (6724, 6729), False, 'import time\n')]
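A stripped-down sketch of the same embed-and-answer flow without the Streamlit UI, assuming a Pinecone index named "langchainvector" already exists and that PINECONE_API_KEY and OPENAI_API_KEY are set; the inline string stands in for the extracted PDF text.

import os
import pinecone
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Pinecone
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI

pinecone.init(api_key=os.environ["PINECONE_API_KEY"], environment="gcp-starter")  # placeholder credentials
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100, length_function=len)
chunks = splitter.split_text("long extracted PDF text goes here ...")  # stand-in for the real document
db = Pinecone.from_texts(chunks, OpenAIEmbeddings(), index_name="langchainvector")
chain = load_qa_chain(OpenAI(model_name="gpt-3.5-turbo-instruct"), chain_type="stuff")
question = "What is this document about?"
docs = db.similarity_search(question, k=2)
print(chain.run(input_documents=docs, question=question))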
import langchain_helper as lch
import streamlit as st

st.title("Generador de nombres para mascotas")
st.markdown("Este es un generador de nombres para mascotas. Escriba el tipo de animal que tiene y presione el botón 'Generar nombres'.")

animal_type = st.sidebar.selectbox("¿cual es tu mascota?", ("gato", "perro", "cabra", "vaca", "hamster", "conejo", "pato", "loro", "pez", "tortuga", "serpiente", "rana", "caballo", "oveja", "cerdo", "gallo", "pavo", "burro", "gallina", "pavo"))

# Default avoids a NameError when the selected animal has no colour prompt below.
pet_color = ""

if animal_type == "gato":
    pet_color = st.sidebar.text_area(label="¿De qué color es tu gato?", max_chars=15)

if animal_type == "perro":
    pet_color = st.sidebar.text_area(label="¿De qué color es tu perro?", max_chars=15)

if pet_color:
    pet_name = lch.generate_pet_name(animal_type, pet_color)
    st.text(pet_name)
[ "langchain_helper.generate_pet_name" ]
[((55, 101), 'streamlit.title', 'st.title', (['"""Generador de nombres para mascotas"""'], {}), "('Generador de nombres para mascotas')\n", (63, 101), True, 'import streamlit as st\n'), ((102, 248), 'streamlit.markdown', 'st.markdown', (['"""Este es un generador de nombres para mascotas. Escriba el tipo de animal que tiene y presione el botón \'Generar nombres\'."""'], {}), '(\n "Este es un generador de nombres para mascotas. Escriba el tipo de animal que tiene y presione el botón \'Generar nombres\'."\n )\n', (113, 248), True, 'import streamlit as st\n'), ((254, 496), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""¿cual es tu mascota?"""', "('gato', 'perro', 'cabra', 'vaca', 'hamster', 'conejo', 'pato', 'loro',\n 'pez', 'tortuga', 'serpiente', 'rana', 'caballo', 'oveja', 'cerdo',\n 'gallo', 'pavo', 'burro', 'gallina', 'pavo')"], {}), "('¿cual es tu mascota?', ('gato', 'perro', 'cabra',\n 'vaca', 'hamster', 'conejo', 'pato', 'loro', 'pez', 'tortuga',\n 'serpiente', 'rana', 'caballo', 'oveja', 'cerdo', 'gallo', 'pavo',\n 'burro', 'gallina', 'pavo'))\n", (274, 496), True, 'import streamlit as st\n'), ((508, 577), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', ([], {'label': '"""¿De qué color es tu gato?"""', 'max_chars': '(15)'}), "(label='¿De qué color es tu gato?', max_chars=15)\n", (528, 577), True, 'import streamlit as st\n'), ((621, 691), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', ([], {'label': '"""¿De qué color es tu perro?"""', 'max_chars': '(15)'}), "(label='¿De qué color es tu perro?', max_chars=15)\n", (641, 691), True, 'import streamlit as st\n'), ((726, 771), 'langchain_helper.generate_pet_name', 'lch.generate_pet_name', (['animal_type', 'pet_color'], {}), '(animal_type, pet_color)\n', (747, 771), True, 'import langchain_helper as lch\n'), ((776, 793), 'streamlit.text', 'st.text', (['pet_name'], {}), '(pet_name)\n', (783, 793), True, 'import streamlit as st\n')]
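langchain_helper is the app's own module and is not shown here, so the following is only a hypothetical sketch of what generate_pet_name could look like (a PromptTemplate fed into an LLMChain); the prompt wording and temperature are assumptions, not the real implementation.

from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

def generate_pet_name(animal_type: str, pet_color: str) -> str:
    # Hypothetical prompt; the real langchain_helper module may phrase this differently.
    prompt = PromptTemplate(
        input_variables=["animal_type", "pet_color"],
        template="Suggest five names for a pet {animal_type} whose colour is {pet_color}.",
    )
    chain = LLMChain(llm=OpenAI(temperature=0.7), prompt=prompt)
    return chain.run(animal_type=animal_type, pet_color=pet_color)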
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
# invoking the custom retriever
from redundant_filter_retriever import RedundantFilterRetriever
from dotenv import load_dotenv
import langchain

langchain.debug = True

load_dotenv()

# create our chat model
chat = ChatOpenAI()
embeddings = OpenAIEmbeddings()

# instance of Chroma used for similarity_search only; it does not add content to the db
db = Chroma(
    persist_directory="emb",
    embedding_function=embeddings
)

# set up the RetrievalQA construct in langchain
# retriever -> an object that takes in a string and returns relevant docs
# call our custom retriever -> RedundantFilterRetriever instead of db.as_retriever()
retriever = RedundantFilterRetriever(
    # pass in the customized attributes -> embeddings & chroma
    embeddings=embeddings,
    chroma=db,
)
# retriever = db.as_retriever()

chain = RetrievalQA.from_chain_type(
    llm=chat,
    retriever=retriever,
    chain_type="stuff",
    # "refine" -> build an initial response, then give the LLM an opportunity to update it with further context
    # "map_reduce" -> build a summary of each doc, then feed each summary to the final question
    # "stuff" -> take some context from the vector store and "stuff" it into the prompt
    # "map_rerank" -> find the relevant part of each doc and give it a score of how relevant it is
)

result = chain.run("What is an interesting fact about the English language")
print(result)
[ "langchain.vectorstores.Chroma", "langchain.embeddings.OpenAIEmbeddings", "langchain.chains.RetrievalQA.from_chain_type", "langchain.chat_models.ChatOpenAI" ]
[((344, 357), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (355, 357), False, 'from dotenv import load_dotenv\n'), ((392, 404), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (402, 404), False, 'from langchain.chat_models import ChatOpenAI\n'), ((418, 436), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (434, 436), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((513, 575), 'langchain.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': '"""emb"""', 'embedding_function': 'embeddings'}), "(persist_directory='emb', embedding_function=embeddings)\n", (519, 575), False, 'from langchain.vectorstores import Chroma\n'), ((792, 850), 'redundant_filter_retriever.RedundantFilterRetriever', 'RedundantFilterRetriever', ([], {'embeddings': 'embeddings', 'chroma': 'db'}), '(embeddings=embeddings, chroma=db)\n', (816, 850), False, 'from redundant_filter_retriever import RedundantFilterRetriever\n'), ((962, 1040), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'chat', 'retriever': 'retriever', 'chain_type': '"""stuff"""'}), "(llm=chat, retriever=retriever, chain_type='stuff')\n", (989, 1040), False, 'from langchain.chains import RetrievalQA\n')]
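redundant_filter_retriever is a local module whose internals are not shown above; a plausible, hypothetical building block for such a retriever is Chroma's maximal-marginal-relevance search, which filters out near-duplicate chunks before they reach the chain.

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma

embeddings = OpenAIEmbeddings()
db = Chroma(persist_directory="emb", embedding_function=embeddings)
# Embed the query ourselves, then ask Chroma for diverse (non-redundant) results.
query_vector = embeddings.embed_query("interesting fact about the English language")
docs = db.max_marginal_relevance_search_by_vector(embedding=query_vector, k=4, fetch_k=20, lambda_mult=0.8)
for doc in docs:
    print(doc.page_content[:80])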
import os
import logging
import pickle
import ssl
import dill
import langchain
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI, GooglePalm
from langchain.chains import LLMChain, RetrievalQAWithSourcesChain, AnalyzeDocumentChain
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.document_loaders import TextLoader, UnstructuredURLLoader
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema import StrOutputParser
from dotenv import load_dotenv


class Vectorizer():
    llm = OpenAI(temperature=0.7, max_tokens=1024)
    embeddings = OpenAIEmbeddings()

    logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
    logging.getLogger("httpx").setLevel(logging.WARNING)
    logger = logging.getLogger(__name__)

    def __init__(self, file_path: str):
        self.file_path = os.path.join(os.getcwd(), 'vectors', f'{file_path[:-4]}.pkl')

    def vector(self, split_docs: list, ) -> bool:
        self.logger.info('docs: %s', len(split_docs))

        # Using OpenAIEmbeddings models to provide further correlational data for our resulting vector for better semantic relationship identification
        vector_index = FAISS.from_documents(split_docs, self.embeddings)
        self.logger.info('Vector embedding created')

        # Exclude SSLContext from pickling
        dill._dill._reverse_typemap[type(ssl.create_default_context())] = None

        with open(self.file_path, 'wb') as f:
            dill.dump(vector_index, f)
        self.logger.info('Vector index saved')
        return True

    def load_index(self):
        if os.path.exists(self.file_path):
            with open(self.file_path, 'rb') as f:
                vector_index = dill.load(f)
            self.logger.info('Vector index loaded')
            return vector_index
        else:
            self.logger.info('Vector index not found at the provided file path')
            return False
[ "langchain.vectorstores.FAISS.from_documents", "langchain.embeddings.OpenAIEmbeddings", "langchain.llms.OpenAI" ]
[((670, 710), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.7)', 'max_tokens': '(1024)'}), '(temperature=0.7, max_tokens=1024)\n', (676, 710), False, 'from langchain.llms import OpenAI, GooglePalm\n'), ((728, 746), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (744, 746), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((751, 858), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n", (770, 858), False, 'import logging\n'), ((924, 951), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (941, 951), False, 'import logging\n'), ((1371, 1420), 'langchain.vectorstores.FAISS.from_documents', 'FAISS.from_documents', (['split_docs', 'self.embeddings'], {}), '(split_docs, self.embeddings)\n', (1391, 1420), False, 'from langchain.vectorstores import FAISS\n'), ((1821, 1851), 'os.path.exists', 'os.path.exists', (['self.file_path'], {}), '(self.file_path)\n', (1835, 1851), False, 'import os\n'), ((858, 884), 'logging.getLogger', 'logging.getLogger', (['"""httpx"""'], {}), "('httpx')\n", (875, 884), False, 'import logging\n'), ((1035, 1046), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1044, 1046), False, 'import os\n'), ((1672, 1698), 'dill.dump', 'dill.dump', (['vector_index', 'f'], {}), '(vector_index, f)\n', (1681, 1698), False, 'import dill\n'), ((1567, 1595), 'ssl.create_default_context', 'ssl.create_default_context', ([], {}), '()\n', (1593, 1595), False, 'import ssl\n'), ((1934, 1946), 'dill.load', 'dill.load', (['f'], {}), '(f)\n', (1943, 1946), False, 'import dill\n')]
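A minimal sketch of the same FAISS round trip, assuming OPENAI_API_KEY is set; note that FAISS also exposes save_local/load_local, which sidesteps the dill/SSLContext pickling workaround used in the class above.

from langchain.schema import Document
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

embeddings = OpenAIEmbeddings()
docs = [Document(page_content="FAISS stores dense vectors for similarity search.")]
index = FAISS.from_documents(docs, embeddings)

# Persist and restore the index with FAISS's own helpers instead of pickling it.
index.save_local("vectors/example_index")
restored = FAISS.load_local("vectors/example_index", embeddings)
print(restored.similarity_search("what does FAISS store?", k=1))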
# inspired by: https://github.com/rushic24/langchain-remember-me-llm/ # MIT license import torch from json_database import JsonStorageXDG from langchain.embeddings.huggingface import HuggingFaceEmbeddings from langchain.llms.base import LLM from llama_index import Document from llama_index import LLMPredictor, ServiceContext from llama_index import LangchainEmbedding, GPTVectorStoreIndex as GPTSimpleVectorIndex from ovos_plugin_manager.templates.solvers import QuestionSolver from transformers import pipeline class UserInfo: db = JsonStorageXDG("personalLLM") db.setdefault("data", []) @classmethod def remember(cls, fact): cls.db["data"].append(fact) cls.db.store() class PersonalLLMSolver(QuestionSolver): enable_tx = True priority = 80 def __init__(self, config=None): config = config or {} config["lang"] = "en" # only english supported (not really, depends on model... TODO) super().__init__(config) # a class inside a class :O class PersonalUserLLM(LLM): model_name = config.get("model") or "google/flan-t5-small" pipeline = pipeline("text2text-generation", model=model_name, device=0, model_kwargs={"torch_dtype": torch.bfloat16}) initial_prompt = config.get("initial_prompt") or \ 'You are a highly intelligent question answering A.I. based on the information provided by the user. ' \ 'If the answer cannot be found in the user provided information, write "I could not find an answer."' @classmethod def get_engine(cls): llm_predictor = LLMPredictor(llm=cls()) hfemb = HuggingFaceEmbeddings() embed_model = LangchainEmbedding(hfemb) documents = [Document(t) for t in UserInfo.db["data"]] service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, embed_model=embed_model) index = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context) return index.as_query_engine() def _call(self, prompt, stop=None): text = f"{self.initial_prompt}\n\n{prompt} {stop}" if stop is not None else f"{self.initial_prompt}\n\n{prompt}" return self.pipeline(text, max_length=9999)[0]["generated_text"] @property def _identifying_params(self): return {"name_of_model": self.model_name} @property def _llm_type(self): return "custom" self.llm = PersonalUserLLM.get_engine() # officially exported Solver methods def get_spoken_answer(self, query, context=None): return self.llm.query(query).response
[ "langchain.embeddings.huggingface.HuggingFaceEmbeddings" ]
[((541, 570), 'json_database.JsonStorageXDG', 'JsonStorageXDG', (['"""personalLLM"""'], {}), "('personalLLM')\n", (555, 570), False, 'from json_database import JsonStorageXDG\n'), ((1152, 1263), 'transformers.pipeline', 'pipeline', (['"""text2text-generation"""'], {'model': 'model_name', 'device': '(0)', 'model_kwargs': "{'torch_dtype': torch.bfloat16}"}), "('text2text-generation', model=model_name, device=0, model_kwargs={\n 'torch_dtype': torch.bfloat16})\n", (1160, 1263), False, 'from transformers import pipeline\n'), ((1758, 1781), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {}), '()\n', (1779, 1781), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((1812, 1837), 'llama_index.LangchainEmbedding', 'LangchainEmbedding', (['hfemb'], {}), '(hfemb)\n', (1830, 1837), False, 'from llama_index import LangchainEmbedding, GPTVectorStoreIndex as GPTSimpleVectorIndex\n'), ((1943, 2030), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'embed_model': 'embed_model'}), '(llm_predictor=llm_predictor, embed_model=\n embed_model)\n', (1971, 2030), False, 'from llama_index import LLMPredictor, ServiceContext\n'), ((2050, 2129), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTSimpleVectorIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (2085, 2129), True, 'from llama_index import LangchainEmbedding, GPTVectorStoreIndex as GPTSimpleVectorIndex\n'), ((1867, 1878), 'llama_index.Document', 'Document', (['t'], {}), '(t)\n', (1875, 1878), False, 'from llama_index import Document\n')]
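The only langchain piece in this entry is HuggingFaceEmbeddings; a minimal sketch of it in isolation, assuming sentence-transformers is installed (the default model is downloaded on first use).

from langchain.embeddings.huggingface import HuggingFaceEmbeddings

hfemb = HuggingFaceEmbeddings()
vector = hfemb.embed_query("remember that my favourite colour is green")
print(len(vector))  # dimensionality of the sentence embedding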
# imports import os, shutil, json, re import pathlib from langchain.document_loaders.unstructured import UnstructuredFileLoader from langchain.document_loaders.unstructured import UnstructuredAPIFileLoader from langchain.document_loaders import UnstructuredURLLoader from langchain.docstore.document import Document from google.cloud import storage import base64 import langchain.text_splitter as text_splitter from dotenv import load_dotenv import tempfile import hashlib from langchain.schema import Document import logging from my_llm.pubsub_manager import PubSubManager import datetime from .database import setup_database from .database import delete_row_from_source from .database import return_sources_last24 load_dotenv() def contains_url(message_data): url_pattern = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+') if url_pattern.search(message_data): return True else: return False def extract_urls(text): url_pattern = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+') urls = url_pattern.findall(text) return urls # utility functions def convert_to_txt(file_path): file_dir, file_name = os.path.split(file_path) file_base, file_ext = os.path.splitext(file_name) txt_file = os.path.join(file_dir, f"{file_base}.txt") shutil.copyfile(file_path, txt_file) return txt_file def compute_sha1_from_file(file_path): with open(file_path, "rb") as file: bytes = file.read() readable_hash = hashlib.sha1(bytes).hexdigest() return readable_hash def compute_sha1_from_content(content): readable_hash = hashlib.sha1(content).hexdigest() return readable_hash def add_file_to_gcs(filename: str, vector_name:str, bucket_name: str=None, metadata:dict=None): storage_client = storage.Client() bucket_name = bucket_name if bucket_name is not None else os.getenv('GCS_BUCKET', None) if bucket_name is None: raise ValueError("No bucket found to upload to: GCS_BUCKET returned None") if bucket_name.startswith("gs://"): bucket_name = bucket_name.removeprefix("gs://") logging.info(f"Bucket_name: {bucket_name}") bucket = storage_client.get_bucket(bucket_name) now = datetime.datetime.now() year = now.strftime("%Y") month = now.strftime("%m") day = now.strftime("%d") hour = now.strftime("%H") bucket_filepath = f"{vector_name}/{year}/{month}/{day}/{hour}/{os.path.basename(filename)}" blob = bucket.blob(bucket_filepath) the_metadata = { "vector_name": vector_name, } if metadata is not None: the_metadata.update(metadata) blob.metadata = the_metadata #TODO: create cloud storage pubsub subscription? 
blob.upload_from_filename(filename) logging.info(f"File {filename} uploaded to gs://{bucket_name}/{bucket_filepath}") # create pubsub topic and subscription if necessary to receive notifications from cloud storage pubsub_manager = PubSubManager(vector_name, pubsub_topic=f"app_to_pubsub_{vector_name}") sub_name = f"pubsub_to_store_{vector_name}" sub_exists = pubsub_manager.subscription_exists(sub_name) if not sub_exists: pubsub_manager.create_subscription(sub_name, push_endpoint=f"/pubsub_to_store/{vector_name}") setup_database(vector_name) return f"gs://{bucket_name}/{bucket_filepath}" def read_url_to_document(url: str, metadata: dict = None): loader = UnstructuredURLLoader(urls=[url]) docs = loader.load() if metadata is not None: for doc in docs: doc.metadata.update(metadata) logging.info(f"UnstructuredURLLoader docs: {docs}") return docs def read_file_to_document(gs_file: pathlib.Path, split=False, metadata: dict = None): #file_sha1 = compute_sha1_from_file(gs_file.name) try: #TODO: Use UnstructuredAPIFileLoader instead? logging.info(f"Sending {gs_file} to UnstructuredAPIFileLoader") loader = UnstructuredAPIFileLoader(gs_file, mode="elements", api_key="FAKE_API_KEY") if split: # only supported for some file types docs = loader.load_and_split() else: docs = loader.load() logging.info(f"Loaded docs for {gs_file} from UnstructuredAPIFileLoader") except ValueError as e: logging.info(f"Error for {gs_file} from UnstructuredAPIFileLoader: {str(e)}") if "file type is not supported in partition" in str(e): logging.info("trying locally via .txt conversion") txt_file = None try: # Convert the file to .txt and try again txt_file = convert_to_txt(gs_file) loader = UnstructuredFileLoader(txt_file, mode="elements") if split: docs = loader.load_and_split() else: docs = loader.load() except Exception as inner_e: raise Exception("An error occurred during txt conversion or loading.") from inner_e finally: # Ensure cleanup happens if txt_file was created if txt_file is not None and os.path.exists(txt_file): os.remove(txt_file) else: raise except Exception as e: logging.error(f"An unexpected error occurred for {gs_file}: {str(e)}") raise for doc in docs: #doc.metadata["file_sha1"] = file_sha1 logging.info(f"doc_content: {doc.page_content[:30]}") if metadata is not None: doc.metadata.update(metadata) logging.info(f"gs_file: {gs_file} turned into {len(docs)} documents") return docs def choose_splitter(extension: str, chunk_size: int=1024, chunk_overlap:int=0): if extension == ".py": return text_splitter.PythonCodeTextSplitter() elif extension == ".md": return text_splitter.MarkdownTextSplitter() return text_splitter.RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap) def remove_whitespace(page_content: str): return page_content.replace("\n", " ").replace("\r", " ").replace("\t", " ").replace(" ", " ") def chunk_doc_to_docs(documents: list, extension: str = ".md"): """Turns a Document object into a list of many Document chunks""" source_chunks = [] for document in documents: splitter = choose_splitter(extension) for chunk in splitter.split_text(remove_whitespace(document.page_content)): source_chunks.append(Document(page_content=chunk, metadata=document.metadata)) return source_chunks def data_to_embed_pubsub(data: dict, vector_name:str="documents"): """Triggered from a message on a Cloud Pub/Sub topic. 
Args: data JSON """ #hash = data['message']['data'] message_data = base64.b64decode(data['message']['data']).decode('utf-8') attributes = data['message'].get('attributes', {}) messageId = data['message'].get('messageId') publishTime = data['message'].get('publishTime') logging.info(f"data_to_embed_pubsub was triggered by messageId {messageId} published at {publishTime}") logging.info(f"data_to_embed_pubsub data: {message_data}") # pubsub from a Google Cloud Storage push topic if attributes.get("eventType", None) is not None and attributes.get("payloadFormat", None) is not None: eventType = attributes.get("eventType") payloadFormat = attributes.get("payloadFormat") if eventType == "OBJECT_FINALIZE" and payloadFormat == "JSON_API_V1": logging.info("Got valid event from Google Cloud Storage") the_object = attributes.get("objectId", None) if the_object is None: logging.info("No object found") return attributes if the_object.endswith("/"): logging.info("GCS object is a directory only") return attributes # https://cloud.google.com/storage/docs/json_api/v1/objects#resource-representations message_data = 'gs://' + attributes.get("bucketId") + '/' + the_object if '/' in the_object: bucket_vector_name = the_object.split('/')[0] if len(bucket_vector_name) > 0 and vector_name != bucket_vector_name: logging.info(f"Overwriting vector_name {vector_name} with {bucket_vector_name}") vector_name = bucket_vector_name attributes["attrs"] = f"namespace:{vector_name}" logging.info(f"Constructed message_data: {message_data}") metadata = attributes logging.info(f"Found metadata in pubsub: {metadata}") chunks = [] if message_data.startswith('"gs://'): message_data = message_data.strip('\"') if message_data.startswith("gs://"): logging.info("Detected gs://") bucket_name, file_name = message_data[5:].split("/", 1) # Create a client storage_client = storage.Client() # Download the file from GCS bucket = storage_client.get_bucket(bucket_name) blob = bucket.blob(file_name) file_name=pathlib.Path(file_name) with tempfile.TemporaryDirectory() as temp_dir: tmp_file_path = os.path.join(temp_dir, file_name.name) blob.download_to_filename(tmp_file_path) the_metadata = { "source": message_data, "type": "file_load_gcs", "bucket_name": bucket_name } metadata.update(the_metadata) docs = read_file_to_document(tmp_file_path, metadata=metadata) chunks = chunk_doc_to_docs(docs, file_name.suffix) logging.info(f"Split {file_name} into {len(chunks)} chunks") elif message_data.startswith("http"): logging.info(f"Got http message: {message_data}") # just in case, extract the URL again urls = extract_urls(message_data) docs = [] for url in urls: metadata["source"] = url metadata["url"] = url metadata["type"] = "url_load" doc = read_url_to_document(url, metadata=metadata) docs.extend(doc) chunks = chunk_doc_to_docs(docs) logging.info(f"Split {url} into {len(chunks)} chunks") else: logging.info("No gs:// detected") the_json = json.loads(message_data) the_metadata = the_json.get("metadata", {}) metadata.update(the_metadata) the_content = the_json.get("page_content", None) if metadata.get("source", None) is not None: metadata["source"] = "No source embedded" if the_content is None: logging.info("No content found") return {"metadata": "No content found"} docs = [Document(page_content=the_content, metadata=metadata)] publish_if_urls(the_content, vector_name) chunks = chunk_doc_to_docs(docs) logging.info(f"Split content into {len(chunks)} chunks") publish_chunks(chunks, vector_name=vector_name) logging.info(f"data_to_embed_pubsub published chunks 
with metadata: {metadata}") pubsub_manager = PubSubManager(vector_name, pubsub_topic=f"pubsub_state_messages") pubsub_manager.publish_message(f"pubsub_chunk - Added doc with metadata: {metadata} to {vector_name}") return metadata def publish_if_urls(the_content, vector_name): """ Extracts URLs and puts them in a queue for processing on PubSub """ if contains_url(the_content): logging.info("Detected http://") urls = extract_urls(the_content) for url in urls: publish_text(url, vector_name) def publish_chunks(chunks: list[Document], vector_name: str): logging.info("Publishing chunks to embed_chunk") pubsub_manager = PubSubManager(vector_name, pubsub_topic=f"embed_chunk_{vector_name}") sub_name = f"pubsub_chunk_to_store_{vector_name}" sub_exists = pubsub_manager.subscription_exists(sub_name) if not sub_exists: pubsub_manager.create_subscription(sub_name, push_endpoint=f"/pubsub_chunk_to_store/{vector_name}") setup_database(vector_name) for chunk in chunks: # Convert chunk to string, as Pub/Sub messages must be strings or bytes chunk_str = chunk.json() pubsub_manager.publish_message(chunk_str) def publish_text(text:str, vector_name: str): logging.info(f"Publishing text to app_to_pubsub_{vector_name}") pubsub_manager = PubSubManager(vector_name, pubsub_topic=f"app_to_pubsub_{vector_name}") sub_name = f"pubsub_to_store_{vector_name}" sub_exists = pubsub_manager.subscription_exists(sub_name) if not sub_exists: pubsub_manager.create_subscription(sub_name, push_endpoint=f"/pubsub_chunk_to_store/{vector_name}") setup_database(vector_name) pubsub_manager.publish_message(text) def delete_source(source:str, vector_name:str): logging.info(f"Deleting source: {source} from {vector_name}") delete_row_from_source(source, vector_name) logging.info(f"Deleted source: {source} from {vector_name}") def return_sources_last24_(vector_name:str): logging.info(f"Returning sources last 24") rows = return_sources_last24(vector_name) return rows
[ "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.document_loaders.UnstructuredURLLoader", "langchain.document_loaders.unstructured.UnstructuredAPIFileLoader", "langchain.text_splitter.MarkdownTextSplitter", "langchain.schema.Document", "langchain.document_loaders.unstructured.UnstructuredFileLoader", "langchain.text_splitter.PythonCodeTextSplitter" ]
[((719, 732), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (730, 732), False, 'from dotenv import load_dotenv\n'), ((784, 892), 're.compile', 're.compile', (['"""http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\\\\\(\\\\\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"""'], {}), "(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\\\\\(\\\\\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n )\n", (794, 892), False, 'import os, shutil, json, re\n'), ((1015, 1123), 're.compile', 're.compile', (['"""http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\\\\\(\\\\\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"""'], {}), "(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\\\\\(\\\\\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n )\n", (1025, 1123), False, 'import os, shutil, json, re\n'), ((1242, 1266), 'os.path.split', 'os.path.split', (['file_path'], {}), '(file_path)\n', (1255, 1266), False, 'import os, shutil, json, re\n'), ((1293, 1320), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (1309, 1320), False, 'import os, shutil, json, re\n'), ((1336, 1378), 'os.path.join', 'os.path.join', (['file_dir', 'f"""{file_base}.txt"""'], {}), "(file_dir, f'{file_base}.txt')\n", (1348, 1378), False, 'import os, shutil, json, re\n'), ((1383, 1419), 'shutil.copyfile', 'shutil.copyfile', (['file_path', 'txt_file'], {}), '(file_path, txt_file)\n', (1398, 1419), False, 'import os, shutil, json, re\n'), ((1869, 1885), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (1883, 1885), False, 'from google.cloud import storage\n'), ((2200, 2243), 'logging.info', 'logging.info', (['f"""Bucket_name: {bucket_name}"""'], {}), "(f'Bucket_name: {bucket_name}')\n", (2212, 2243), False, 'import logging\n'), ((2306, 2329), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2327, 2329), False, 'import datetime\n'), ((2853, 2939), 'logging.info', 'logging.info', (['f"""File {filename} uploaded to gs://{bucket_name}/{bucket_filepath}"""'], {}), "(\n f'File {filename} uploaded to gs://{bucket_name}/{bucket_filepath}')\n", (2865, 2939), False, 'import logging\n'), ((3058, 3129), 'my_llm.pubsub_manager.PubSubManager', 'PubSubManager', (['vector_name'], {'pubsub_topic': 'f"""app_to_pubsub_{vector_name}"""'}), "(vector_name, pubsub_topic=f'app_to_pubsub_{vector_name}')\n", (3071, 3129), False, 'from my_llm.pubsub_manager import PubSubManager\n'), ((3584, 3617), 'langchain.document_loaders.UnstructuredURLLoader', 'UnstructuredURLLoader', ([], {'urls': '[url]'}), '(urls=[url])\n', (3605, 3617), False, 'from langchain.document_loaders import UnstructuredURLLoader\n'), ((3748, 3799), 'logging.info', 'logging.info', (['f"""UnstructuredURLLoader docs: {docs}"""'], {}), "(f'UnstructuredURLLoader docs: {docs}')\n", (3760, 3799), False, 'import logging\n'), ((6112, 6212), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'text_splitter.RecursiveCharacterTextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size,\n chunk_overlap=chunk_overlap)\n', (6156, 6212), True, 'import langchain.text_splitter as text_splitter\n'), ((7229, 7342), 'logging.info', 'logging.info', (['f"""data_to_embed_pubsub was triggered by messageId {messageId} published at {publishTime}"""'], {}), "(\n f'data_to_embed_pubsub was triggered by messageId {messageId} published at {publishTime}'\n )\n", (7241, 7342), False, 'import logging\n'), ((7337, 7395), 'logging.info', 'logging.info', (['f"""data_to_embed_pubsub data: {message_data}"""'], {}), "(f'data_to_embed_pubsub data: 
{message_data}')\n", (7349, 7395), False, 'import logging\n'), ((8835, 8888), 'logging.info', 'logging.info', (['f"""Found metadata in pubsub: {metadata}"""'], {}), "(f'Found metadata in pubsub: {metadata}')\n", (8847, 8888), False, 'import logging\n'), ((11324, 11409), 'logging.info', 'logging.info', (['f"""data_to_embed_pubsub published chunks with metadata: {metadata}"""'], {}), "(f'data_to_embed_pubsub published chunks with metadata: {metadata}'\n )\n", (11336, 11409), False, 'import logging\n'), ((11426, 11491), 'my_llm.pubsub_manager.PubSubManager', 'PubSubManager', (['vector_name'], {'pubsub_topic': 'f"""pubsub_state_messages"""'}), "(vector_name, pubsub_topic=f'pubsub_state_messages')\n", (11439, 11491), False, 'from my_llm.pubsub_manager import PubSubManager\n'), ((12019, 12067), 'logging.info', 'logging.info', (['"""Publishing chunks to embed_chunk"""'], {}), "('Publishing chunks to embed_chunk')\n", (12031, 12067), False, 'import logging\n'), ((12094, 12163), 'my_llm.pubsub_manager.PubSubManager', 'PubSubManager', (['vector_name'], {'pubsub_topic': 'f"""embed_chunk_{vector_name}"""'}), "(vector_name, pubsub_topic=f'embed_chunk_{vector_name}')\n", (12107, 12163), False, 'from my_llm.pubsub_manager import PubSubManager\n'), ((12749, 12812), 'logging.info', 'logging.info', (['f"""Publishing text to app_to_pubsub_{vector_name}"""'], {}), "(f'Publishing text to app_to_pubsub_{vector_name}')\n", (12761, 12812), False, 'import logging\n'), ((12834, 12905), 'my_llm.pubsub_manager.PubSubManager', 'PubSubManager', (['vector_name'], {'pubsub_topic': 'f"""app_to_pubsub_{vector_name}"""'}), "(vector_name, pubsub_topic=f'app_to_pubsub_{vector_name}')\n", (12847, 12905), False, 'from my_llm.pubsub_manager import PubSubManager\n'), ((13331, 13392), 'logging.info', 'logging.info', (['f"""Deleting source: {source} from {vector_name}"""'], {}), "(f'Deleting source: {source} from {vector_name}')\n", (13343, 13392), False, 'import logging\n'), ((13445, 13505), 'logging.info', 'logging.info', (['f"""Deleted source: {source} from {vector_name}"""'], {}), "(f'Deleted source: {source} from {vector_name}')\n", (13457, 13505), False, 'import logging\n'), ((13557, 13599), 'logging.info', 'logging.info', (['f"""Returning sources last 24"""'], {}), "(f'Returning sources last 24')\n", (13569, 13599), False, 'import logging\n'), ((1949, 1978), 'os.getenv', 'os.getenv', (['"""GCS_BUCKET"""', 'None'], {}), "('GCS_BUCKET', None)\n", (1958, 1978), False, 'import os, shutil, json, re\n'), ((4044, 4107), 'logging.info', 'logging.info', (['f"""Sending {gs_file} to UnstructuredAPIFileLoader"""'], {}), "(f'Sending {gs_file} to UnstructuredAPIFileLoader')\n", (4056, 4107), False, 'import logging\n'), ((4125, 4200), 'langchain.document_loaders.unstructured.UnstructuredAPIFileLoader', 'UnstructuredAPIFileLoader', (['gs_file'], {'mode': '"""elements"""', 'api_key': '"""FAKE_API_KEY"""'}), "(gs_file, mode='elements', api_key='FAKE_API_KEY')\n", (4150, 4200), False, 'from langchain.document_loaders.unstructured import UnstructuredAPIFileLoader\n'), ((5632, 5685), 'logging.info', 'logging.info', (['f"""doc_content: {doc.page_content[:30]}"""'], {}), "(f'doc_content: {doc.page_content[:30]}')\n", (5644, 5685), False, 'import logging\n'), ((5976, 6014), 'langchain.text_splitter.PythonCodeTextSplitter', 'text_splitter.PythonCodeTextSplitter', ([], {}), '()\n', (6012, 6014), True, 'import langchain.text_splitter as text_splitter\n'), ((9047, 9077), 'logging.info', 'logging.info', (['"""Detected gs://"""'], {}), "('Detected 
gs://')\n", (9059, 9077), False, 'import logging\n'), ((9194, 9210), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (9208, 9210), False, 'from google.cloud import storage\n'), ((9362, 9385), 'pathlib.Path', 'pathlib.Path', (['file_name'], {}), '(file_name)\n', (9374, 9385), False, 'import pathlib\n'), ((11795, 11827), 'logging.info', 'logging.info', (['"""Detected http://"""'], {}), "('Detected http://')\n", (11807, 11827), False, 'import logging\n'), ((1691, 1712), 'hashlib.sha1', 'hashlib.sha1', (['content'], {}), '(content)\n', (1703, 1712), False, 'import hashlib\n'), ((2518, 2544), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (2534, 2544), False, 'import os, shutil, json, re\n'), ((4379, 4452), 'logging.info', 'logging.info', (['f"""Loaded docs for {gs_file} from UnstructuredAPIFileLoader"""'], {}), "(f'Loaded docs for {gs_file} from UnstructuredAPIFileLoader')\n", (4391, 4452), False, 'import logging\n'), ((6059, 6095), 'langchain.text_splitter.MarkdownTextSplitter', 'text_splitter.MarkdownTextSplitter', ([], {}), '()\n', (6093, 6095), True, 'import langchain.text_splitter as text_splitter\n'), ((7009, 7050), 'base64.b64decode', 'base64.b64decode', (["data['message']['data']"], {}), "(data['message']['data'])\n", (7025, 7050), False, 'import base64\n'), ((7751, 7808), 'logging.info', 'logging.info', (['"""Got valid event from Google Cloud Storage"""'], {}), "('Got valid event from Google Cloud Storage')\n", (7763, 7808), False, 'import logging\n'), ((8741, 8798), 'logging.info', 'logging.info', (['f"""Constructed message_data: {message_data}"""'], {}), "(f'Constructed message_data: {message_data}')\n", (8753, 8798), False, 'import logging\n'), ((9400, 9429), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (9427, 9429), False, 'import tempfile\n'), ((9471, 9509), 'os.path.join', 'os.path.join', (['temp_dir', 'file_name.name'], {}), '(temp_dir, file_name.name)\n', (9483, 9509), False, 'import os, shutil, json, re\n'), ((10036, 10085), 'logging.info', 'logging.info', (['f"""Got http message: {message_data}"""'], {}), "(f'Got http message: {message_data}')\n", (10048, 10085), False, 'import logging\n'), ((10548, 10581), 'logging.info', 'logging.info', (['"""No gs:// detected"""'], {}), "('No gs:// detected')\n", (10560, 10581), False, 'import logging\n'), ((10610, 10634), 'json.loads', 'json.loads', (['message_data'], {}), '(message_data)\n', (10620, 10634), False, 'import os, shutil, json, re\n'), ((1573, 1592), 'hashlib.sha1', 'hashlib.sha1', (['bytes'], {}), '(bytes)\n', (1585, 1592), False, 'import hashlib\n'), ((4643, 4693), 'logging.info', 'logging.info', (['"""trying locally via .txt conversion"""'], {}), "('trying locally via .txt conversion')\n", (4655, 4693), False, 'import logging\n'), ((6705, 6761), 'langchain.schema.Document', 'Document', ([], {'page_content': 'chunk', 'metadata': 'document.metadata'}), '(page_content=chunk, metadata=document.metadata)\n', (6713, 6761), False, 'from langchain.schema import Document\n'), ((7919, 7950), 'logging.info', 'logging.info', (['"""No object found"""'], {}), "('No object found')\n", (7931, 7950), False, 'import logging\n'), ((8055, 8101), 'logging.info', 'logging.info', (['"""GCS object is a directory only"""'], {}), "('GCS object is a directory only')\n", (8067, 8101), False, 'import logging\n'), ((10935, 10967), 'logging.info', 'logging.info', (['"""No content found"""'], {}), "('No content found')\n", (10947, 10967), False, 'import logging\n'), 
((11045, 11098), 'langchain.schema.Document', 'Document', ([], {'page_content': 'the_content', 'metadata': 'metadata'}), '(page_content=the_content, metadata=metadata)\n', (11053, 11098), False, 'from langchain.schema import Document\n'), ((4872, 4921), 'langchain.document_loaders.unstructured.UnstructuredFileLoader', 'UnstructuredFileLoader', (['txt_file'], {'mode': '"""elements"""'}), "(txt_file, mode='elements')\n", (4894, 4921), False, 'from langchain.document_loaders.unstructured import UnstructuredFileLoader\n'), ((8533, 8618), 'logging.info', 'logging.info', (['f"""Overwriting vector_name {vector_name} with {bucket_vector_name}"""'], {}), "(f'Overwriting vector_name {vector_name} with {bucket_vector_name}'\n )\n", (8545, 8618), False, 'import logging\n'), ((5335, 5359), 'os.path.exists', 'os.path.exists', (['txt_file'], {}), '(txt_file)\n', (5349, 5359), False, 'import os, shutil, json, re\n'), ((5381, 5400), 'os.remove', 'os.remove', (['txt_file'], {}), '(txt_file)\n', (5390, 5400), False, 'import os, shutil, json, re\n')]
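A minimal, self-contained sketch of the chunking path used in the module above (wrap text in a Document, split it, emit chunk Documents), runnable locally with no GCS or Pub/Sub. The sample text, metadata values, and chunk size are illustrative assumptions, not values taken from the pipeline.
from langchain.schema import Document
import langchain.text_splitter as text_splitter

def demo_chunking() -> list:
    # A toy document standing in for what read_file_to_document would return.
    doc = Document(
        page_content="LangChain splits long documents into smaller chunks. " * 40,
        metadata={"source": "gs://example-bucket/demo.txt", "type": "file_load_gcs"},
    )
    # Mirrors choose_splitter() for a generic extension: recursive character splitting.
    splitter = text_splitter.RecursiveCharacterTextSplitter(chunk_size=256, chunk_overlap=0)
    chunks = [
        Document(page_content=chunk, metadata=doc.metadata)
        for chunk in splitter.split_text(doc.page_content)
    ]
    print(f"Split 1 document into {len(chunks)} chunks")
    return chunks

if __name__ == "__main__":
    demo_chunking()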
from langchain.llms import LlamaCpp from langchain.chat_models import ChatOpenAI from langchain.chains.llm import LLMChain from langchain.prompts import PromptTemplate from langchain.callbacks.manager import CallbackManager from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler from langchain.cache import SQLiteCache import langchain import itertools from utils import setup_logger from dotenv import load_dotenv import os # Load the .env file load_dotenv() OPEN_AI_KEY = os.getenv("OPEN_AI_KEY") os.environ["OPENAI_API_KEY"] = OPEN_AI_KEY logger = setup_logger('contr_detector_logger', 'app.log') langchain.llm_cache = SQLiteCache(database_path=".langchain.db") callback_manager = CallbackManager([StreamingStdOutCallbackHandler()]) llm_llama = LlamaCpp( # model_path="llama-2-7b.Q4_K_M.gguf", model_path="models/OpenOrca-Platypus2-13B-Q4_K_M.gguf", temperature=0, max_tokens=1000, top_p=3, callback_manager=callback_manager, verbose=True, # Verbose is required to pass to the callback manager ) # TODO: move the prompt to a file to be configured prompt_template = """ Statement 1: {doc1} Statement 2: {doc2} Question: Are these two statements contradictory? Answer "yes" or "no". """ prompt = PromptTemplate.from_template(prompt_template) llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_key=OPEN_AI_KEY) llm_chain = LLMChain(llm=llm, prompt=prompt) def detect_contradictions(documents, metadatas, model_type: str): contrs = [] for doc1, doc2 in itertools.combinations(zip(documents, metadatas), 2): # print(doc1) doc1, meta1 = doc1 doc2, meta2 = doc2 if model_type == "openAI": llm = llm_chain result = llm_chain({"doc1": doc1, "doc2": doc2}, return_only_outputs=True) print(result) if "yes" in result['text'].lower(): logger.info(f"Contradiction: {doc1} {doc2}") print(f"Contradiction: {doc1} {doc2}") contrs.append(((doc1, meta1), (doc2, meta2))) # break # TODO: remove else: logger.info(f"No contradiction: {doc1} {doc2}") print(f"No contradiction: {doc1} {doc2}") else: llm = llm_llama prompt = f""" Statement 1: {doc1} Statement 2: {doc2} Question: Are these two statements contradictory? Answer "yes" or "no". """ if "yes" in llm(prompt).lower(): logger.info(f"Contradiction: {doc1} {doc2}") print(f"Contradiction: {doc1} {doc2}") contrs.append(((doc1, meta1), (doc2, meta2))) else: logger.info(f"No contradiction: {doc1} {doc2}") print(f"No contradiction: {doc1} {doc2}") print("Done with checking for contradictions") print(contrs) return contrs
[ "langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler", "langchain.chains.llm.LLMChain", "langchain.chat_models.ChatOpenAI", "langchain.llms.LlamaCpp", "langchain.cache.SQLiteCache", "langchain.prompts.PromptTemplate.from_template" ]
[((476, 489), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (487, 489), False, 'from dotenv import load_dotenv\n'), ((505, 529), 'os.getenv', 'os.getenv', (['"""OPEN_AI_KEY"""'], {}), "('OPEN_AI_KEY')\n", (514, 529), False, 'import os\n'), ((584, 632), 'utils.setup_logger', 'setup_logger', (['"""contr_detector_logger"""', '"""app.log"""'], {}), "('contr_detector_logger', 'app.log')\n", (596, 632), False, 'from utils import setup_logger\n'), ((655, 697), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': '""".langchain.db"""'}), "(database_path='.langchain.db')\n", (666, 697), False, 'from langchain.cache import SQLiteCache\n'), ((783, 946), 'langchain.llms.LlamaCpp', 'LlamaCpp', ([], {'model_path': '"""models/OpenOrca-Platypus2-13B-Q4_K_M.gguf"""', 'temperature': '(0)', 'max_tokens': '(1000)', 'top_p': '(3)', 'callback_manager': 'callback_manager', 'verbose': '(True)'}), "(model_path='models/OpenOrca-Platypus2-13B-Q4_K_M.gguf',\n temperature=0, max_tokens=1000, top_p=3, callback_manager=\n callback_manager, verbose=True)\n", (791, 946), False, 'from langchain.llms import LlamaCpp\n'), ((1340, 1385), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['prompt_template'], {}), '(prompt_template)\n', (1368, 1385), False, 'from langchain.prompts import PromptTemplate\n'), ((1393, 1479), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""', 'openai_api_key': 'OPEN_AI_KEY'}), "(temperature=0, model_name='gpt-3.5-turbo', openai_api_key=\n OPEN_AI_KEY)\n", (1403, 1479), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1487, 1519), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (1495, 1519), False, 'from langchain.chains.llm import LLMChain\n'), ((736, 768), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (766, 768), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n')]
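To make the pairwise-comparison logic in detect_contradictions easier to follow, here is a hedged sketch of the same loop with the LLM call replaced by a stub, so it runs without model weights or an API key. fake_judge and the sample statements are invented for illustration only.
import itertools

def fake_judge(statement_a: str, statement_b: str) -> str:
    # Placeholder for the "yes"/"no" answer the LLMChain / LlamaCpp prompt asks for.
    return "yes" if ("open" in statement_a and "closed" in statement_b) else "no"

def find_contradictions(documents: list, metadatas: list) -> list:
    contradictions = []
    # Same pairing strategy as detect_contradictions: every unordered pair exactly once.
    for (doc1, meta1), (doc2, meta2) in itertools.combinations(zip(documents, metadatas), 2):
        if "yes" in fake_judge(doc1, doc2).lower():
            contradictions.append(((doc1, meta1), (doc2, meta2)))
    return contradictions

print(find_contradictions(
    ["The store is open on Sundays.", "The store is closed on Sundays."],
    [{"source": "policy_v1.txt"}, {"source": "policy_v2.txt"}],
))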
import streamlit as st import torch from transformers import ( AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, TextStreamer, ) import whisper import os ############ config ############ # general config whisper_model_names=["tiny", "base", "small", "medium", "large"] data_root_path = os.path.join('.','data') file_types = ['pdf','png','jpg','wav'] for filetype in file_types: if not os.path.exists(os.path.join(data_root_path,filetype)): os.makedirs(os.path.join(data_root_path,filetype)) # streamlit config ## Initialize chat history if "messages" not in st.session_state: st.session_state.messages = [{"role": "assistant", "content": "Type a message to start a conversation"}] ############ User Interface ############ # Title st.title('LLAMA RAG Demo') st.divider() st.title('Model name and auth token') # Configs model_name = st.text_input('Enter your Hugging Face model name', value="meta-llama/Llama-2-7b-chat-hf") auth_token = st.text_input('Enter your Hugging Face auth token', value="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") system_prompt = st.text_area('Enter your system prompt', value="You are a helpful, respectful and honest assistant.") whisper_model_name = st.selectbox('Select your whisper model',options=whisper_model_names) use_cuda = st.checkbox('Use CUDA', value=True) isfile = False ## File uploader from streamlit import file_uploader uploadedfile = file_uploader("Choose a \"PDF\" file (now support only pdf)") if uploadedfile is not None: isfile = True with open(os.path.join(data_root_path,'pdf',uploadedfile.name),"wb") as f: f.write(uploadedfile.getbuffer()) st.success("File uploaded successfully : {}".format(uploadedfile.name)) st.divider() ############ function ############ def clear_chat_history(): st.session_state.messages = [{"role": "assistant", "content": "Type a message to start a conversation"}] for file in os.listdir(os.path.join(data_root_path,'pdf')): os.remove(os.path.join(data_root_path,'pdf',file)) st.button('Clear Chat History', on_click=clear_chat_history) # Load Tokenizer and Model @st.cache_resource def get_tokenizer_model(): # Create tokenizer tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir='./model/', token=auth_token) # Create model quantization_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type='nf4', bnb_4bit_compute_dtype=torch.float16, ) model = AutoModelForCausalLM.from_pretrained(model_name, cache_dir='./model/', token=auth_token, quantization_config=quantization_config, # rope_scaling={"type":"dynamic", "factor":2}, max_memory=f'{int(torch.cuda.mem_get_info()[0]/1024**3)-2}GB' ) return tokenizer, model # RAG engine def get_rag_queryengine(_tokenizer, model, system_prompt): from llama_index.prompts.prompts import SimpleInputPrompt from llama_index.llms import HuggingFaceLLM system_prompt_ = f"[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n" query_wrapper_prompt = SimpleInputPrompt("{query_str} [/INST]") llm = HuggingFaceLLM(context_window=4096, max_new_tokens=256, system_prompt=system_prompt_, query_wrapper_prompt=query_wrapper_prompt, model=model, tokenizer=_tokenizer ) # Create embeddings from llama_index.embeddings import LangchainEmbedding from langchain.embeddings.huggingface import HuggingFaceEmbeddings embeddings=LangchainEmbedding( HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2") ) from llama_index import ServiceContext from llama_index import set_global_service_context service_context = ServiceContext.from_defaults( chunk_size=1024, llm=llm, embed_model=embeddings ) set_global_service_context(service_context) 
from llama_index import VectorStoreIndex, download_loader PyMuPDFReader = download_loader("PyMuPDFReader") loader = PyMuPDFReader() for file in os.listdir(os.path.join(data_root_path,'pdf')): # !!! This is not a good way to load data. I will fix this later # this makes the only last file in the folder to be loaded documents = loader.load_data(file_path=os.path.join(data_root_path,'pdf',file), metadata=True) index = VectorStoreIndex.from_documents(documents) query_engine = index.as_query_engine() return query_engine # whisper def whisper_stt(*,model, device, audio_path)->str: # load model # # model : model name of whisper. default is base # # devie : argument from args. default is cpu audio_model = whisper.load_model(model,device) # stt - audio.wav result = audio_model.transcribe(audio_path) # return result : str list return result["text"] ############ main ############ # Load Tokenizer and Model, RAG engine tokenizer, model = get_tokenizer_model() if isfile: engine = get_rag_queryengine(tokenizer, model, system_prompt) # Display chat messages from history on app rerun for message in st.session_state.messages: with st.chat_message(message["role"]): st.markdown(message["content"]) prompt = st.chat_input('User: ') if prompt: # update(append) chat history st.session_state.messages.append({"role": "user", "content": prompt}) with st.chat_message("user"): st.write(prompt) # Here... text streamer does not work as well as I intended with streamlit # I will try to fix this later if st.session_state.messages[-1]["role"] == "user": if isfile: with st.chat_message("assistant"): # model inference output_text = engine.query(prompt) placeholder = st.empty() placeholder.markdown(output_text) st.session_state.messages.append({"role": "assistant", "content": output_text}) else: with st.chat_message("assistant"): # model inference output_text = "Please upload a file first" placeholder = st.empty() placeholder.markdown(output_text) st.session_state.messages.append({"role": "assistant", "content": output_text})
[ "langchain.embeddings.huggingface.HuggingFaceEmbeddings" ]
[((306, 331), 'os.path.join', 'os.path.join', (['"""."""', '"""data"""'], {}), "('.', 'data')\n", (318, 331), False, 'import os\n'), ((772, 798), 'streamlit.title', 'st.title', (['"""LLAMA RAG Demo"""'], {}), "('LLAMA RAG Demo')\n", (780, 798), True, 'import streamlit as st\n'), ((799, 811), 'streamlit.divider', 'st.divider', ([], {}), '()\n', (809, 811), True, 'import streamlit as st\n'), ((813, 850), 'streamlit.title', 'st.title', (['"""Model name and auth token"""'], {}), "('Model name and auth token')\n", (821, 850), True, 'import streamlit as st\n'), ((874, 969), 'streamlit.text_input', 'st.text_input', (['"""Enter your Hugging Face model name"""'], {'value': '"""meta-llama/Llama-2-7b-chat-hf"""'}), "('Enter your Hugging Face model name', value=\n 'meta-llama/Llama-2-7b-chat-hf')\n", (887, 969), True, 'import streamlit as st\n'), ((978, 1081), 'streamlit.text_input', 'st.text_input', (['"""Enter your Hugging Face auth token"""'], {'value': '"""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"""'}), "('Enter your Hugging Face auth token', value=\n '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')\n", (991, 1081), True, 'import streamlit as st\n'), ((1093, 1199), 'streamlit.text_area', 'st.text_area', (['"""Enter your system prompt"""'], {'value': '"""You are a helpful, respectful and honest assistant."""'}), "('Enter your system prompt', value=\n 'You are a helpful, respectful and honest assistant.')\n", (1105, 1199), True, 'import streamlit as st\n'), ((1216, 1286), 'streamlit.selectbox', 'st.selectbox', (['"""Select your whisper model"""'], {'options': 'whisper_model_names'}), "('Select your whisper model', options=whisper_model_names)\n", (1228, 1286), True, 'import streamlit as st\n'), ((1297, 1332), 'streamlit.checkbox', 'st.checkbox', (['"""Use CUDA"""'], {'value': '(True)'}), "('Use CUDA', value=True)\n", (1308, 1332), True, 'import streamlit as st\n'), ((1416, 1475), 'streamlit.file_uploader', 'file_uploader', (['"""Choose a "PDF" file (now support only pdf)"""'], {}), '(\'Choose a "PDF" file (now support only pdf)\')\n', (1429, 1475), False, 'from streamlit import file_uploader\n'), ((1722, 1734), 'streamlit.divider', 'st.divider', ([], {}), '()\n', (1732, 1734), True, 'import streamlit as st\n'), ((2038, 2098), 'streamlit.button', 'st.button', (['"""Clear Chat History"""'], {'on_click': 'clear_chat_history'}), "('Clear Chat History', on_click=clear_chat_history)\n", (2047, 2098), True, 'import streamlit as st\n'), ((5259, 5282), 'streamlit.chat_input', 'st.chat_input', (['"""User: """'], {}), "('User: ')\n", (5272, 5282), True, 'import streamlit as st\n'), ((2212, 2298), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_name'], {'cache_dir': '"""./model/"""', 'token': 'auth_token'}), "(model_name, cache_dir='./model/', token=\n auth_token)\n", (2241, 2298), False, 'from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, TextStreamer\n'), ((2339, 2445), 'transformers.BitsAndBytesConfig', 'BitsAndBytesConfig', ([], {'load_in_4bit': '(True)', 'bnb_4bit_quant_type': '"""nf4"""', 'bnb_4bit_compute_dtype': 'torch.float16'}), "(load_in_4bit=True, bnb_4bit_quant_type='nf4',\n bnb_4bit_compute_dtype=torch.float16)\n", (2357, 2445), False, 'from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, TextStreamer\n'), ((3074, 3114), 'llama_index.prompts.prompts.SimpleInputPrompt', 'SimpleInputPrompt', (['"""{query_str} [/INST]"""'], {}), "('{query_str} [/INST]')\n", (3091, 3114), False, 'from llama_index.prompts.prompts 
import SimpleInputPrompt\n'), ((3125, 3297), 'llama_index.llms.HuggingFaceLLM', 'HuggingFaceLLM', ([], {'context_window': '(4096)', 'max_new_tokens': '(256)', 'system_prompt': 'system_prompt_', 'query_wrapper_prompt': 'query_wrapper_prompt', 'model': 'model', 'tokenizer': '_tokenizer'}), '(context_window=4096, max_new_tokens=256, system_prompt=\n system_prompt_, query_wrapper_prompt=query_wrapper_prompt, model=model,\n tokenizer=_tokenizer)\n', (3139, 3297), False, 'from llama_index.llms import HuggingFaceLLM\n'), ((3786, 3864), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'chunk_size': '(1024)', 'llm': 'llm', 'embed_model': 'embeddings'}), '(chunk_size=1024, llm=llm, embed_model=embeddings)\n', (3814, 3864), False, 'from llama_index import ServiceContext\n'), ((3899, 3942), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (3925, 3942), False, 'from llama_index import set_global_service_context\n'), ((4026, 4058), 'llama_index.download_loader', 'download_loader', (['"""PyMuPDFReader"""'], {}), "('PyMuPDFReader')\n", (4041, 4058), False, 'from llama_index import VectorStoreIndex, download_loader\n'), ((4407, 4449), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (4438, 4449), False, 'from llama_index import VectorStoreIndex, download_loader\n'), ((4720, 4753), 'whisper.load_model', 'whisper.load_model', (['model', 'device'], {}), '(model, device)\n', (4738, 4753), False, 'import whisper\n'), ((5332, 5401), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (5364, 5401), True, 'import streamlit as st\n'), ((1937, 1972), 'os.path.join', 'os.path.join', (['data_root_path', '"""pdf"""'], {}), "(data_root_path, 'pdf')\n", (1949, 1972), False, 'import os\n'), ((3607, 3659), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""all-MiniLM-L6-v2"""'}), "(model_name='all-MiniLM-L6-v2')\n", (3628, 3659), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((4115, 4150), 'os.path.join', 'os.path.join', (['data_root_path', '"""pdf"""'], {}), "(data_root_path, 'pdf')\n", (4127, 4150), False, 'import os\n'), ((5175, 5207), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (5190, 5207), True, 'import streamlit as st\n'), ((5217, 5248), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (5228, 5248), True, 'import streamlit as st\n'), ((5411, 5434), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (5426, 5434), True, 'import streamlit as st\n'), ((5444, 5460), 'streamlit.write', 'st.write', (['prompt'], {}), '(prompt)\n', (5452, 5460), True, 'import streamlit as st\n'), ((5847, 5926), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': output_text}"], {}), "({'role': 'assistant', 'content': output_text})\n", (5879, 5926), True, 'import streamlit as st\n'), ((6157, 6236), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': output_text}"], {}), "({'role': 'assistant', 'content': output_text})\n", (6189, 6236), True, 'import streamlit as st\n'), ((424, 462), 'os.path.join', 'os.path.join', 
(['data_root_path', 'filetype'], {}), '(data_root_path, filetype)\n', (436, 462), False, 'import os\n'), ((484, 522), 'os.path.join', 'os.path.join', (['data_root_path', 'filetype'], {}), '(data_root_path, filetype)\n', (496, 522), False, 'import os\n'), ((1539, 1593), 'os.path.join', 'os.path.join', (['data_root_path', '"""pdf"""', 'uploadedfile.name'], {}), "(data_root_path, 'pdf', uploadedfile.name)\n", (1551, 1593), False, 'import os\n'), ((1992, 2033), 'os.path.join', 'os.path.join', (['data_root_path', '"""pdf"""', 'file'], {}), "(data_root_path, 'pdf', file)\n", (2004, 2033), False, 'import os\n'), ((5648, 5676), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (5663, 5676), True, 'import streamlit as st\n'), ((5782, 5792), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (5790, 5792), True, 'import streamlit as st\n'), ((5951, 5979), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (5966, 5979), True, 'import streamlit as st\n'), ((6092, 6102), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (6100, 6102), True, 'import streamlit as st\n'), ((4339, 4380), 'os.path.join', 'os.path.join', (['data_root_path', '"""pdf"""', 'file'], {}), "(data_root_path, 'pdf', file)\n", (4351, 4380), False, 'import os\n'), ((2716, 2741), 'torch.cuda.mem_get_info', 'torch.cuda.mem_get_info', ([], {}), '()\n', (2739, 2741), False, 'import torch\n')]
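The only langchain piece in the Streamlit app above is the HuggingFaceEmbeddings model wrapped for llama_index; the short sketch below exercises that embedder on its own. It assumes the sentence-transformers checkpoint "all-MiniLM-L6-v2" is cached locally or downloadable; the sample sentences are made up.
from langchain.embeddings.huggingface import HuggingFaceEmbeddings

def demo_embeddings() -> None:
    # Same model name the app passes to LangchainEmbedding in get_rag_queryengine.
    embedder = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
    vectors = embedder.embed_documents([
        "What does the uploaded PDF say about pricing?",
        "A second, unrelated sentence.",
    ])
    print(f"Embedded {len(vectors)} texts; dimension = {len(vectors[0])}")

if __name__ == "__main__":
    demo_embeddings()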
import streamlit as st import langchain from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.embeddings.openai import OpenAIEmbeddings from langchain.vectorstores import Chroma from langchain import OpenAI, VectorDBQA from langchain.chains import RetrievalQAWithSourcesChain import PyPDF2 #This function will go through pdf and extract and return list of page texts. def read_and_textify(files): text_list = [] sources_list = [] for file in files: pdfReader = PyPDF2.PdfReader(file) #print("Page Number:", len(pdfReader.pages)) for i in range(len(pdfReader.pages)): pageObj = pdfReader.pages[i] text = pageObj.extract_text() pageObj.clear() text_list.append(text) sources_list.append(file.name + "_page_"+str(i)) return [text_list,sources_list] st.set_page_config(layout="centered", page_title="Multidoc_QnA") st.header("Multidoc_QnA") st.write("---") #file uploader uploaded_files = st.file_uploader("Upload documents",accept_multiple_files=True, type=["txt","pdf"]) st.write("---") if uploaded_files is None: st.info(f"""Upload files to analyse""") elif uploaded_files: st.write(str(len(uploaded_files)) + " document(s) loaded..") textify_output = read_and_textify(uploaded_files) documents = textify_output[0] sources = textify_output[1] #extract embeddings embeddings = OpenAIEmbeddings(openai_api_key = st.secrets["openai_api_key"]) #vstore with metadata. Here we will store page numbers. vStore = Chroma.from_texts(documents, embeddings, metadatas=[{"source": s} for s in sources]) #deciding model model_name = "gpt-3.5-turbo" # model_name = "gpt-4" retriever = vStore.as_retriever() retriever.search_kwargs = {'k':2} #initiate model llm = OpenAI(model_name=model_name, openai_api_key = st.secrets["openai_api_key"], streaming=True) model = RetrievalQAWithSourcesChain.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever) st.header("Ask your data") user_q = st.text_area("Enter your questions here") if st.button("Get Response"): try: with st.spinner("Model is working on it..."): result = model({"question":user_q}, return_only_outputs=True) st.subheader('Your response:') st.write(result['answer']) st.subheader('Source pages:') st.write(result['sources']) except Exception as e: st.error(f"An error occurred: {e}") st.error('Oops, the GPT response resulted in an error :( Please try again with a different question.')
[ "langchain.chains.RetrievalQAWithSourcesChain.from_chain_type", "langchain.vectorstores.Chroma.from_texts", "langchain.OpenAI", "langchain.embeddings.openai.OpenAIEmbeddings" ]
[((868, 932), 'streamlit.set_page_config', 'st.set_page_config', ([], {'layout': '"""centered"""', 'page_title': '"""Multidoc_QnA"""'}), "(layout='centered', page_title='Multidoc_QnA')\n", (886, 932), True, 'import streamlit as st\n'), ((933, 958), 'streamlit.header', 'st.header', (['"""Multidoc_QnA"""'], {}), "('Multidoc_QnA')\n", (942, 958), True, 'import streamlit as st\n'), ((959, 974), 'streamlit.write', 'st.write', (['"""---"""'], {}), "('---')\n", (967, 974), True, 'import streamlit as st\n'), ((1010, 1100), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload documents"""'], {'accept_multiple_files': '(True)', 'type': "['txt', 'pdf']"}), "('Upload documents', accept_multiple_files=True, type=[\n 'txt', 'pdf'])\n", (1026, 1100), True, 'import streamlit as st\n'), ((1094, 1109), 'streamlit.write', 'st.write', (['"""---"""'], {}), "('---')\n", (1102, 1109), True, 'import streamlit as st\n'), ((1140, 1175), 'streamlit.info', 'st.info', (['f"""Upload files to analyse"""'], {}), "(f'Upload files to analyse')\n", (1147, 1175), True, 'import streamlit as st\n'), ((510, 532), 'PyPDF2.PdfReader', 'PyPDF2.PdfReader', (['file'], {}), '(file)\n', (526, 532), False, 'import PyPDF2\n'), ((1424, 1485), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': "st.secrets['openai_api_key']"}), "(openai_api_key=st.secrets['openai_api_key'])\n", (1440, 1485), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1557, 1645), 'langchain.vectorstores.Chroma.from_texts', 'Chroma.from_texts', (['documents', 'embeddings'], {'metadatas': "[{'source': s} for s in sources]"}), "(documents, embeddings, metadatas=[{'source': s} for s in\n sources])\n", (1574, 1645), False, 'from langchain.vectorstores import Chroma\n'), ((1816, 1910), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': 'model_name', 'openai_api_key': "st.secrets['openai_api_key']", 'streaming': '(True)'}), "(model_name=model_name, openai_api_key=st.secrets['openai_api_key'],\n streaming=True)\n", (1822, 1910), False, 'from langchain import OpenAI, VectorDBQA\n'), ((1919, 2016), 'langchain.chains.RetrievalQAWithSourcesChain.from_chain_type', 'RetrievalQAWithSourcesChain.from_chain_type', ([], {'llm': 'llm', 'chain_type': '"""stuff"""', 'retriever': 'retriever'}), "(llm=llm, chain_type='stuff',\n retriever=retriever)\n", (1962, 2016), False, 'from langchain.chains import RetrievalQAWithSourcesChain\n'), ((2018, 2044), 'streamlit.header', 'st.header', (['"""Ask your data"""'], {}), "('Ask your data')\n", (2027, 2044), True, 'import streamlit as st\n'), ((2056, 2097), 'streamlit.text_area', 'st.text_area', (['"""Enter your questions here"""'], {}), "('Enter your questions here')\n", (2068, 2097), True, 'import streamlit as st\n'), ((2106, 2131), 'streamlit.button', 'st.button', (['"""Get Response"""'], {}), "('Get Response')\n", (2115, 2131), True, 'import streamlit as st\n'), ((2153, 2192), 'streamlit.spinner', 'st.spinner', (['"""Model is working on it..."""'], {}), "('Model is working on it...')\n", (2163, 2192), True, 'import streamlit as st\n'), ((2272, 2302), 'streamlit.subheader', 'st.subheader', (['"""Your response:"""'], {}), "('Your response:')\n", (2284, 2302), True, 'import streamlit as st\n'), ((2311, 2337), 'streamlit.write', 'st.write', (["result['answer']"], {}), "(result['answer'])\n", (2319, 2337), True, 'import streamlit as st\n'), ((2346, 2375), 'streamlit.subheader', 'st.subheader', (['"""Source pages:"""'], {}), "('Source pages:')\n", (2358, 2375), True, 'import 
streamlit as st\n'), ((2384, 2411), 'streamlit.write', 'st.write', (["result['sources']"], {}), "(result['sources'])\n", (2392, 2411), True, 'import streamlit as st\n'), ((2445, 2480), 'streamlit.error', 'st.error', (['f"""An error occurred: {e}"""'], {}), "(f'An error occurred: {e}')\n", (2453, 2480), True, 'import streamlit as st\n'), ((2487, 2599), 'streamlit.error', 'st.error', (['"""Oops, the GPT response resulted in an error :( Please try again with a different question."""'], {}), "(\n 'Oops, the GPT response resulted in an error :( Please try again with a different question.'\n )\n", (2495, 2599), True, 'import streamlit as st\n')]
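A hedged, Streamlit-free sketch of the same retrieval wiring: index a few page-level texts with "source" metadata in Chroma, then answer with sources through RetrievalQAWithSourcesChain. It assumes OPENAI_API_KEY is set in the environment and chromadb is installed, and it swaps in ChatOpenAI for the chat model instead of the OpenAI wrapper used above; the texts and question are invented.
import os
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQAWithSourcesChain

texts = ["The warranty lasts two years.", "Returns are accepted within 30 days."]
sources = ["manual.pdf_page_0", "manual.pdf_page_1"]  # same "file_page_i" convention as read_and_textify

embeddings = OpenAIEmbeddings(openai_api_key=os.environ["OPENAI_API_KEY"])
vstore = Chroma.from_texts(texts, embeddings, metadatas=[{"source": s} for s in sources])
retriever = vstore.as_retriever(search_kwargs={"k": 2})

llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, openai_api_key=os.environ["OPENAI_API_KEY"])
chain = RetrievalQAWithSourcesChain.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever)

result = chain({"question": "How long is the warranty?"}, return_only_outputs=True)
print(result["answer"])
print(result["sources"])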
from __future__ import annotations import asyncio import functools import logging import os import warnings from contextlib import asynccontextmanager, contextmanager from contextvars import ContextVar from typing import ( Any, AsyncGenerator, Dict, Generator, List, Optional, Type, TypeVar, Union, cast, ) from uuid import UUID, uuid4 import langchain from langchain.callbacks.base import ( BaseCallbackHandler, BaseCallbackManager, ChainManagerMixin, LLMManagerMixin, RunManagerMixin, ToolManagerMixin, ) from langchain.callbacks.openai_info import OpenAICallbackHandler from langchain.callbacks.stdout import StdOutCallbackHandler from langchain.callbacks.tracers.langchain import LangChainTracer from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1 from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler from langchain.callbacks.tracers.wandb import WandbTracer from langchain.schema import ( AgentAction, AgentFinish, BaseMessage, LLMResult, get_buffer_string, ) logger = logging.getLogger(__name__) Callbacks = Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar( "openai_callback", default=None ) tracing_callback_var: ContextVar[ Optional[LangChainTracerV1] ] = ContextVar( # noqa: E501 "tracing_callback", default=None ) wandb_tracing_callback_var: ContextVar[ Optional[WandbTracer] ] = ContextVar( # noqa: E501 "tracing_wandb_callback", default=None ) tracing_v2_callback_var: ContextVar[ Optional[LangChainTracer] ] = ContextVar( # noqa: E501 "tracing_callback_v2", default=None ) def _get_debug() -> bool: return langchain.debug @contextmanager def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]: """Get OpenAI callback handler in a context manager.""" cb = OpenAICallbackHandler() openai_callback_var.set(cb) yield cb openai_callback_var.set(None) @contextmanager def tracing_enabled( session_name: str = "default", ) -> Generator[TracerSessionV1, None, None]: """Get Tracer in a context manager.""" cb = LangChainTracerV1() session = cast(TracerSessionV1, cb.load_session(session_name)) tracing_callback_var.set(cb) yield session tracing_callback_var.set(None) @contextmanager def wandb_tracing_enabled( session_name: str = "default", ) -> Generator[None, None, None]: """Get WandbTracer in a context manager.""" cb = WandbTracer() wandb_tracing_callback_var.set(cb) yield None wandb_tracing_callback_var.set(None) @contextmanager def tracing_v2_enabled( session_name: Optional[str] = None, *, example_id: Optional[Union[str, UUID]] = None, ) -> Generator[None, None, None]: """Get the experimental tracer handler in a context manager.""" # Issue a warning that this is experimental warnings.warn( "The tracing v2 API is in development. " "This is not yet stable and may change in the future." 
) if isinstance(example_id, str): example_id = UUID(example_id) cb = LangChainTracer( example_id=example_id, session_name=session_name, ) tracing_v2_callback_var.set(cb) yield tracing_v2_callback_var.set(None) @contextmanager def trace_as_chain_group( group_name: str, *, session_name: Optional[str] = None, example_id: Optional[Union[str, UUID]] = None, ) -> Generator[CallbackManager, None, None]: """Get a callback manager for a chain group in a context manager.""" cb = LangChainTracer( session_name=session_name, example_id=example_id, ) cm = CallbackManager.configure( inheritable_callbacks=[cb], ) run_manager = cm.on_chain_start({"name": group_name}, {}) yield run_manager.get_child() run_manager.on_chain_end({}) @asynccontextmanager async def atrace_as_chain_group( group_name: str, *, session_name: Optional[str] = None, example_id: Optional[Union[str, UUID]] = None, ) -> AsyncGenerator[AsyncCallbackManager, None]: """Get a callback manager for a chain group in a context manager.""" cb = LangChainTracer( session_name=session_name, example_id=example_id, ) cm = AsyncCallbackManager.configure( inheritable_callbacks=[cb], ) run_manager = await cm.on_chain_start({"name": group_name}, {}) try: yield run_manager.get_child() finally: await run_manager.on_chain_end({}) def _handle_event( handlers: List[BaseCallbackHandler], event_name: str, ignore_condition_name: Optional[str], *args: Any, **kwargs: Any, ) -> None: """Generic event handler for CallbackManager.""" message_strings: Optional[List[str]] = None for handler in handlers: try: if ignore_condition_name is None or not getattr( handler, ignore_condition_name ): getattr(handler, event_name)(*args, **kwargs) except NotImplementedError as e: if event_name == "on_chat_model_start": if message_strings is None: message_strings = [get_buffer_string(m) for m in args[1]] _handle_event( [handler], "on_llm_start", "ignore_llm", args[0], message_strings, *args[2:], **kwargs, ) else: logger.warning(f"Error in {event_name} callback: {e}") except Exception as e: if handler.raise_error: raise e logger.warning(f"Error in {event_name} callback: {e}") async def _ahandle_event_for_handler( handler: BaseCallbackHandler, event_name: str, ignore_condition_name: Optional[str], *args: Any, **kwargs: Any, ) -> None: try: if ignore_condition_name is None or not getattr(handler, ignore_condition_name): event = getattr(handler, event_name) if asyncio.iscoroutinefunction(event): await event(*args, **kwargs) else: await asyncio.get_event_loop().run_in_executor( None, functools.partial(event, *args, **kwargs) ) except NotImplementedError as e: if event_name == "on_chat_model_start": message_strings = [get_buffer_string(m) for m in args[1]] await _ahandle_event_for_handler( handler, "on_llm_start", "ignore_llm", args[0], message_strings, *args[2:], **kwargs, ) else: logger.warning(f"Error in {event_name} callback: {e}") except Exception as e: if handler.raise_error: raise e logger.warning(f"Error in {event_name} callback: {e}") async def _ahandle_event( handlers: List[BaseCallbackHandler], event_name: str, ignore_condition_name: Optional[str], *args: Any, **kwargs: Any, ) -> None: """Generic event handler for AsyncCallbackManager.""" await asyncio.gather( *( _ahandle_event_for_handler( handler, event_name, ignore_condition_name, *args, **kwargs ) for handler in handlers ) ) BRM = TypeVar("BRM", bound="BaseRunManager") class BaseRunManager(RunManagerMixin): """Base class for run manager (a bound callback manager).""" def __init__( self, run_id: UUID, handlers: 
List[BaseCallbackHandler], inheritable_handlers: List[BaseCallbackHandler], parent_run_id: Optional[UUID] = None, ) -> None: """Initialize run manager.""" self.run_id = run_id self.handlers = handlers self.inheritable_handlers = inheritable_handlers self.parent_run_id = parent_run_id @classmethod def get_noop_manager(cls: Type[BRM]) -> BRM: """Return a manager that doesn't perform any operations.""" return cls(uuid4(), [], []) class RunManager(BaseRunManager): """Sync Run Manager.""" def on_text( self, text: str, **kwargs: Any, ) -> Any: """Run when text is received.""" _handle_event( self.handlers, "on_text", None, text, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class AsyncRunManager(BaseRunManager): """Async Run Manager.""" async def on_text( self, text: str, **kwargs: Any, ) -> Any: """Run when text is received.""" await _ahandle_event( self.handlers, "on_text", None, text, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class CallbackManagerForLLMRun(RunManager, LLMManagerMixin): """Callback manager for LLM run.""" def on_llm_new_token( self, token: str, **kwargs: Any, ) -> None: """Run when LLM generates a new token.""" _handle_event( self.handlers, "on_llm_new_token", "ignore_llm", token=token, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when LLM ends running.""" _handle_event( self.handlers, "on_llm_end", "ignore_llm", response, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when LLM errors.""" _handle_event( self.handlers, "on_llm_error", "ignore_llm", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin): """Async callback manager for LLM run.""" async def on_llm_new_token( self, token: str, **kwargs: Any, ) -> None: """Run when LLM generates a new token.""" await _ahandle_event( self.handlers, "on_llm_new_token", "ignore_llm", token, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when LLM ends running.""" await _ahandle_event( self.handlers, "on_llm_end", "ignore_llm", response, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when LLM errors.""" await _ahandle_event( self.handlers, "on_llm_error", "ignore_llm", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class CallbackManagerForChainRun(RunManager, ChainManagerMixin): """Callback manager for chain run.""" def get_child(self) -> CallbackManager: """Get a child callback manager.""" manager = CallbackManager([], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) return manager def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Run when chain ends running.""" _handle_event( self.handlers, "on_chain_end", "ignore_chain", outputs, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when chain errors.""" _handle_event( self.handlers, "on_chain_error", "ignore_chain", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_agent_action(self, action: AgentAction, 
**kwargs: Any) -> Any: """Run when agent action is received.""" _handle_event( self.handlers, "on_agent_action", "ignore_agent", action, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any: """Run when agent finish is received.""" _handle_event( self.handlers, "on_agent_finish", "ignore_agent", finish, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class AsyncCallbackManagerForChainRun(AsyncRunManager, ChainManagerMixin): """Async callback manager for chain run.""" def get_child(self) -> AsyncCallbackManager: """Get a child callback manager.""" manager = AsyncCallbackManager([], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) return manager async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Run when chain ends running.""" await _ahandle_event( self.handlers, "on_chain_end", "ignore_chain", outputs, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when chain errors.""" await _ahandle_event( self.handlers, "on_chain_error", "ignore_chain", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Run when agent action is received.""" await _ahandle_event( self.handlers, "on_agent_action", "ignore_agent", action, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any: """Run when agent finish is received.""" await _ahandle_event( self.handlers, "on_agent_finish", "ignore_agent", finish, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class CallbackManagerForToolRun(RunManager, ToolManagerMixin): """Callback manager for tool run.""" def get_child(self) -> CallbackManager: """Get a child callback manager.""" manager = CallbackManager([], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) return manager def on_tool_end( self, output: str, **kwargs: Any, ) -> None: """Run when tool ends running.""" _handle_event( self.handlers, "on_tool_end", "ignore_agent", output, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when tool errors.""" _handle_event( self.handlers, "on_tool_error", "ignore_agent", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class AsyncCallbackManagerForToolRun(AsyncRunManager, ToolManagerMixin): """Async callback manager for tool run.""" def get_child(self) -> AsyncCallbackManager: """Get a child callback manager.""" manager = AsyncCallbackManager([], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) return manager async def on_tool_end(self, output: str, **kwargs: Any) -> None: """Run when tool ends running.""" await _ahandle_event( self.handlers, "on_tool_end", "ignore_agent", output, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when tool errors.""" await _ahandle_event( self.handlers, "on_tool_error", "ignore_agent", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class CallbackManager(BaseCallbackManager): """Callback manager that can be used to handle 
callbacks from langchain.""" def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForLLMRun: """Run when LLM starts running.""" if run_id is None: run_id = uuid4() _handle_event( self.handlers, "on_llm_start", "ignore_llm", serialized, prompts, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) return CallbackManagerForLLMRun( run_id, self.handlers, self.inheritable_handlers, self.parent_run_id ) def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForLLMRun: """Run when LLM starts running.""" if run_id is None: run_id = uuid4() _handle_event( self.handlers, "on_chat_model_start", "ignore_chat_model", serialized, messages, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) # Re-use the LLM Run Manager since the outputs are treated # the same for now return CallbackManagerForLLMRun( run_id, self.handlers, self.inheritable_handlers, self.parent_run_id ) def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForChainRun: """Run when chain starts running.""" if run_id is None: run_id = uuid4() _handle_event( self.handlers, "on_chain_start", "ignore_chain", serialized, inputs, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) return CallbackManagerForChainRun( run_id, self.handlers, self.inheritable_handlers, self.parent_run_id ) def on_tool_start( self, serialized: Dict[str, Any], input_str: str, run_id: Optional[UUID] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> CallbackManagerForToolRun: """Run when tool starts running.""" if run_id is None: run_id = uuid4() _handle_event( self.handlers, "on_tool_start", "ignore_agent", serialized, input_str, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) return CallbackManagerForToolRun( run_id, self.handlers, self.inheritable_handlers, self.parent_run_id ) @classmethod def configure( cls, inheritable_callbacks: Callbacks = None, local_callbacks: Callbacks = None, verbose: bool = False, ) -> CallbackManager: """Configure the callback manager.""" return _configure(cls, inheritable_callbacks, local_callbacks, verbose) class AsyncCallbackManager(BaseCallbackManager): """Async callback manager that can be used to handle callbacks from LangChain.""" @property def is_async(self) -> bool: """Return whether the handler is async.""" return True async def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], run_id: Optional[UUID] = None, **kwargs: Any, ) -> AsyncCallbackManagerForLLMRun: """Run when LLM starts running.""" if run_id is None: run_id = uuid4() await _ahandle_event( self.handlers, "on_llm_start", "ignore_llm", serialized, prompts, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) return AsyncCallbackManagerForLLMRun( run_id, self.handlers, self.inheritable_handlers, self.parent_run_id ) async def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: if run_id is None: run_id = uuid4() await _ahandle_event( self.handlers, "on_chat_model_start", "ignore_chat_model", serialized, messages, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) return AsyncCallbackManagerForLLMRun( run_id, self.handlers, self.inheritable_handlers, self.parent_run_id ) async def on_chain_start( self, serialized: 
Dict[str, Any], inputs: Dict[str, Any], run_id: Optional[UUID] = None, **kwargs: Any, ) -> AsyncCallbackManagerForChainRun: """Run when chain starts running.""" if run_id is None: run_id = uuid4() await _ahandle_event( self.handlers, "on_chain_start", "ignore_chain", serialized, inputs, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) return AsyncCallbackManagerForChainRun( run_id, self.handlers, self.inheritable_handlers, self.parent_run_id ) async def on_tool_start( self, serialized: Dict[str, Any], input_str: str, run_id: Optional[UUID] = None, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> AsyncCallbackManagerForToolRun: """Run when tool starts running.""" if run_id is None: run_id = uuid4() await _ahandle_event( self.handlers, "on_tool_start", "ignore_agent", serialized, input_str, run_id=run_id, parent_run_id=self.parent_run_id, **kwargs, ) return AsyncCallbackManagerForToolRun( run_id, self.handlers, self.inheritable_handlers, self.parent_run_id ) @classmethod def configure( cls, inheritable_callbacks: Callbacks = None, local_callbacks: Callbacks = None, verbose: bool = False, ) -> AsyncCallbackManager: """Configure the callback manager.""" return _configure(cls, inheritable_callbacks, local_callbacks, verbose) T = TypeVar("T", CallbackManager, AsyncCallbackManager) def env_var_is_set(env_var: str) -> bool: """Check if an environment variable is set.""" return env_var in os.environ and os.environ[env_var] not in ( "", "0", "false", "False", ) def _configure( callback_manager_cls: Type[T], inheritable_callbacks: Callbacks = None, local_callbacks: Callbacks = None, verbose: bool = False, ) -> T: """Configure the callback manager.""" callback_manager = callback_manager_cls([]) if inheritable_callbacks or local_callbacks: if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None: inheritable_callbacks_ = inheritable_callbacks or [] callback_manager = callback_manager_cls( handlers=inheritable_callbacks_.copy(), inheritable_handlers=inheritable_callbacks_.copy(), ) else: callback_manager = callback_manager_cls( handlers=inheritable_callbacks.handlers, inheritable_handlers=inheritable_callbacks.inheritable_handlers, parent_run_id=inheritable_callbacks.parent_run_id, ) local_handlers_ = ( local_callbacks if isinstance(local_callbacks, list) else (local_callbacks.handlers if local_callbacks else []) ) for handler in local_handlers_: callback_manager.add_handler(handler, False) tracer = tracing_callback_var.get() wandb_tracer = wandb_tracing_callback_var.get() open_ai = openai_callback_var.get() tracing_enabled_ = ( env_var_is_set("LANGCHAIN_TRACING") or tracer is not None or env_var_is_set("LANGCHAIN_HANDLER") ) wandb_tracing_enabled_ = ( env_var_is_set("LANGCHAIN_WANDB_TRACING") or wandb_tracer is not None ) tracer_v2 = tracing_v2_callback_var.get() tracing_v2_enabled_ = ( env_var_is_set("LANGCHAIN_TRACING_V2") or tracer_v2 is not None ) tracer_session = os.environ.get("LANGCHAIN_SESSION") debug = _get_debug() if tracer_session is None: tracer_session = "default" if ( verbose or debug or tracing_enabled_ or tracing_v2_enabled_ or wandb_tracing_enabled_ or open_ai is not None ): if verbose and not any( isinstance(handler, StdOutCallbackHandler) for handler in callback_manager.handlers ): if debug: pass else: callback_manager.add_handler(StdOutCallbackHandler(), False) if debug and not any( isinstance(handler, ConsoleCallbackHandler) for handler in callback_manager.handlers ): callback_manager.add_handler(ConsoleCallbackHandler(), True) if 
tracing_enabled_ and not any( isinstance(handler, LangChainTracerV1) for handler in callback_manager.handlers ): if tracer: callback_manager.add_handler(tracer, True) else: handler = LangChainTracerV1() handler.load_session(tracer_session) callback_manager.add_handler(handler, True) if wandb_tracing_enabled_ and not any( isinstance(handler, WandbTracer) for handler in callback_manager.handlers ): if wandb_tracer: callback_manager.add_handler(wandb_tracer, True) else: handler = WandbTracer() callback_manager.add_handler(handler, True) if tracing_v2_enabled_ and not any( isinstance(handler, LangChainTracer) for handler in callback_manager.handlers ): if tracer_v2: callback_manager.add_handler(tracer_v2, True) else: try: handler = LangChainTracer(session_name=tracer_session) callback_manager.add_handler(handler, True) except Exception as e: logger.warning( "Unable to load requested LangChainTracer." " To disable this warning," " unset the LANGCHAIN_TRACING_V2 environment variables.", e, ) if open_ai is not None and not any( isinstance(handler, OpenAICallbackHandler) for handler in callback_manager.handlers ): callback_manager.add_handler(open_ai, True) return callback_manager
[ "langchain.schema.get_buffer_string", "langchain.callbacks.stdout.StdOutCallbackHandler", "langchain.callbacks.tracers.wandb.WandbTracer", "langchain.callbacks.openai_info.OpenAICallbackHandler", "langchain.callbacks.tracers.stdout.ConsoleCallbackHandler", "langchain.callbacks.tracers.langchain.LangChainTracer", "langchain.callbacks.tracers.langchain_v1.LangChainTracerV1" ]
[((1114, 1141), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1131, 1141), False, 'import logging\n'), ((1286, 1329), 'contextvars.ContextVar', 'ContextVar', (['"""openai_callback"""'], {'default': 'None'}), "('openai_callback', default=None)\n", (1296, 1329), False, 'from contextvars import ContextVar\n'), ((1406, 1450), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_callback"""'], {'default': 'None'}), "('tracing_callback', default=None)\n", (1416, 1450), False, 'from contextvars import ContextVar\n'), ((1541, 1591), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_wandb_callback"""'], {'default': 'None'}), "('tracing_wandb_callback', default=None)\n", (1551, 1591), False, 'from contextvars import ContextVar\n'), ((1684, 1731), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_callback_v2"""'], {'default': 'None'}), "('tracing_callback_v2', default=None)\n", (1694, 1731), False, 'from contextvars import ContextVar\n'), ((7547, 7585), 'typing.TypeVar', 'TypeVar', (['"""BRM"""'], {'bound': '"""BaseRunManager"""'}), "('BRM', bound='BaseRunManager')\n", (7554, 7585), False, 'from typing import Any, AsyncGenerator, Dict, Generator, List, Optional, Type, TypeVar, Union, cast\n'), ((24466, 24517), 'typing.TypeVar', 'TypeVar', (['"""T"""', 'CallbackManager', 'AsyncCallbackManager'], {}), "('T', CallbackManager, AsyncCallbackManager)\n", (24473, 24517), False, 'from typing import Any, AsyncGenerator, Dict, Generator, List, Optional, Type, TypeVar, Union, cast\n'), ((1969, 1992), 'langchain.callbacks.openai_info.OpenAICallbackHandler', 'OpenAICallbackHandler', ([], {}), '()\n', (1990, 1992), False, 'from langchain.callbacks.openai_info import OpenAICallbackHandler\n'), ((2243, 2262), 'langchain.callbacks.tracers.langchain_v1.LangChainTracerV1', 'LangChainTracerV1', ([], {}), '()\n', (2260, 2262), False, 'from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1\n'), ((2587, 2600), 'langchain.callbacks.tracers.wandb.WandbTracer', 'WandbTracer', ([], {}), '()\n', (2598, 2600), False, 'from langchain.callbacks.tracers.wandb import WandbTracer\n'), ((2990, 3107), 'warnings.warn', 'warnings.warn', (['"""The tracing v2 API is in development. This is not yet stable and may change in the future."""'], {}), "(\n 'The tracing v2 API is in development. 
This is not yet stable and may change in the future.'\n )\n", (3003, 3107), False, 'import warnings\n'), ((3206, 3271), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'example_id': 'example_id', 'session_name': 'session_name'}), '(example_id=example_id, session_name=session_name)\n', (3221, 3271), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((3669, 3734), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'session_name': 'session_name', 'example_id': 'example_id'}), '(session_name=session_name, example_id=example_id)\n', (3684, 3734), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((4272, 4337), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'session_name': 'session_name', 'example_id': 'example_id'}), '(session_name=session_name, example_id=example_id)\n', (4287, 4337), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((26539, 26574), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_SESSION"""'], {}), "('LANGCHAIN_SESSION')\n", (26553, 26574), False, 'import os\n'), ((3180, 3196), 'uuid.UUID', 'UUID', (['example_id'], {}), '(example_id)\n', (3184, 3196), False, 'from uuid import UUID, uuid4\n'), ((6199, 6233), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['event'], {}), '(event)\n', (6226, 6233), False, 'import asyncio\n'), ((8264, 8271), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (8269, 8271), False, 'from uuid import UUID, uuid4\n'), ((18037, 18044), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (18042, 18044), False, 'from uuid import UUID, uuid4\n'), ((18744, 18751), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (18749, 18751), False, 'from uuid import UUID, uuid4\n'), ((19547, 19554), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (19552, 19554), False, 'from uuid import UUID, uuid4\n'), ((20282, 20289), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (20287, 20289), False, 'from uuid import UUID, uuid4\n'), ((21564, 21571), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (21569, 21571), False, 'from uuid import UUID, uuid4\n'), ((22225, 22232), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (22230, 22232), False, 'from uuid import UUID, uuid4\n'), ((22958, 22965), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (22963, 22965), False, 'from uuid import UUID, uuid4\n'), ((23716, 23723), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (23721, 23723), False, 'from uuid import UUID, uuid4\n'), ((27319, 27343), 'langchain.callbacks.tracers.stdout.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (27341, 27343), False, 'from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler\n'), ((27633, 27652), 'langchain.callbacks.tracers.langchain_v1.LangChainTracerV1', 'LangChainTracerV1', ([], {}), '()\n', (27650, 27652), False, 'from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1\n'), ((28048, 28061), 'langchain.callbacks.tracers.wandb.WandbTracer', 'WandbTracer', ([], {}), '()\n', (28059, 28061), False, 'from langchain.callbacks.tracers.wandb import WandbTracer\n'), ((6564, 6584), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['m'], {}), '(m)\n', (6581, 6584), False, 'from langchain.schema import AgentAction, AgentFinish, BaseMessage, LLMResult, get_buffer_string\n'), ((27096, 27119), 'langchain.callbacks.stdout.StdOutCallbackHandler', 'StdOutCallbackHandler', ([], {}), '()\n', (27117, 27119), False, 'from langchain.callbacks.stdout import 
StdOutCallbackHandler\n'), ((28436, 28480), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'session_name': 'tracer_session'}), '(session_name=tracer_session)\n', (28451, 28480), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((6388, 6429), 'functools.partial', 'functools.partial', (['event', '*args'], {}), '(event, *args, **kwargs)\n', (6405, 6429), False, 'import functools\n'), ((5291, 5311), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['m'], {}), '(m)\n', (5308, 5311), False, 'from langchain.schema import AgentAction, AgentFinish, BaseMessage, LLMResult, get_buffer_string\n'), ((6320, 6344), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (6342, 6344), False, 'import asyncio\n')]
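The record above is built around the `_handle_event` dispatch loop and the `_configure` helper. The following dependency-free sketch shows the same dispatch pattern in isolation; the handler class, event name, and demo call are illustrative and not part of langchain.

import logging

logger = logging.getLogger(__name__)


class PrintHandler:
    """Toy stand-in for a callback handler; only the attributes we use."""
    ignore_llm = False

    def on_llm_start(self, serialized, prompts, **kwargs):
        print(f"LLM starting with {len(prompts)} prompt(s)")


def handle_event(handlers, event_name, ignore_condition_name=None, *args, **kwargs):
    """Invoke event_name on every handler, honouring its ignore flag and
    isolating failures so one broken handler cannot abort the run."""
    for handler in handlers:
        try:
            if ignore_condition_name is None or not getattr(handler, ignore_condition_name, False):
                getattr(handler, event_name)(*args, **kwargs)
        except Exception as e:  # mirror the broad catch used in the record
            logger.warning(f"Error in {event_name} callback: {e}")


handle_event([PrintHandler()], "on_llm_start", "ignore_llm", {"name": "demo"}, ["Hello"])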
from langchain.prompts import PromptTemplate
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores.faiss import FAISS
from langchain_community.llms import CTransformers
from langchain.chains import RetrievalQA
import chainlit as cl
from chainlit import LangchainCallbackHandler

DB_FAISS_PATH = "vectorstores/db_faiss"

custom_prompt_template = """Use the following pieces of information to answer the user's question.
If you don't know the answer, just say that you don't know; don't try to make up an answer.

Context: {context}
Question: {question}

Only return the helpful answer below and nothing else.
Helpful Answer:"""


def set_custom_prompt():
    """Prompt template for QA retrieval over the vector store."""
    prompt = PromptTemplate(template=custom_prompt_template,
                            input_variables=['context', 'question'])
    return prompt


def load_llm():
    llm = CTransformers(
        model="llama-2-7b-chat.ggmlv3.q8_0.bin",
        model_type="llama",
        max_new_tokens="512",
        temperature="0.2"
    )
    return llm


def retrieveal_qa_chain(llm, prompt, db):
    qa_chain = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=db.as_retriever(search_kwargs={'k': 2}),
        return_source_documents=True,
        chain_type_kwargs={'prompt': prompt}
    )
    return qa_chain


def qa_bot():
    embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L12-v2',
                                       model_kwargs={'device': 'cpu'})  # run the embedding model on CPU
    db = FAISS.load_local(DB_FAISS_PATH, embeddings)
    llm = load_llm()
    qa_prompt = set_custom_prompt()
    qa = retrieveal_qa_chain(llm, qa_prompt, db)
    return qa


def final_result(query):
    qa_result = qa_bot()
    response = qa_result({'query': query})  # RetrievalQA expects the lowercase 'query' input key
    return response


# Chainlit
@cl.on_chat_start
async def start():
    chain = qa_bot()
    msg = cl.Message(content="Starting the Bot....")
    await msg.send()
    msg.content = "Hi, Bot is ready to answer your questions"
    await msg.update()
    cl.user_session.set("chain", chain)


@cl.on_message
async def main(message):
    chain = cl.user_session.get("chain")
    cb = LangchainCallbackHandler(
        stream_final_answer=True,
        answer_prefix_tokens=['Final', "Answer"]
    )
    cb.answer_reached = True
    res = await chain.acall(message, callbacks=[cb])
    answer = res['result']
    sources = res["source_documents"]
    if sources:
        answer += "\nSources: " + str(sources)
    else:
        answer += "\nNo sources found"
    await cl.Message(content=answer).send()
[ "langchain.vectorstores.faiss.FAISS.load_local", "langchain_community.llms.CTransformers", "langchain.prompts.PromptTemplate", "langchain_community.embeddings.HuggingFaceEmbeddings" ]
[((827, 919), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'custom_prompt_template', 'input_variables': "['context', 'question']"}), "(template=custom_prompt_template, input_variables=['context',\n 'question'])\n", (841, 919), False, 'from langchain.prompts import PromptTemplate\n'), ((970, 1089), 'langchain_community.llms.CTransformers', 'CTransformers', ([], {'model': '"""llama-2-7b-chat.ggmlv3.q8_0.bin"""', 'model_type': '"""llama"""', 'max_new_tokens': '"""512"""', 'temperature': '"""0.2"""'}), "(model='llama-2-7b-chat.ggmlv3.q8_0.bin', model_type='llama',\n max_new_tokens='512', temperature='0.2')\n", (983, 1089), False, 'from langchain_community.llms import CTransformers\n'), ((1505, 1616), 'langchain_community.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""sentence-transformers/all-MiniLM-L12-v2"""', 'model_kwargs': "{'device': 'cpu'}"}), "(model_name='sentence-transformers/all-MiniLM-L12-v2',\n model_kwargs={'device': 'cpu'})\n", (1526, 1616), False, 'from langchain_community.embeddings import HuggingFaceEmbeddings\n'), ((1696, 1739), 'langchain.vectorstores.faiss.FAISS.load_local', 'FAISS.load_local', (['DB_FAISS_PATH', 'embeddings'], {}), '(DB_FAISS_PATH, embeddings)\n', (1712, 1739), False, 'from langchain.vectorstores.faiss import FAISS\n'), ((2072, 2114), 'chainlit.Message', 'cl.Message', ([], {'content': '"""Starting the Bot...."""'}), "(content='Starting the Bot....')\n", (2082, 2114), True, 'import chainlit as cl\n'), ((2229, 2264), 'chainlit.user_session.set', 'cl.user_session.set', (['"""chain"""', 'chain'], {}), "('chain', chain)\n", (2248, 2264), True, 'import chainlit as cl\n'), ((2322, 2350), 'chainlit.user_session.get', 'cl.user_session.get', (['"""chain"""'], {}), "('chain')\n", (2341, 2350), True, 'import chainlit as cl\n'), ((2361, 2458), 'chainlit.LangchainCallbackHandler', 'LangchainCallbackHandler', ([], {'stream_final_answer': '(True)', 'answer_prefix_tokens': "['Final', 'Answer']"}), "(stream_final_answer=True, answer_prefix_tokens=[\n 'Final', 'Answer'])\n", (2385, 2458), False, 'from chainlit import LangchainCallbackHandler\n'), ((2762, 2788), 'chainlit.Message', 'cl.Message', ([], {'content': 'answer'}), '(content=answer)\n', (2772, 2788), True, 'import chainlit as cl\n')]
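A small standalone check of the custom-prompt wiring used above, assuming langchain is installed; `PromptTemplate` is the class the record imports, and `format` fills the same placeholders the stuff chain receives via `chain_type_kwargs={'prompt': prompt}`.

from langchain.prompts import PromptTemplate

custom_prompt_template = """Use the following pieces of information to answer the user's question.
If you don't know the answer, just say that you don't know.

Context: {context}
Question: {question}

Helpful Answer:"""

prompt = PromptTemplate(template=custom_prompt_template,
                        input_variables=["context", "question"])

# The stuff chain calls format() with the retrieved chunks as `context`
# and the user question as `question`; printing makes the prompt easy to inspect.
print(prompt.format(context="FAISS stores dense vectors on disk.",
                    question="What does FAISS store?"))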
from typing import Any, Dict, List, Optional, Union
import os

from langchain_experimental.agents.agent_toolkits import create_csv_agent
# from langchain.llms import OpenAI
from langchain.agents.agent_types import AgentType
from langchain_google_genai import ChatGoogleGenerativeAI
# from langchain.chat_models import ChatOpenAI
# import requests
import os

os.environ["GOOGLE_API_KEY"] = "AIzaSyBv6A0mzzKHry7TbT6xInbRjRgu9H8_P5o"

llm = ChatGoogleGenerativeAI(model="gemini-pro")


def initializeAgent(csv_filename):
    return create_csv_agent(llm, csv_filename, verbose=True)

# import langchain type definitons
[ "langchain_experimental.agents.agent_toolkits.create_csv_agent", "langchain_google_genai.ChatGoogleGenerativeAI" ]
[((439, 481), 'langchain_google_genai.ChatGoogleGenerativeAI', 'ChatGoogleGenerativeAI', ([], {'model': '"""gemini-pro"""'}), "(model='gemini-pro')\n", (461, 481), False, 'from langchain_google_genai import ChatGoogleGenerativeAI\n'), ((530, 579), 'langchain_experimental.agents.agent_toolkits.create_csv_agent', 'create_csv_agent', (['llm', 'csv_filename'], {'verbose': '(True)'}), '(llm, csv_filename, verbose=True)\n', (546, 579), False, 'from langchain_experimental.agents.agent_toolkits import create_csv_agent\n')]
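A sketch of exercising the same `create_csv_agent` helper on a throwaway CSV. It assumes `GOOGLE_API_KEY` is set in the environment rather than hard-coded, and the file name and question are placeholders.

import csv
import os

from langchain_experimental.agents.agent_toolkits import create_csv_agent
from langchain_google_genai import ChatGoogleGenerativeAI

# Build a tiny CSV so the agent has something to reason over.
with open("people.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["name", "age"])
    writer.writerows([["Ada", 36], ["Grace", 45]])

if os.environ.get("GOOGLE_API_KEY"):
    llm = ChatGoogleGenerativeAI(model="gemini-pro")
    agent = create_csv_agent(llm, "people.csv", verbose=True)
    print(agent.run("What is the average age?"))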
import langchain from langchain.llms import VertexAI from langchain.prompts import PromptTemplate, load_prompt import wandb from wandb.integration.langchain import WandbTracer import streamlit as st from google.oauth2 import service_account # account_info = dict(st.secrets["GOOGLE_APPLICATION_CREDENTIALS"]) # credentials = service_account.Credentials.from_service_account_info(account_info) def generate_prd_v3_palm(new_feature, new_feature_desc, wandb_name): wandb.login(key=st.secrets["WANDB_API_KEY"]) wandb.init( project="generate_prd_v3_palm", config={ "model": "text-bison-001", "temperature": 0.2 }, entity="arihantsheth", name=wandb_name, ) # llm = VertexAI(credentials=credentials, max_output_tokens=1024) llm = VertexAI(project="synap-labs-390404", location="us-central1", credentials=dict( st.secrets["GOOGLE_APPLICATION_CREDENTIALS"]), max_output_tokens=1024) prompt_template = load_prompt("prompt_templates/generate_prd_template_v2.json") # For deployment # prompt_template = load_prompt("../prompt_templates/generate_prd_template_v3.json") # For local testing prompt = prompt_template.format( new_feature=new_feature, new_feature_desc=new_feature_desc) try: output = llm(prompt, callbacks=[WandbTracer()]) except Exception as e: print("GCP Authentication error") print(e) return # with open(f"./generated_prds/{new_feature}_prd_v3_palm.md", "w") as f: # For deployment # # with open(f"../generated_prds/{new_feature}_prd_palm.md", "w") as f: # For local testing # f.write(output) wandb.finish() return output
[ "langchain.prompts.load_prompt" ]
[((469, 513), 'wandb.login', 'wandb.login', ([], {'key': "st.secrets['WANDB_API_KEY']"}), "(key=st.secrets['WANDB_API_KEY'])\n", (480, 513), False, 'import wandb\n'), ((519, 666), 'wandb.init', 'wandb.init', ([], {'project': '"""generate_prd_v3_palm"""', 'config': "{'model': 'text-bison-001', 'temperature': 0.2}", 'entity': '"""arihantsheth"""', 'name': 'wandb_name'}), "(project='generate_prd_v3_palm', config={'model':\n 'text-bison-001', 'temperature': 0.2}, entity='arihantsheth', name=\n wandb_name)\n", (529, 666), False, 'import wandb\n'), ((993, 1054), 'langchain.prompts.load_prompt', 'load_prompt', (['"""prompt_templates/generate_prd_template_v2.json"""'], {}), "('prompt_templates/generate_prd_template_v2.json')\n", (1004, 1054), False, 'from langchain.prompts import PromptTemplate, load_prompt\n'), ((1679, 1693), 'wandb.finish', 'wandb.finish', ([], {}), '()\n', (1691, 1693), False, 'import wandb\n'), ((1339, 1352), 'wandb.integration.langchain.WandbTracer', 'WandbTracer', ([], {}), '()\n', (1350, 1352), False, 'from wandb.integration.langchain import WandbTracer\n')]
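The record loads its prompt from a JSON file with `load_prompt`. This sketch shows the save/load round trip that produces such a file; the template wording and file name are placeholders, not the repo's actual `generate_prd_template_v2.json`.

from langchain.prompts import PromptTemplate, load_prompt

template = PromptTemplate(
    template="Write a PRD for {new_feature}: {new_feature_desc}",
    input_variables=["new_feature", "new_feature_desc"],
)
template.save("generate_prd_template_demo.json")  # serialises the template to JSON

reloaded = load_prompt("generate_prd_template_demo.json")
print(reloaded.format(new_feature="Dark mode",
                      new_feature_desc="A theme toggle for the web app"))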
#!/usr/bin/env python # coding: utf-8 # # LangChain: Agents # # ## Outline: # # * Using built in LangChain tools: DuckDuckGo search and Wikipedia # * Defining your own tools # In[ ]: import os from dotenv import load_dotenv, find_dotenv _ = load_dotenv(find_dotenv()) # read local .env file import warnings warnings.filterwarnings("ignore") # Note: LLM's do not always produce the same results. When executing the code in your notebook, you may get slightly different answers that those in the video. # In[ ]: # account for deprecation of LLM model import datetime # Get the current date current_date = datetime.datetime.now().date() # Define the date after which the model should be set to "gpt-3.5-turbo" target_date = datetime.date(2024, 6, 12) # Set the model variable based on the current date if current_date > target_date: llm_model = "gpt-3.5-turbo" else: llm_model = "gpt-3.5-turbo-0301" # ## Built-in LangChain tools # In[ ]: #!pip install -U wikipedia # In[ ]: from langchain.agents.agent_toolkits import create_python_agent from langchain.agents import load_tools, initialize_agent from langchain.agents import AgentType from langchain.tools.python.tool import PythonREPLTool from langchain.python import PythonREPL from langchain.chat_models import ChatOpenAI # In[ ]: llm = ChatOpenAI(temperature=0, model=llm_model) # In[ ]: tools = load_tools(["llm-math","wikipedia"], llm=llm) # In[ ]: agent= initialize_agent( tools, llm, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, handle_parsing_errors=True, verbose = True) # In[ ]: agent("What is the 25% of 300?") # ## Wikipedia example # In[ ]: question = "Tom M. Mitchell is an American computer scientist \ and the Founders University Professor at Carnegie Mellon University (CMU)\ what book did he write?" result = agent(question) # ## Python Agent # In[ ]: agent = create_python_agent( llm, tool=PythonREPLTool(), verbose=True ) # In[ ]: customer_list = [["Harrison", "Chase"], ["Lang", "Chain"], ["Dolly", "Too"], ["Elle", "Elem"], ["Geoff","Fusion"], ["Trance","Former"], ["Jen","Ayai"] ] # In[ ]: agent.run(f"""Sort these customers by \ last name and then first name \ and print the output: {customer_list}""") # #### View detailed outputs of the chains # In[ ]: import langchain langchain.debug=True agent.run(f"""Sort these customers by \ last name and then first name \ and print the output: {customer_list}""") langchain.debug=False # ## Define your own tool # In[ ]: #!pip install DateTime # In[ ]: from langchain.agents import tool from datetime import date # In[ ]: @tool def time(text: str) -> str: """Returns todays date, use this for any \ questions related to knowing todays date. \ The input should always be an empty string, \ and this function will always return todays \ date - any date mathmatics should occur \ outside this function.""" return str(date.today()) # In[ ]: agent= initialize_agent( tools + [time], llm, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, handle_parsing_errors=True, verbose = True) # **Note**: # # The agent will sometimes come to the wrong conclusion (agents are a work in progress!). # # If it does, please try running it again. # In[ ]: try: result = agent("whats the date today?") except: print("exception on external access") # Reminder: Download your notebook to you local computer to save your work. # In[ ]: # In[ ]: # In[ ]: # In[ ]: # In[ ]: # In[ ]: # In[ ]: # In[ ]: # In[ ]:
[ "langchain.agents.initialize_agent", "langchain.tools.python.tool.PythonREPLTool", "langchain.agents.load_tools", "langchain.chat_models.ChatOpenAI" ]
[((315, 348), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (338, 348), False, 'import warnings\n'), ((735, 761), 'datetime.date', 'datetime.date', (['(2024)', '(6)', '(12)'], {}), '(2024, 6, 12)\n', (748, 761), False, 'import datetime\n'), ((1324, 1366), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model': 'llm_model'}), '(temperature=0, model=llm_model)\n', (1334, 1366), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1388, 1434), 'langchain.agents.load_tools', 'load_tools', (["['llm-math', 'wikipedia']"], {'llm': 'llm'}), "(['llm-math', 'wikipedia'], llm=llm)\n", (1398, 1434), False, 'from langchain.agents import load_tools, initialize_agent\n'), ((1454, 1579), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': 'AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION', 'handle_parsing_errors': '(True)', 'verbose': '(True)'}), '(tools, llm, agent=AgentType.\n CHAT_ZERO_SHOT_REACT_DESCRIPTION, handle_parsing_errors=True, verbose=True)\n', (1470, 1579), False, 'from langchain.agents import load_tools, initialize_agent\n'), ((3139, 3273), 'langchain.agents.initialize_agent', 'initialize_agent', (['(tools + [time])', 'llm'], {'agent': 'AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION', 'handle_parsing_errors': '(True)', 'verbose': '(True)'}), '(tools + [time], llm, agent=AgentType.\n CHAT_ZERO_SHOT_REACT_DESCRIPTION, handle_parsing_errors=True, verbose=True)\n', (3155, 3273), False, 'from langchain.agents import load_tools, initialize_agent\n'), ((260, 273), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (271, 273), False, 'from dotenv import load_dotenv, find_dotenv\n'), ((616, 639), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (637, 639), False, 'import datetime\n'), ((1952, 1968), 'langchain.tools.python.tool.PythonREPLTool', 'PythonREPLTool', ([], {}), '()\n', (1966, 1968), False, 'from langchain.tools.python.tool import PythonREPLTool\n'), ((3105, 3117), 'datetime.date.today', 'date.today', ([], {}), '()\n', (3115, 3117), False, 'from datetime import date\n')]
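The notebook's custom `@tool` can be exercised without any LLM or API key. A minimal sketch (the tool name here is ours) that checks the decorator wired up the name, description, and `run` method:

from datetime import date

from langchain.agents import tool


@tool
def todays_date(text: str) -> str:
    """Returns today's date; the input string is ignored."""
    return str(date.today())


print(todays_date.name)         # "todays_date"
print(todays_date.description)  # derived from the docstring
print(todays_date.run(""))      # e.g. "2024-06-12"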
import sys import pandas as pd from llama_index import Document, set_global_service_context, StorageContext, load_index_from_storage, VectorStoreIndex from llama_index.indices.base import BaseIndex from llama_index.storage.docstore import SimpleDocumentStore from llama_index.storage.index_store import SimpleIndexStore from llama_index.vector_stores import SimpleVectorStore from config import ( API_KEY, DEPLOYMENT_NAME, MODEL_NAME, API_BASE, API_VERSION, EMBEDDING_MODEL, EMBEDDING_DEPLOYMENT, ) class LlamaQueryEngine: def __init__( self, api_key=API_KEY, deployment_name=DEPLOYMENT_NAME, model_name=MODEL_NAME, api_base=API_BASE, api_version=API_VERSION, embedding_model=EMBEDDING_MODEL, embedding_deployment=EMBEDDING_DEPLOYMENT, ): import openai import logging import os from langchain.embeddings import OpenAIEmbeddings from llama_index.llms import AzureOpenAI from llama_index import LangchainEmbedding from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext logging.basicConfig(stream=sys.stdout, level=logging.INFO) openai.api_type = "azure" openai.api_base = api_base openai.api_version = api_version os.environ["OPENAI_API_KEY"] = api_key openai.api_key = os.getenv("OPENAI_API_KEY") llm = AzureOpenAI( deployment_name=deployment_name, model=model_name, temperature=0, engine="gpt35", max_tokens=2048 ) embedding_llm = LangchainEmbedding( OpenAIEmbeddings( model=embedding_model, deployment=embedding_deployment, openai_api_key=openai.api_key, openai_api_base=openai.api_base, openai_api_type=openai.api_type, openai_api_version=openai.api_version, ), embed_batch_size=1, ) service_context = ServiceContext.from_defaults( llm=llm, embed_model=embedding_llm, ) set_global_service_context(service_context) # index = VectorStoreIndex.from_documents(documents) # self.index = index # self.query_engine = index.as_query_engine() self.index = None self.query_engine = None def load_doc_from_csv(self, csv_path, text_column="decoded_readme", max_docs=20, is_persist=False, has_persist=False, persist_dir="app/data/persist"): if has_persist: self.retrieve_index(persist_dir) return df = pd.read_csv(csv_path) text_list = df[text_column].tolist() text_list = text_list[:max_docs] documents = [Document(text=t) for t in text_list] index = VectorStoreIndex.from_documents(documents) self.index = index from llama_index.indices.postprocessor import SimilarityPostprocessor from llama_index.query_engine import RetrieverQueryEngine from llama_index.indices.vector_store import VectorIndexRetriever from llama_index import get_response_synthesizer # configure retriever retriever = VectorIndexRetriever( index=index, similarity_top_k=2, ) # configure response synthesizer response_synthesizer = get_response_synthesizer() # assemble query engine query_engine = RetrieverQueryEngine( retriever=retriever, response_synthesizer=response_synthesizer, node_postprocessors=[ SimilarityPostprocessor(similarity_cutoff=0.7) ] ) self.query_engine = query_engine # self.query_engine = index.as_query_engine() if is_persist: self.persist_index(persist_dir) def retrieve_index(self, persist_dir): storage_context = StorageContext.from_defaults( docstore=SimpleDocumentStore.from_persist_dir(persist_dir=persist_dir), vector_store=SimpleVectorStore.from_persist_dir(persist_dir=persist_dir), index_store=SimpleIndexStore.from_persist_dir(persist_dir=persist_dir), ) self.index = load_index_from_storage(storage_context) self.query_engine = self.index.as_query_engine() def persist_index(self, persist_dir): 
self.index.storage_context.persist(persist_dir=persist_dir) def query(self, query_text): if not self.query_engine: raise Exception("No query engine loaded") return self.query_engine.query(query_text) def get_index(self): return self.index
[ "langchain.embeddings.OpenAIEmbeddings" ]
[((1194, 1252), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (1213, 1252), False, 'import logging\n'), ((1435, 1462), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1444, 1462), False, 'import os\n'), ((1478, 1593), 'llama_index.llms.AzureOpenAI', 'AzureOpenAI', ([], {'deployment_name': 'deployment_name', 'model': 'model_name', 'temperature': '(0)', 'engine': '"""gpt35"""', 'max_tokens': '(2048)'}), "(deployment_name=deployment_name, model=model_name, temperature=\n 0, engine='gpt35', max_tokens=2048)\n", (1489, 1593), False, 'from llama_index.llms import AzureOpenAI\n'), ((2106, 2170), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embedding_llm'}), '(llm=llm, embed_model=embedding_llm)\n', (2134, 2170), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n'), ((2215, 2258), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (2241, 2258), False, 'from llama_index import Document, set_global_service_context, StorageContext, load_index_from_storage, VectorStoreIndex\n'), ((2719, 2740), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {}), '(csv_path)\n', (2730, 2740), True, 'import pandas as pd\n'), ((2901, 2943), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (2932, 2943), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n'), ((3298, 3351), 'llama_index.indices.vector_store.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'index', 'similarity_top_k': '(2)'}), '(index=index, similarity_top_k=2)\n', (3318, 3351), False, 'from llama_index.indices.vector_store import VectorIndexRetriever\n'), ((3460, 3486), 'llama_index.get_response_synthesizer', 'get_response_synthesizer', ([], {}), '()\n', (3484, 3486), False, 'from llama_index import get_response_synthesizer\n'), ((4321, 4361), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (4344, 4361), False, 'from llama_index import Document, set_global_service_context, StorageContext, load_index_from_storage, VectorStoreIndex\n'), ((1716, 1932), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'model': 'embedding_model', 'deployment': 'embedding_deployment', 'openai_api_key': 'openai.api_key', 'openai_api_base': 'openai.api_base', 'openai_api_type': 'openai.api_type', 'openai_api_version': 'openai.api_version'}), '(model=embedding_model, deployment=embedding_deployment,\n openai_api_key=openai.api_key, openai_api_base=openai.api_base,\n openai_api_type=openai.api_type, openai_api_version=openai.api_version)\n', (1732, 1932), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((2848, 2864), 'llama_index.Document', 'Document', ([], {'text': 't'}), '(text=t)\n', (2856, 2864), False, 'from llama_index import Document, set_global_service_context, StorageContext, load_index_from_storage, VectorStoreIndex\n'), ((4057, 4118), 'llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir', 'SimpleDocumentStore.from_persist_dir', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (4093, 4118), False, 'from llama_index.storage.docstore import SimpleDocumentStore\n'), ((4145, 4204), 
'llama_index.vector_stores.SimpleVectorStore.from_persist_dir', 'SimpleVectorStore.from_persist_dir', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (4179, 4204), False, 'from llama_index.vector_stores import SimpleVectorStore\n'), ((4230, 4288), 'llama_index.storage.index_store.SimpleIndexStore.from_persist_dir', 'SimpleIndexStore.from_persist_dir', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (4263, 4288), False, 'from llama_index.storage.index_store import SimpleIndexStore\n'), ((3703, 3749), 'llama_index.indices.postprocessor.SimilarityPostprocessor', 'SimilarityPostprocessor', ([], {'similarity_cutoff': '(0.7)'}), '(similarity_cutoff=0.7)\n', (3726, 3749), False, 'from llama_index.indices.postprocessor import SimilarityPostprocessor\n')]
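A condensed sketch of the persist/reload cycle the class above implements, using the same `llama_index` imports as the record. It assumes an embedding and LLM backend is configured (for example `OPENAI_API_KEY`, or the global service context set up in `__init__`), and the persist directory is a placeholder.

from llama_index import (Document, StorageContext, VectorStoreIndex,
                         load_index_from_storage)

# Build and persist a tiny index.
index = VectorStoreIndex.from_documents([Document(text="hello vector world")])
index.storage_context.persist(persist_dir="demo_persist")

# Reload it later; from_defaults(persist_dir=...) is a shortcut for wiring the
# doc store, index store and vector store individually as retrieve_index() does.
storage_context = StorageContext.from_defaults(persist_dir="demo_persist")
reloaded = load_index_from_storage(storage_context)
print(reloaded.as_query_engine().query("What does the document say?"))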
import arxiv import openai import langchain import pinecone from langchain_community.document_loaders import ArxivLoader from langchain.docstore.document import Document from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.embeddings.openai import OpenAIEmbeddings from langchain.vectorstores import Pinecone from langchain.llms import OpenAI from langchain import PromptTemplate from langchain.chat_models import ChatOpenAI from langchain.chains.summarize import load_summarize_chain from langchain.chains.question_answering import load_qa_chain from langchain import OpenAI from utils import * import streamlit as st import os from dotenv import load_dotenv load_dotenv() openai_api_key = os.getenv('OPENAI_API_KEY') pinecone_api_key = os.getenv('PINECONE_API_KEY') environment = os.getenv('PINECONE_ENV') llm_summary = ChatOpenAI(temperature=0.3, model_name="gpt-3.5-turbo-0125") llm = OpenAI(model_name="gpt-3.5-turbo-0125", temperature=0.6, api_key=openai_api_key) if 'summary' not in st.session_state: st.session_state.summary = None if 'documents' not in st.session_state: st.session_state.documents = None st.title('Arxiv Paper Summarizer and Interactive Q&A') paper_id_input = st.text_input('Enter Arxiv Paper ID', '') if st.button('Summarize Paper') and paper_id_input: with st.spinner('Fetching and summarizing the paper...'): try: doc = arxiv_loader(paper_id=paper_id_input) st.session_state.documents = chunk_data(docs=doc) # st.write(st.session_state.documents) chain = load_summarize_chain( llm=llm_summary, chain_type='map_reduce', verbose=False ) summary = chain.run(st.session_state.documents) st.subheader('Summary') st.write(summary) except Exception as e: st.error(f"An error occurred: {e}") def initialize_index(index_name='arxiv-summarizer'): # documents = chunk_data(docs=doc) embeddings = OpenAIEmbeddings(api_key=openai_api_key) index_name = index_name # Make sure environment is correctly spelled (there was a typo in your provided code) pinecone.Pinecone( api_key=pinecone_api_key, environment=environment ) if st.session_state.documents: index = Pinecone.from_documents(st.session_state.documents, embeddings, index_name=index_name) else: index = None return index index = initialize_index() def retrieve_query(query, k=2): matching_results = index.similarity_search(query, k=k) return matching_results def retrieve_answers(query): chain = load_qa_chain(llm, chain_type='stuff') doc_search = retrieve_query(query) print(doc_search) response = chain.run(input_documents=doc_search, question=query) return response if paper_id_input: user_query = st.text_input("Ask a question about the paper:", '') if user_query: if st.button('Get Answer'): with st.spinner('Retrieving your answer...'): try: answer = retrieve_answers(user_query) st.subheader('Answer') st.write(answer) except Exception as e: st.error(f"An error occurred while retrieving the answer: {e}")
[ "langchain.chains.question_answering.load_qa_chain", "langchain.chains.summarize.load_summarize_chain", "langchain.vectorstores.Pinecone.from_documents", "langchain.chat_models.ChatOpenAI", "langchain.OpenAI", "langchain.embeddings.openai.OpenAIEmbeddings" ]
[((690, 703), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (701, 703), False, 'from dotenv import load_dotenv\n'), ((722, 749), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (731, 749), False, 'import os\n'), ((769, 798), 'os.getenv', 'os.getenv', (['"""PINECONE_API_KEY"""'], {}), "('PINECONE_API_KEY')\n", (778, 798), False, 'import os\n'), ((813, 838), 'os.getenv', 'os.getenv', (['"""PINECONE_ENV"""'], {}), "('PINECONE_ENV')\n", (822, 838), False, 'import os\n'), ((854, 914), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.3)', 'model_name': '"""gpt-3.5-turbo-0125"""'}), "(temperature=0.3, model_name='gpt-3.5-turbo-0125')\n", (864, 914), False, 'from langchain.chat_models import ChatOpenAI\n'), ((925, 1010), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo-0125"""', 'temperature': '(0.6)', 'api_key': 'openai_api_key'}), "(model_name='gpt-3.5-turbo-0125', temperature=0.6, api_key=openai_api_key\n )\n", (931, 1010), False, 'from langchain import OpenAI\n'), ((1162, 1216), 'streamlit.title', 'st.title', (['"""Arxiv Paper Summarizer and Interactive Q&A"""'], {}), "('Arxiv Paper Summarizer and Interactive Q&A')\n", (1170, 1216), True, 'import streamlit as st\n'), ((1235, 1276), 'streamlit.text_input', 'st.text_input', (['"""Enter Arxiv Paper ID"""', '""""""'], {}), "('Enter Arxiv Paper ID', '')\n", (1248, 1276), True, 'import streamlit as st\n'), ((1281, 1309), 'streamlit.button', 'st.button', (['"""Summarize Paper"""'], {}), "('Summarize Paper')\n", (1290, 1309), True, 'import streamlit as st\n'), ((2076, 2116), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'api_key': 'openai_api_key'}), '(api_key=openai_api_key)\n', (2092, 2116), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((2239, 2307), 'pinecone.Pinecone', 'pinecone.Pinecone', ([], {'api_key': 'pinecone_api_key', 'environment': 'environment'}), '(api_key=pinecone_api_key, environment=environment)\n', (2256, 2307), False, 'import pinecone\n'), ((2706, 2744), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['llm'], {'chain_type': '"""stuff"""'}), "(llm, chain_type='stuff')\n", (2719, 2744), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((2932, 2984), 'streamlit.text_input', 'st.text_input', (['"""Ask a question about the paper:"""', '""""""'], {}), "('Ask a question about the paper:', '')\n", (2945, 2984), True, 'import streamlit as st\n'), ((1339, 1390), 'streamlit.spinner', 'st.spinner', (['"""Fetching and summarizing the paper..."""'], {}), "('Fetching and summarizing the paper...')\n", (1349, 1390), True, 'import streamlit as st\n'), ((2381, 2472), 'langchain.vectorstores.Pinecone.from_documents', 'Pinecone.from_documents', (['st.session_state.documents', 'embeddings'], {'index_name': 'index_name'}), '(st.session_state.documents, embeddings, index_name=\n index_name)\n', (2404, 2472), False, 'from langchain.vectorstores import Pinecone\n'), ((3020, 3043), 'streamlit.button', 'st.button', (['"""Get Answer"""'], {}), "('Get Answer')\n", (3029, 3043), True, 'import streamlit as st\n'), ((1593, 1670), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', ([], {'llm': 'llm_summary', 'chain_type': '"""map_reduce"""', 'verbose': '(False)'}), "(llm=llm_summary, chain_type='map_reduce', verbose=False)\n", (1613, 1670), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((1821, 1844), 'streamlit.subheader', 
'st.subheader', (['"""Summary"""'], {}), "('Summary')\n", (1833, 1844), True, 'import streamlit as st\n'), ((1857, 1874), 'streamlit.write', 'st.write', (['summary'], {}), '(summary)\n', (1865, 1874), True, 'import streamlit as st\n'), ((1918, 1953), 'streamlit.error', 'st.error', (['f"""An error occurred: {e}"""'], {}), "(f'An error occurred: {e}')\n", (1926, 1953), True, 'import streamlit as st\n'), ((3062, 3101), 'streamlit.spinner', 'st.spinner', (['"""Retrieving your answer..."""'], {}), "('Retrieving your answer...')\n", (3072, 3101), True, 'import streamlit as st\n'), ((3202, 3224), 'streamlit.subheader', 'st.subheader', (['"""Answer"""'], {}), "('Answer')\n", (3214, 3224), True, 'import streamlit as st\n'), ((3245, 3261), 'streamlit.write', 'st.write', (['answer'], {}), '(answer)\n', (3253, 3261), True, 'import streamlit as st\n'), ((3321, 3384), 'streamlit.error', 'st.error', (['f"""An error occurred while retrieving the answer: {e}"""'], {}), "(f'An error occurred while retrieving the answer: {e}')\n", (3329, 3384), True, 'import streamlit as st\n')]
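`chunk_data` comes from the record's local `utils` module and is not shown. A plausible minimal version using the `RecursiveCharacterTextSplitter` the record already imports (chunk sizes are illustrative):

from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter


def chunk_data(docs, chunk_size=500, chunk_overlap=50):
    """Split loaded documents into overlapping chunks for embedding."""
    splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size,
                                              chunk_overlap=chunk_overlap)
    return splitter.split_documents(docs)


docs = [Document(page_content="LangChain splits long papers into overlapping chunks. " * 40)]
chunks = chunk_data(docs)
print(len(chunks), "chunks;", chunks[0].page_content[:60])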
"""Create a ChatVectorDBChain for question/answering.""" from langchain.callbacks.manager import AsyncCallbackManager from langchain.callbacks.tracers import LangChainTracer from langchain.chains import ChatVectorDBChain from langchain.chains.chat_vector_db.prompts import (CONDENSE_QUESTION_PROMPT, QA_PROMPT) from langchain.chains.llm import LLMChain from langchain.chains.question_answering import load_qa_chain from langchain.chat_models import ChatOpenAI from langchain.llms import OpenAI from langchain.vectorstores import Pinecone from langchain.chains import ConversationalRetrievalChain from langchain.prompts.chat import ( ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate, ) # system_template = """Use the following pieces of context to answer the users question. # If you don't know the answer, just say that you don't know, don't try to make up an answer. # ---------------- # {context}""" template = """You are a helpful AI assistant that answers questions about an e-commerce company called "Sindabad.com" in a friendly and polite manner. You will be given a context that will represent Sindabad.com's product inventory. Users might ask about products, they might want to know your suggestions as well. Most importantly, they might ask about specific product and its associated product link. If they want to know about product links, you will provide it accordingly with the help of the given "Context". Answer the question in your own words as truthfully as possible from the context given to you. If you do not know the answer to the question, simply respond with "I don't know. Could you please rephrase the question?". If questions are asked where there is no relevant information available in the context, answer the question with your existing knowledge on that question and "ignore" the "Context" given to you. ---------------- context: {context}""" messages = [ SystemMessagePromptTemplate.from_template(template), HumanMessagePromptTemplate.from_template("{question}") ] prompt = ChatPromptTemplate.from_messages(messages) def get_chain( vectorstore: Pinecone, question_handler, stream_handler, tracing: bool = False ) -> ChatVectorDBChain: """Create a ChatVectorDBChain for question/answering.""" # Construct a ChatVectorDBChain with a streaming llm for combine docs # and a separate, non-streaming llm for question generation manager = AsyncCallbackManager([]) question_manager = AsyncCallbackManager([question_handler]) stream_manager = AsyncCallbackManager([stream_handler]) if tracing: tracer = LangChainTracer() tracer.load_default_session() manager.add_handler(tracer) question_manager.add_handler(tracer) stream_manager.add_handler(tracer) question_gen_llm = ChatOpenAI( temperature=0, verbose=True, callback_manager=question_manager, ) streaming_llm = ChatOpenAI( streaming=True, callback_manager=stream_manager, verbose=True, temperature=0, ) question_generator = LLMChain( llm=question_gen_llm, prompt=CONDENSE_QUESTION_PROMPT, callback_manager=manager ) doc_chain = load_qa_chain( streaming_llm, chain_type="stuff", prompt=prompt, callback_manager=manager ) # qa = ChatVectorDBChain( # vectorstore=vectorstore, # combine_docs_chain=doc_chain, # question_generator=question_generator, # callback_manager=manager, # ) qa = ConversationalRetrievalChain( retriever=vectorstore.as_retriever(), combine_docs_chain=doc_chain, question_generator=question_generator, callback_manager=manager ) return qa
[ "langchain.chains.question_answering.load_qa_chain", "langchain.callbacks.tracers.LangChainTracer", "langchain.callbacks.manager.AsyncCallbackManager", "langchain.prompts.chat.SystemMessagePromptTemplate.from_template", "langchain.chat_models.ChatOpenAI", "langchain.chains.llm.LLMChain", "langchain.prompts.chat.HumanMessagePromptTemplate.from_template", "langchain.prompts.chat.ChatPromptTemplate.from_messages" ]
[((2109, 2151), 'langchain.prompts.chat.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['messages'], {}), '(messages)\n', (2141, 2151), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((1986, 2037), 'langchain.prompts.chat.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['template'], {}), '(template)\n', (2027, 2037), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((2043, 2097), 'langchain.prompts.chat.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{question}"""'], {}), "('{question}')\n", (2083, 2097), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((2501, 2525), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[]'], {}), '([])\n', (2521, 2525), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((2549, 2589), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[question_handler]'], {}), '([question_handler])\n', (2569, 2589), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((2611, 2649), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[stream_handler]'], {}), '([stream_handler])\n', (2631, 2649), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((2887, 2961), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'verbose': '(True)', 'callback_manager': 'question_manager'}), '(temperature=0, verbose=True, callback_manager=question_manager)\n', (2897, 2961), False, 'from langchain.chat_models import ChatOpenAI\n'), ((3013, 3105), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'streaming': '(True)', 'callback_manager': 'stream_manager', 'verbose': '(True)', 'temperature': '(0)'}), '(streaming=True, callback_manager=stream_manager, verbose=True,\n temperature=0)\n', (3023, 3105), False, 'from langchain.chat_models import ChatOpenAI\n'), ((3167, 3260), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'question_gen_llm', 'prompt': 'CONDENSE_QUESTION_PROMPT', 'callback_manager': 'manager'}), '(llm=question_gen_llm, prompt=CONDENSE_QUESTION_PROMPT,\n callback_manager=manager)\n', (3175, 3260), False, 'from langchain.chains.llm import LLMChain\n'), ((3303, 3396), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['streaming_llm'], {'chain_type': '"""stuff"""', 'prompt': 'prompt', 'callback_manager': 'manager'}), "(streaming_llm, chain_type='stuff', prompt=prompt,\n callback_manager=manager)\n", (3316, 3396), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((2683, 2700), 'langchain.callbacks.tracers.LangChainTracer', 'LangChainTracer', ([], {}), '()\n', (2698, 2700), False, 'from langchain.callbacks.tracers import LangChainTracer\n')]
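The `stream_handler` passed into `get_chain` is not defined in the record; a minimal handler of the kind it expects might look like this (the class name is ours), forwarding each streamed token as it arrives.

from langchain.callbacks.base import AsyncCallbackHandler


class PrintStreamHandler(AsyncCallbackHandler):
    """Prints tokens as the streaming ChatOpenAI model emits them."""

    async def on_llm_new_token(self, token: str, **kwargs) -> None:
        print(token, end="", flush=True)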
import langchain_helper as lch # Import custom helper module for LangChain operations import streamlit as st # Import Streamlit for web app development # Set up the Streamlit web page title st.title("Data Structures Problems Generator") # Define the list of topics for data structure problems topic_options = [ "", "Recursion", "Stack", "Queue", "Linked List", "Priority Queue", "Hash Table", "Binary Tree", "Binary Search Tree", "Graph", "Depth-First Search", "Breadth-First Search" ] # Create a sidebar selection box in Streamlit for choosing a topic topic = st.sidebar.selectbox( "Choose a Topic for the Problem", topic_options) # Define the list of difficulty levels for the problems difficulty_levels = ["", "Easy", "Medium", "Hard"] # Create a sidebar selection box in Streamlit for choosing the difficulty level difficulty = st.sidebar.selectbox( "Choose a Difficulty Level", difficulty_levels) # Create buttons in the sidebar for submitting a problem request and for solving a problem submit_button = st.sidebar.button("Submit") solve_button = st.sidebar.button("Solve") # Handle the event when the 'Submit' button is clicked if submit_button and topic and difficulty: # Generate a data structure problem using the selected topic and difficulty response = lch.generate_DS_problem( topic=topic, difficulty=difficulty) # Store the generated problem in a variable lch.coding_problem = response["coding_problem"] # Display the generated problem on the web page st.subheader("Coding problem: ") st.markdown(lch.coding_problem) # Handle the event when the 'Solve' button is clicked if solve_button and lch.coding_problem: # Generate a solution for the stored coding problem solution = lch.generate_DS_solution(lch.coding_problem) # Extract the solution from the response solution = solution["coding_problem_solution"] # Display both the problem and its solution on the web page st.subheader("Coding problem: ") st.markdown(lch.coding_problem) st.subheader("Solution: ") st.markdown(solution)
[ "langchain_helper.generate_DS_solution", "langchain_helper.generate_DS_problem" ]
[((193, 239), 'streamlit.title', 'st.title', (['"""Data Structures Problems Generator"""'], {}), "('Data Structures Problems Generator')\n", (201, 239), True, 'import streamlit as st\n'), ((570, 639), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Choose a Topic for the Problem"""', 'topic_options'], {}), "('Choose a Topic for the Problem', topic_options)\n", (590, 639), True, 'import streamlit as st\n'), ((850, 918), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Choose a Difficulty Level"""', 'difficulty_levels'], {}), "('Choose a Difficulty Level', difficulty_levels)\n", (870, 918), True, 'import streamlit as st\n'), ((1036, 1063), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Submit"""'], {}), "('Submit')\n", (1053, 1063), True, 'import streamlit as st\n'), ((1079, 1105), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Solve"""'], {}), "('Solve')\n", (1096, 1105), True, 'import streamlit as st\n'), ((1300, 1359), 'langchain_helper.generate_DS_problem', 'lch.generate_DS_problem', ([], {'topic': 'topic', 'difficulty': 'difficulty'}), '(topic=topic, difficulty=difficulty)\n', (1323, 1359), True, 'import langchain_helper as lch\n'), ((1525, 1557), 'streamlit.subheader', 'st.subheader', (['"""Coding problem: """'], {}), "('Coding problem: ')\n", (1537, 1557), True, 'import streamlit as st\n'), ((1562, 1593), 'streamlit.markdown', 'st.markdown', (['lch.coding_problem'], {}), '(lch.coding_problem)\n', (1573, 1593), True, 'import streamlit as st\n'), ((1760, 1804), 'langchain_helper.generate_DS_solution', 'lch.generate_DS_solution', (['lch.coding_problem'], {}), '(lch.coding_problem)\n', (1784, 1804), True, 'import langchain_helper as lch\n'), ((1969, 2001), 'streamlit.subheader', 'st.subheader', (['"""Coding problem: """'], {}), "('Coding problem: ')\n", (1981, 2001), True, 'import streamlit as st\n'), ((2006, 2037), 'streamlit.markdown', 'st.markdown', (['lch.coding_problem'], {}), '(lch.coding_problem)\n', (2017, 2037), True, 'import streamlit as st\n'), ((2042, 2068), 'streamlit.subheader', 'st.subheader', (['"""Solution: """'], {}), "('Solution: ')\n", (2054, 2068), True, 'import streamlit as st\n'), ((2073, 2094), 'streamlit.markdown', 'st.markdown', (['solution'], {}), '(solution)\n', (2084, 2094), True, 'import streamlit as st\n')]
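`langchain_helper` (imported as `lch`) is not part of the record. A hypothetical minimal `generate_DS_problem` that matches how the Streamlit app consumes it (a dict with a `coding_problem` key) could look like this; the prompt wording and model settings are guesses.

from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate


def generate_DS_problem(topic: str, difficulty: str) -> dict:
    llm = ChatOpenAI(temperature=0.7)
    prompt = PromptTemplate(
        input_variables=["topic", "difficulty"],
        template="Write one {difficulty} data-structures coding problem about {topic}.",
    )
    chain = LLMChain(llm=llm, prompt=prompt, output_key="coding_problem")
    # Calling the chain with a dict returns the inputs plus the named output key.
    return chain({"topic": topic, "difficulty": difficulty})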
from llama_index.core import VectorStoreIndex,SimpleDirectoryReader,ServiceContext print("VectorStoreIndex,SimpleDirectoryReader,ServiceContext imported") from llama_index.llms.huggingface import HuggingFaceLLM print("HuggingFaceLLM imported") from llama_index.core.prompts.prompts import SimpleInputPrompt print("SimpleInputPrompt imported") from ctransformers import AutoModelForCausalLM print("AutoModelForCausalLM imported") from langchain.embeddings.huggingface import HuggingFaceEmbeddings print("HuggingFaceEmbeddings imported") from llama_index.core import ServiceContext print("ServiceContext imported") from llama_index.embeddings.langchain import LangchainEmbedding print("LangchainEmbedding imported") from langchain_community.document_loaders import PyPDFLoader print("PyPDFLoader imported") import json import torch import os from dotenv import load_dotenv load_dotenv() HuggingFace_Api = os.environ.get('HF_TOKEN') documents = SimpleDirectoryReader('./testing/docs').load_data() print("SimpleDirectoryReader imported") def get_system_prompt(): '''This function is used to load the system prompt from the prompts.json file''' with open('prompts.json') as f: data = json.load(f) return data['Default'] query_wrapper_prompt=SimpleInputPrompt("<|USER|>{query_str}<|ASSISTANT|>") def load_model(context_window: int, max_new_tokens: int): '''This function is used to load the model from the HuggingFaceLLM''' print(f"""Available Cuda: {torch.cuda.get_device_name()} \n Trying to load the model model""") try: llm = HuggingFaceLLM(context_window=context_window, max_new_tokens=max_new_tokens, generate_kwargs={"temperature": 0.0, "do_sample": False}, system_prompt=get_system_prompt(), query_wrapper_prompt=query_wrapper_prompt, tokenizer_name="./meta", model_name="./meta", device_map="cuda", # uncomment this if using CUDA to reduce memory usage model_kwargs={"torch_dtype": torch.float16,"load_in_8bit":True } ) print("Model Loaded") return llm except Exception as e: print(f"Error: {e}") return None def embed_model(): '''This function is used to load the model from the LangchainEmbedding''' embed = LangchainEmbedding( HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")) service_context=ServiceContext.from_defaults( chunk_size=1024, llm=load_model(context_window=4096, max_new_tokens=256), embed_model=embed ) return service_context def get_index(): '''This function is used to load the index from the VectorStoreIndex''' index=VectorStoreIndex.from_documents(documents,service_context=embed_model()) return index def main(): query_engine=get_index().as_query_engine() response=query_engine.query("what is this PDF tells about?") out = response print(response) if __name__ == "__main__": main()
[ "langchain.embeddings.huggingface.HuggingFaceEmbeddings" ]
[((872, 885), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (883, 885), False, 'from dotenv import load_dotenv\n'), ((905, 931), 'os.environ.get', 'os.environ.get', (['"""HF_TOKEN"""'], {}), "('HF_TOKEN')\n", (919, 931), False, 'import os\n'), ((1262, 1315), 'llama_index.core.prompts.prompts.SimpleInputPrompt', 'SimpleInputPrompt', (['"""<|USER|>{query_str}<|ASSISTANT|>"""'], {}), "('<|USER|>{query_str}<|ASSISTANT|>')\n", (1279, 1315), False, 'from llama_index.core.prompts.prompts import SimpleInputPrompt\n'), ((945, 984), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./testing/docs"""'], {}), "('./testing/docs')\n", (966, 984), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n'), ((1200, 1212), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1209, 1212), False, 'import json\n'), ((2523, 2598), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""sentence-transformers/all-mpnet-base-v2"""'}), "(model_name='sentence-transformers/all-mpnet-base-v2')\n", (2544, 2598), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((1481, 1509), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', ([], {}), '()\n', (1507, 1509), False, 'import torch\n')]
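The embedding wrapper in `embed_model()` can be checked on its own, with no GPU or LLM; the model name is the one the record uses, so the first run downloads it from the Hugging Face hub.

from langchain.embeddings.huggingface import HuggingFaceEmbeddings

emb = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
vector = emb.embed_query("hello world")
print(len(vector))  # all-mpnet-base-v2 produces 768-dimensional embeddings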
import itertools
from langchain.cache import InMemoryCache, SQLiteCache
import langchain
import pandas as pd
from certa.utils import merge_sources
import ellmer.models
import ellmer.metrics
from time import sleep, time
import traceback
from tqdm import tqdm

cache = "sqlite"
samples = 2
explanation_granularity = "attribute"

# setup langchain cache
if cache == "memory":
    langchain.llm_cache = InMemoryCache()
elif cache == "sqlite":
    langchain.llm_cache = SQLiteCache(database_path=".langchain.db")

llm_configs = [
    {"model_type": "falcon", "model_name": "vilsonrodrigues/falcon-7b-instruct-sharded",
     "deployment_name": "local", "tag": "falcon"},
]

for llm_config in llm_configs:
    pase = llm = ellmer.models.GenericEllmer(
        explanation_granularity=explanation_granularity, verbose=True,
        deployment_name=llm_config['deployment_name'], temperature=0.01,
        model_name=llm_config['model_name'], model_type=llm_config['model_type'],
        prompts={"pase": "ellmer/prompts/lc_pase_llama2.txt"})
    ptsew = ellmer.models.GenericEllmer(
        explanation_granularity=explanation_granularity, verbose=True,
        deployment_name=llm_config['deployment_name'], temperature=0.01,
        model_name=llm_config['model_name'], model_type=llm_config['model_type'],
        prompts={"ptse": {"er": "ellmer/prompts/er.txt", "why": "ellmer/prompts/er-why.txt",
                          "saliency": "ellmer/prompts/er-saliency-lc.txt",
                          "cf": "ellmer/prompts/er-cf-lc.txt"}})

    # for each dataset in deepmatcher datasets
    dataset_names = ['abt_buy', 'fodo_zaga', 'walmart_amazon']
    base_dir = '/Users/tteofili/dev/cheapER/datasets/'
    for d in dataset_names:
        print(f'using dataset {d}')
        dataset_dir = '/'.join([base_dir, d])
        lsource = pd.read_csv(dataset_dir + '/tableA.csv')
        rsource = pd.read_csv(dataset_dir + '/tableB.csv')
        gt = pd.read_csv(dataset_dir + '/train.csv')
        valid = pd.read_csv(dataset_dir + '/valid.csv')
        test = pd.read_csv(dataset_dir + '/test.csv')
        test_df = merge_sources(test, 'ltable_', 'rtable_', lsource, rsource, ['label'], [])

        ellmers = {
            "ptsew_" + llm_config['tag']: ptsew,
            "pase_" + llm_config['tag']: pase,
        }

        result_files = []
        all_llm_results = dict()
        for key, llm in ellmers.items():
            print(f'{key} on {d}')
            curr_llm_results = []
            start_time = time()

            # generate predictions and explanations
            test_data_df = test_df[:samples]
            ranged = range(len(test_data_df))
            for idx in tqdm(ranged, disable=False):
                try:
                    rand_row = test_df.iloc[[idx]]
                    ltuple, rtuple = ellmer.utils.get_tuples(rand_row)
                    print(f'ltuple:\n{ltuple}\nrtuple:\n{rtuple}')
                    answer_dictionary = llm.predict_and_explain(ltuple, rtuple)
                    print(answer_dictionary)
                    prediction = answer_dictionary['prediction']
                    saliency = answer_dictionary['saliency']
                    cfs = [answer_dictionary['cf']]
                    curr_llm_results.append({"id": idx, "ltuple": ltuple, "rtuple": rtuple,
                                             "prediction": prediction, "label": rand_row['label'].values[0],
                                             "saliency": saliency, "cfs": cfs})
                except Exception:
                    traceback.print_exc()
                    print(f'error, waiting...')
                    sleep(10)
                    start_time += 10
[ "langchain.cache.InMemoryCache", "langchain.cache.SQLiteCache" ]
[((399, 414), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (412, 414), False, 'from langchain.cache import InMemoryCache, SQLiteCache\n'), ((465, 507), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': '""".langchain.db"""'}), "(database_path='.langchain.db')\n", (476, 507), False, 'from langchain.cache import InMemoryCache, SQLiteCache\n'), ((2112, 2152), 'pandas.read_csv', 'pd.read_csv', (["(dataset_dir + '/tableA.csv')"], {}), "(dataset_dir + '/tableA.csv')\n", (2123, 2152), True, 'import pandas as pd\n'), ((2171, 2211), 'pandas.read_csv', 'pd.read_csv', (["(dataset_dir + '/tableB.csv')"], {}), "(dataset_dir + '/tableB.csv')\n", (2182, 2211), True, 'import pandas as pd\n'), ((2225, 2264), 'pandas.read_csv', 'pd.read_csv', (["(dataset_dir + '/train.csv')"], {}), "(dataset_dir + '/train.csv')\n", (2236, 2264), True, 'import pandas as pd\n'), ((2281, 2320), 'pandas.read_csv', 'pd.read_csv', (["(dataset_dir + '/valid.csv')"], {}), "(dataset_dir + '/valid.csv')\n", (2292, 2320), True, 'import pandas as pd\n'), ((2336, 2374), 'pandas.read_csv', 'pd.read_csv', (["(dataset_dir + '/test.csv')"], {}), "(dataset_dir + '/test.csv')\n", (2347, 2374), True, 'import pandas as pd\n'), ((2393, 2467), 'certa.utils.merge_sources', 'merge_sources', (['test', '"""ltable_"""', '"""rtable_"""', 'lsource', 'rsource', "['label']", '[]'], {}), "(test, 'ltable_', 'rtable_', lsource, rsource, ['label'], [])\n", (2406, 2467), False, 'from certa.utils import merge_sources\n'), ((2790, 2796), 'time.time', 'time', ([], {}), '()\n', (2794, 2796), False, 'from time import sleep, time\n'), ((2964, 2991), 'tqdm.tqdm', 'tqdm', (['ranged'], {'disable': '(False)'}), '(ranged, disable=False)\n', (2968, 2991), False, 'from tqdm import tqdm\n'), ((3796, 3817), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3815, 3817), False, 'import traceback\n'), ((3886, 3895), 'time.sleep', 'sleep', (['(10)'], {}), '(10)\n', (3891, 3895), False, 'from time import sleep, time\n')]
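For context on the cache switch in the snippet above, here is a minimal sketch (not from the source) of how `langchain.llm_cache` behaves once set: identical prompts are served from the cache instead of reaching the model. The model name and prompt below are placeholders.

import langchain
from langchain.cache import SQLiteCache
from langchain.llms import OpenAI

langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
llm = OpenAI(model_name="text-davinci-003")

print(llm("Tell me a joke"))  # first call reaches the API and populates the cache
print(llm("Tell me a joke"))  # identical prompt is answered from the SQLite cache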
import streamlit as st

# Import the LangChain library
import langchain

# Load the AI model
model = langchain.load_model("model.pkl")


# Create a function to get the feedback from the AI model
def get_feedback(statement):
    # Get the predictions from the AI model
    predictions = model.predict(statement)

    # Create a list of feedback
    feedback = []
    for prediction in predictions:
        feedback.append(prediction["feedback"])

    return feedback


# Create a function to display the feedback
def display_feedback(statement):
    # Get the feedback from the AI model
    feedback = get_feedback(statement)

    # Display the feedback to the user
    st.write("Here is the feedback from the AI model:")
    st.write(feedback)


# Create a main function
def main():
    # Get the personal statement from the user
    statement = st.text_input("Enter your personal statement:")

    # Display the feedback to the user
    display_feedback(statement)


# Run the main function
if __name__ == "__main__":
    main()

# print("Start!")
# load_dotenv(find_dotenv())
# # pinecone.init(api_key=os.getenv("PINECONE_API_KEY"), environment=os.getenv("PINECONE_ENVIRONMENT"))
# dataset_path = "./dataset.txt"
# loader = TextLoader(dataset_path)
# comments = loader.load_and_split()
# embeddings = OpenAIEmbeddings(model_name="ada")
# vectordb = Chroma.from_documents(comments, embedding=embeddings, persist_directory=".")
# vectordb.persist()
# memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
# # Assuming that GPT-4 is used for grammar, structure, and fact-checking
# # and Claude is used for providing tips and encouraging students to do their own research
# grammar_llm = OpenAI(temperature=0.8)
# tips_llm = Claude(temperature=0.8)
# grammar_qa = ConversationalRetrievalChain.from_llm(grammar_llm, vectordb.as_retriever(), memory=memory)
# tips_qa = ConversationalRetrievalChain.from_llm(tips_llm, vectordb.as_retriever(), memory=memory)
# st.title('AI Statement Reviewer')
# user_input = st.text_area("Enter your personal statement here:")
# if st.button('Get feedback'):
#     grammar_result = grammar_qa({"question": user_input})
#     tips_result = tips_qa({"question": user_input})
#     st.write("Grammar and Structure Feedback:")
#     st.write(grammar_result["answer"])
#     st.write("Tips and Recommendations:")
#     st.write(tips_result["answer"])
[ "langchain.load_model" ]
[((101, 134), 'langchain.load_model', 'langchain.load_model', (['"""model.pkl"""'], {}), "('model.pkl')\n", (121, 134), False, 'import langchain\n'), ((667, 718), 'streamlit.write', 'st.write', (['"""Here is the feedback from the AI model:"""'], {}), "('Here is the feedback from the AI model:')\n", (675, 718), True, 'import streamlit as st\n'), ((723, 741), 'streamlit.write', 'st.write', (['feedback'], {}), '(feedback)\n', (731, 741), True, 'import streamlit as st\n'), ((843, 890), 'streamlit.text_input', 'st.text_input', (['"""Enter your personal statement:"""'], {}), "('Enter your personal statement:')\n", (856, 890), True, 'import streamlit as st\n')]
# TODO speed up by extracting resume in structure and job beore sending to gpt4 import re from bs4 import BeautifulSoup from pyppeteer import launch import uuid import time from PIL import Image import numpy as np from fastapi import FastAPI, File, UploadFile, Form from fastapi import Request from langchain.prompts import ChatPromptTemplate import json from prompts_json import json_schema, system_message_content_without_coverletter, system_message_structurize_json from langchain.chains.openai_functions import create_structured_output_chain import asyncio import concurrent.futures import threading from pydantic import BaseModel from fastapi.templating import Jinja2Templates from fastapi.responses import HTMLResponse from langchain.schema import AIMessage, HumanMessage, SystemMessage from langchain.chat_models import ChatOpenAI from langchain.chains import LLMChain import PyPDF2 import os import langchain from langchain.cache import InMemoryCache from langchain.cache import SQLiteCache from pathlib import Path from typing import Optional from langchain.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler from typing import Any, Dict, List from langchain.schema import LLMResult, HumanMessage # load env variables from dotenv import load_dotenv load_dotenv() # llm = ChatOpenAI(model='gpt-4', temperature=0.1, max_tokens=2000, top_p=1, frequency_penalty=0.0, presence_penalty=0.0, stop=["\n\n", "Human:", "System:"]) # llm = ChatOpenAI(model='gpt-3.5-turbo-16k', temperature=0.1) # llm = ChatOpenAI(model='gpt-4-0613', temperature=0.1) # llm = ChatOpenAI(model='gpt-3.5-turbo-16k-0613', temperature=0.1) # TODO change back to GPT-4 def get_pdf_content(pdf_path): pdf = fitz.open(pdf_path) text = "" for page in pdf: text += page.get_text() # TODO disable OCR because of package size # if not text.strip(): # reader = easyocr.Reader(['en']) # for page in pdf: # pix = page.get_pixmap() # img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples) # text += ' '.join([t[1] for t in reader.readtext(np.array(img))]) return text def highlight_words_in_pdf(pdf_path, words_to_highlight): pdf = fitz.open(pdf_path) for word in words_to_highlight.split(","): word = word.strip() for page in pdf: text_instances = page.search_for(word) for inst in text_instances: highlight = page.add_highlight_annot(inst) temp_output_path = "/tmp/highlighted_output.pdf" pdf.save(temp_output_path) return temp_output_path def extract_text_from_pdf(file_path): with open(file_path, "rb") as file: reader = PyPDF2.PdfReader(file) text = "" for page in range(len(reader.pages)): text += reader.pages[page].extract_text() return text def handle_resume_upload(uploaded_file, resume_path): if uploaded_file is not None: file_details = {"FileName": uploaded_file.filename, "FileType": uploaded_file.content_type} if file_details["FileType"] == "application/pdf": file_path = resume_path / f"resume_{uuid.uuid4()}.pdf" with open(file_path, "wb") as f: f.write(uploaded_file.file.read()) return extract_text_from_pdf(file_path), None else: return None, "Please upload a valid PDF file." 
return None, None async def structurize_with_gpt(text, model_name='gpt-3.5-turbo-16k-0613', system_message_content=system_message_structurize_json): global system_message_structurize_json response = await generate_response( system_message_content, text, model_name) return response async def do_match(resume, job_desc): global llm, system_message_content_without_coverletter, json_schema, system_message_structurize_json # langchain.llm_cache = InMemoryCache() # We can do the same thing with a SQLite cache #TODO langchain.llm_cache = SQLiteCache(database_path=".langchain.db") # count number of words for resume and job description and print it # start timer for processing time from now until the end of the function start = time.time() print(" Lenth before structurize:") # counter number of words in resume and job description print("Length of resume: " + str(len(resume))) print("Length of job description: " + str(len(job_desc))) # job_desc = structurize_with_gpt(job_desc) # resume = structurize_with_gpt(resume) print(" Lenth after structurize:") print("Length of resume: " + str(len(resume))) print("Length of job description: " + str(len(job_desc))) system_message = SystemMessage( content=system_message_content_without_coverletter) human_message = HumanMessage( content=f"Resume:\n{resume}\njob description:\n{job_desc}") prompt = ChatPromptTemplate.from_messages( [ system_message, human_message ]) print(prompt) # structurize is good but makes it extremly slow with gpt4 # chain = create_structured_output_chain(json_schema, llm, prompt, verbose=True) chain = LLMChain(llm=llm, prompt=prompt) output = await chain.arun({}) end = time.time() # print processing time in seconds print("Processing time: " + str(end - start)) # output = json.loads(output) # get md first # convert to json # print (output) # # now convert to json # system_message = SystemMessage(content=system_message_convert_json) # human_message = HumanMessage( content=f"{output}") # prompt = ChatPromptTemplate.from_messages( # [ # system_message, # human_message # ] ) # # print (prompt) # # chain = create_structured_output_chain(json_schema, llm, prompt, verbose=True) # chain = LLMChain(llm=llm, prompt=prompt) # output = chain.run({}) return output # messages = [system_message, human_message] # result = llm(messages) # return result.content # URLS async def get_page_content(url): browser = await launch(handleSIGINT=False, handleSIGTERM=False, handleSIGHUP=False) page = await browser.newPage() await page.setViewport({'width': 1366, 'height': 768}) SAFARI_USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Safari/605.1.15" await page.setUserAgent(SAFARI_USER_AGENT) try: await page.goto(url, waitUntil="domcontentloaded", timeout=60000) except Exception as e: # It's better to catch a general exception for simplicity here. 
print(f"Error: {e}") await browser.close() return None content = await page.content() await browser.close() return content def get_clean_text_from_url(url): with concurrent.futures.ThreadPoolExecutor() as executor: future = executor.submit(run_in_thread, url) # call openai api to extract the relevant parts related to a job desciription and remove the rest clean_page = future.result() # if number of characters is greater than 3000: summarize the page # print size of the clean page message extracted_job = clean_page print("Length of clean page: " + str(len(clean_page))) if len(clean_page) > 3000: system_message_content = "You summarize a given page and extract the part related to the job description. Dont make up anything, just extract the relevant parts." response = generate_response( system_message_content, clean_page, "gpt-3.5-turbo-16k") extracted_job = response print("Length of clean page after Extraction: " + str(len(extracted_job))) # print size of the extracted job return extracted_job def run_in_thread(url): loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) try: return loop.run_until_complete(get_clean_text_from_url_async(url)) finally: loop.close() async def get_clean_text_from_url_async(url): content = await get_page_content(url) if not content: return None # return None if there was an error soup = BeautifulSoup(content, 'html.parser') for script in soup(['script', 'style']): script.decompose() clean_text = soup.get_text() clean_text = re.sub(r'\n+', '\n', clean_text).strip() return clean_text # Usage example: # text_content = get_clean_text_from_url("https://www.example.com") # print(text_content) class MyCustomSyncHandler(BaseCallbackHandler): def on_llm_new_token(self, token: str, **kwargs) -> None: print(f"Sync handler being called in a `thread_pool_executor`: token: {token}") class MyCustomAsyncHandler(AsyncCallbackHandler): """Async callback handler that can be used to handle callbacks from langchain.""" async def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any ) -> None: """Run when chain starts running.""" print("zzzz....") await asyncio.sleep(0.3) class_name = serialized["name"] print("Hi! I just woke up. Your llm is starting") async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when chain ends running.""" print("zzzz....") await asyncio.sleep(0.3) print("Hi! I just woke up. Your llm is ending") async def generate_response(system_message_content: str, human_message_content: str, model_name: str = 'gpt-3.5-turbo') -> AIMessage: """ Generates a response based on the given system and human messages. Args: - system_message_content (str): The content of the system message. - human_message_content (str): The content of the human message. - model_name (str): The name of the model to use. Defaults to 'gpt-4'. Returns: - AIMessage: The response generated by the LLM. """ llm = ChatOpenAI(model=model_name, callbacks=[MyCustomSyncHandler(), MyCustomAsyncHandler()]) # Create SystemMessage system_message = SystemMessage(content=system_message_content) # Create HumanMessage human_message = HumanMessage(content=human_message_content) # Create messages list messages = [system_message, human_message] result = await llm.agenerate([messages]) result = result.generations[0][0].text print (result) # result = llm(messages) return result
[ "langchain.schema.HumanMessage", "langchain.cache.SQLiteCache", "langchain.prompts.ChatPromptTemplate.from_messages", "langchain.chat_models.ChatOpenAI", "langchain.schema.SystemMessage", "langchain.chains.LLMChain" ]
[((1278, 1291), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1289, 1291), False, 'from dotenv import load_dotenv\n'), ((1523, 1570), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-4-0613"""', 'temperature': '(0.1)'}), "(model='gpt-4-0613', temperature=0.1)\n", (1533, 1570), False, 'from langchain.chat_models import ChatOpenAI\n'), ((4022, 4064), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': '""".langchain.db"""'}), "(database_path='.langchain.db')\n", (4033, 4064), False, 'from langchain.cache import SQLiteCache\n'), ((4229, 4240), 'time.time', 'time.time', ([], {}), '()\n', (4238, 4240), False, 'import time\n'), ((4736, 4801), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'system_message_content_without_coverletter'}), '(content=system_message_content_without_coverletter)\n', (4749, 4801), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage\n'), ((4831, 4904), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'f"""Resume:\n{resume}\njob description:\n{job_desc}"""'}), '(content=f"""Resume:\n{resume}\njob description:\n{job_desc}""")\n', (4843, 4904), False, 'from langchain.schema import LLMResult, HumanMessage\n'), ((4926, 4991), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['[system_message, human_message]'], {}), '([system_message, human_message])\n', (4958, 4991), False, 'from langchain.prompts import ChatPromptTemplate\n'), ((5215, 5247), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (5223, 5247), False, 'from langchain.chains import LLMChain\n'), ((5293, 5304), 'time.time', 'time.time', ([], {}), '()\n', (5302, 5304), False, 'import time\n'), ((7925, 7949), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (7947, 7949), False, 'import asyncio\n'), ((7954, 7982), 'asyncio.set_event_loop', 'asyncio.set_event_loop', (['loop'], {}), '(loop)\n', (7976, 7982), False, 'import asyncio\n'), ((8281, 8318), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content', '"""html.parser"""'], {}), "(content, 'html.parser')\n", (8294, 8318), False, 'from bs4 import BeautifulSoup\n'), ((10172, 10217), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'system_message_content'}), '(content=system_message_content)\n', (10185, 10217), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage\n'), ((10265, 10308), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'human_message_content'}), '(content=human_message_content)\n', (10277, 10308), False, 'from langchain.schema import LLMResult, HumanMessage\n'), ((2710, 2732), 'PyPDF2.PdfReader', 'PyPDF2.PdfReader', (['file'], {}), '(file)\n', (2726, 2732), False, 'import PyPDF2\n'), ((6148, 6215), 'pyppeteer.launch', 'launch', ([], {'handleSIGINT': '(False)', 'handleSIGTERM': '(False)', 'handleSIGHUP': '(False)'}), '(handleSIGINT=False, handleSIGTERM=False, handleSIGHUP=False)\n', (6154, 6215), False, 'from pyppeteer import launch\n'), ((8443, 8475), 're.sub', 're.sub', (['"""\\\\n+"""', '"""\n"""', 'clean_text'], {}), "('\\\\n+', '\\n', clean_text)\n", (8449, 8475), False, 'import re\n'), ((9158, 9176), 'asyncio.sleep', 'asyncio.sleep', (['(0.3)'], {}), '(0.3)\n', (9171, 9176), False, 'import asyncio\n'), ((9435, 9453), 'asyncio.sleep', 'asyncio.sleep', (['(0.3)'], {}), '(0.3)\n', (9448, 9453), False, 'import asyncio\n'), ((3187, 3199), 'uuid.uuid4', 'uuid.uuid4', 
([], {}), '()\n', (3197, 3199), False, 'import uuid\n')]
# Blackboard-PAGI - LLM Proto-AGI using the Blackboard Pattern
# Copyright (c) 2023. Andreas Kirsch
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from typing import List, Optional

import langchain
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, BaseMessage, ChatGeneration, ChatResult, Generation


class CachedChatOpenAI(ChatOpenAI):
    def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]] = None) -> ChatResult:
        messages_prompt = repr(messages)
        if langchain.llm_cache:
            results = langchain.llm_cache.lookup(messages_prompt, self.model_name)
            if results:
                assert len(results) == 1
                result: Generation = results[0]
                chat_result = ChatResult(
                    generations=[ChatGeneration(message=AIMessage(content=result.text))],
                    llm_output=result.generation_info,
                )
                return chat_result
        chat_result = super()._generate(messages, stop)
        if langchain.llm_cache:
            assert len(chat_result.generations) == 1
            result = Generation(text=chat_result.generations[0].message.content, generation_info=chat_result.llm_output)
            langchain.llm_cache.update(messages_prompt, self.model_name, [result])
        return chat_result
[ "langchain.schema.AIMessage", "langchain.llm_cache.update", "langchain.schema.Generation", "langchain.llm_cache.lookup" ]
[((1195, 1255), 'langchain.llm_cache.lookup', 'langchain.llm_cache.lookup', (['messages_prompt', 'self.model_name'], {}), '(messages_prompt, self.model_name)\n', (1221, 1255), False, 'import langchain\n'), ((1771, 1875), 'langchain.schema.Generation', 'Generation', ([], {'text': 'chat_result.generations[0].message.content', 'generation_info': 'chat_result.llm_output'}), '(text=chat_result.generations[0].message.content, generation_info\n =chat_result.llm_output)\n', (1781, 1875), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, ChatResult, Generation\n'), ((1883, 1953), 'langchain.llm_cache.update', 'langchain.llm_cache.update', (['messages_prompt', 'self.model_name', '[result]'], {}), '(messages_prompt, self.model_name, [result])\n', (1909, 1953), False, 'import langchain\n'), ((1467, 1497), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'result.text'}), '(content=result.text)\n', (1476, 1497), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, ChatResult, Generation\n')]
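A small usage sketch for the class above (an assumption, not part of the source): the cached path only triggers when a global `langchain.llm_cache` is configured, so exercising it would look roughly like this.

import langchain
from langchain.cache import InMemoryCache
from langchain.schema import HumanMessage

langchain.llm_cache = InMemoryCache()
chat = CachedChatOpenAI(model_name="gpt-3.5-turbo")  # class defined above

messages = [HumanMessage(content="Ping?")]
first = chat(messages)   # goes to the API, then llm_cache.update stores the generation
second = chat(messages)  # identical prompt; answered via llm_cache.lookup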
import streamlit as st
import langchain
# from dotenv import load_dotenv
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
from langchain.vectorstores import FAISS
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from htmlTemplates import css, bot_template, user_template
import openai
import os
from PyPDF2 import PdfReader
from docx import Document
import pinecone
import time
from langchain.vectorstores import Pinecone
import toml


def main():
    # load_dotenv()
    st.set_page_config(page_title="Chat with multiple files", page_icon=":books:")
    st.write(css, unsafe_allow_html=True)

    openai.api_key = st.secrets["OPENAI_API_KEY"]
    pinecone_api_key = st.secrets["PINECONE_API_KEY"]
    pinecone_env = st.secrets["PINECONE_ENV"]
    index_name = st.secrets["PINECONE_INDEX_NAME"]

    if "conversation" not in st.session_state:
        st.session_state.conversation = None
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = None

    st.header("Chat with multiple files :books:")

    if "messages" not in st.session_state:
        st.session_state.messages = []

    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    user_question = st.chat_input("What is up?")
    if user_question:
        if st.session_state.conversation is None:
            st.error("Please provide data and click 'Process' before asking questions.")
        else:
            response = st.session_state.conversation({'question': user_question})
            st.session_state.chat_history = response['chat_history']
            for i, message in enumerate(st.session_state.chat_history):
                if i % 2 == 0:
                    st.write(user_template.replace("{{MSG}}", message.content), unsafe_allow_html=True)
                else:
                    st.write(bot_template.replace("{{MSG}}", message.content), unsafe_allow_html=True)

    selected_tab = st.sidebar.radio("Navigation", options=["Files", "Text"], horizontal=True, label_visibility="collapsed")

    if selected_tab == "Files":
        st.sidebar.subheader("Upload and Process Files")
        uploaded_files = st.sidebar.file_uploader("Upload your files here and click on 'Process'", accept_multiple_files=True)
        if uploaded_files:
            if st.sidebar.button("Process"):
                with st.spinner("Processing"):
                    # initialize pinecone
                    pinecone.init(api_key=pinecone_api_key, environment=pinecone_env)
                    if index_name in pinecone.list_indexes():
                        pinecone.delete_index(index_name)
                    # we create a new index
                    pinecone.create_index(name=index_name, metric='cosine', dimension=1536)  # 1536 dim of text-embedding-ada-002
                    # wait for index to be initialized
                    while not pinecone.describe_index(index_name).status['ready']:
                        time.sleep(1)

                    st.session_state.conversation = None
                    st.session_state.chat_history = None

                    text = ""
                    for file in uploaded_files:
                        file_extension = os.path.splitext(file.name)[1].lower()
                        if file_extension == '.pdf':
                            pdf_reader = PdfReader(file)
                            for page in pdf_reader.pages:
                                text += page.extract_text()
                        elif file_extension == '.txt':
                            text += file.read().decode("utf-8")
                        elif file_extension == '.docx':
                            doc = Document(file)
                            for paragraph in doc.paragraphs:
                                text += paragraph.text + "\n"
                        else:
                            st.warning('We only support PDF, TXT and DOCX files')
                            st.stop()

                    text_splitter = CharacterTextSplitter(separator="\n", chunk_size=1000, chunk_overlap=200, length_function=len)
                    text_chunks = text_splitter.split_text(text)
                    embeddings = OpenAIEmbeddings()
                    # vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
                    vectorstore = Pinecone.from_texts(text_chunks, embeddings, index_name=index_name)
                    llm = ChatOpenAI(model_name='gpt-4')
                    memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
                    conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=vectorstore.as_retriever(), memory=memory)
                    st.session_state.conversation = conversation_chain

    elif selected_tab == "Text":
        st.sidebar.subheader("Enter Text")
        user_text = st.sidebar.text_area("Enter your text here", "")
        if st.sidebar.button("Process Text"):
            if not user_text.strip():
                st.warning("Please enter some text before processing.")
            else:
                # Process the user's entered text
                if user_text:
                    # total_character_count = len(user_text)
                    # if total_character_count > 400000:
                    #     st.warning("Total input data should not exceed 400,000 characters.")
                    #     st.stop()
                    st.session_state.conversation = None
                    st.session_state.chat_history = None

                    # initialize pinecone
                    pinecone.init(api_key=pinecone_api_key, environment=pinecone_env)
                    if index_name in pinecone.list_indexes():
                        pinecone.delete_index(index_name)
                    # we create a new index
                    pinecone.create_index(name=index_name, metric='cosine', dimension=1536)  # 1536 dim of text-embedding-ada-002
                    # wait for index to be initialized
                    while not pinecone.describe_index(index_name).status['ready']:
                        time.sleep(1)

                    st.session_state.conversation = None
                    st.session_state.chat_history = None

                    text_splitter = CharacterTextSplitter(separator="\n", chunk_size=1000, chunk_overlap=200, length_function=len)
                    text_chunks = text_splitter.split_text(user_text)
                    embeddings = OpenAIEmbeddings()
                    vectorstore = Pinecone.from_texts(text_chunks, embeddings, index_name=index_name)
                    llm = ChatOpenAI(model_name='gpt-4')
                    memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
                    conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=vectorstore.as_retriever(), memory=memory)
                    st.session_state.conversation = conversation_chain


if __name__ == '__main__':
    main()
[ "langchain.text_splitter.CharacterTextSplitter", "langchain.vectorstores.Pinecone.from_texts", "langchain.memory.ConversationBufferMemory", "langchain.chat_models.ChatOpenAI", "langchain.embeddings.OpenAIEmbeddings" ]
[((669, 747), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Chat with multiple files"""', 'page_icon': '""":books:"""'}), "(page_title='Chat with multiple files', page_icon=':books:')\n", (687, 747), True, 'import streamlit as st\n'), ((752, 789), 'streamlit.write', 'st.write', (['css'], {'unsafe_allow_html': '(True)'}), '(css, unsafe_allow_html=True)\n', (760, 789), True, 'import streamlit as st\n'), ((1181, 1226), 'streamlit.header', 'st.header', (['"""Chat with multiple files :books:"""'], {}), "('Chat with multiple files :books:')\n", (1190, 1226), True, 'import streamlit as st\n'), ((1469, 1497), 'streamlit.chat_input', 'st.chat_input', (['"""What is up?"""'], {}), "('What is up?')\n", (1482, 1497), True, 'import streamlit as st\n'), ((2177, 2285), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""Navigation"""'], {'options': "['Files', 'Text']", 'horizontal': '(True)', 'label_visibility': '"""collapsed"""'}), "('Navigation', options=['Files', 'Text'], horizontal=True,\n label_visibility='collapsed')\n", (2193, 2285), True, 'import streamlit as st\n'), ((2323, 2371), 'streamlit.sidebar.subheader', 'st.sidebar.subheader', (['"""Upload and Process Files"""'], {}), "('Upload and Process Files')\n", (2343, 2371), True, 'import streamlit as st\n'), ((2397, 2502), 'streamlit.sidebar.file_uploader', 'st.sidebar.file_uploader', (['"""Upload your files here and click on \'Process\'"""'], {'accept_multiple_files': '(True)'}), '("Upload your files here and click on \'Process\'",\n accept_multiple_files=True)\n', (2421, 2502), True, 'import streamlit as st\n'), ((1370, 1402), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (1385, 1402), True, 'import streamlit as st\n'), ((1416, 1447), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (1427, 1447), True, 'import streamlit as st\n'), ((1582, 1658), 'streamlit.error', 'st.error', (['"""Please provide data and click \'Process\' before asking questions."""'], {}), '("Please provide data and click \'Process\' before asking questions.")\n', (1590, 1658), True, 'import streamlit as st\n'), ((1696, 1754), 'streamlit.session_state.conversation', 'st.session_state.conversation', (["{'question': user_question}"], {}), "({'question': user_question})\n", (1725, 1754), True, 'import streamlit as st\n'), ((2542, 2570), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Process"""'], {}), "('Process')\n", (2559, 2570), True, 'import streamlit as st\n'), ((5125, 5159), 'streamlit.sidebar.subheader', 'st.sidebar.subheader', (['"""Enter Text"""'], {}), "('Enter Text')\n", (5145, 5159), True, 'import streamlit as st\n'), ((5180, 5228), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', (['"""Enter your text here"""', '""""""'], {}), "('Enter your text here', '')\n", (5200, 5228), True, 'import streamlit as st\n'), ((5241, 5274), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Process Text"""'], {}), "('Process Text')\n", (5258, 5274), True, 'import streamlit as st\n'), ((2593, 2617), 'streamlit.spinner', 'st.spinner', (['"""Processing"""'], {}), "('Processing')\n", (2603, 2617), True, 'import streamlit as st\n'), ((2682, 2747), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'pinecone_api_key', 'environment': 'pinecone_env'}), '(api_key=pinecone_api_key, environment=pinecone_env)\n', (2695, 2747), False, 'import pinecone\n'), ((2934, 3005), 'pinecone.create_index', 'pinecone.create_index', ([], {'name': 'index_name', 'metric': 
'"""cosine"""', 'dimension': '(1536)'}), "(name=index_name, metric='cosine', dimension=1536)\n", (2955, 3005), False, 'import pinecone\n'), ((4300, 4398), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""\n"""', 'chunk_size': '(1000)', 'chunk_overlap': '(200)', 'length_function': 'len'}), "(separator='\\n', chunk_size=1000, chunk_overlap=200,\n length_function=len)\n", (4321, 4398), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((4494, 4512), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (4510, 4512), False, 'from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings\n'), ((4641, 4708), 'langchain.vectorstores.Pinecone.from_texts', 'Pinecone.from_texts', (['text_chunks', 'embeddings'], {'index_name': 'index_name'}), '(text_chunks, embeddings, index_name=index_name)\n', (4660, 4708), False, 'from langchain.vectorstores import Pinecone\n'), ((4735, 4765), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-4"""'}), "(model_name='gpt-4')\n", (4745, 4765), False, 'from langchain.chat_models import ChatOpenAI\n'), ((4797, 4870), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(memory_key='chat_history', return_messages=True)\n", (4821, 4870), False, 'from langchain.memory import ConversationBufferMemory\n'), ((5330, 5385), 'streamlit.warning', 'st.warning', (['"""Please enter some text before processing."""'], {}), "('Please enter some text before processing.')\n", (5340, 5385), True, 'import streamlit as st\n'), ((1957, 2006), 'htmlTemplates.user_template.replace', 'user_template.replace', (['"""{{MSG}}"""', 'message.content'], {}), "('{{MSG}}', message.content)\n", (1978, 2006), False, 'from htmlTemplates import css, bot_template, user_template\n'), ((2083, 2131), 'htmlTemplates.bot_template.replace', 'bot_template.replace', (['"""{{MSG}}"""', 'message.content'], {}), "('{{MSG}}', message.content)\n", (2103, 2131), False, 'from htmlTemplates import css, bot_template, user_template\n'), ((2786, 2809), 'pinecone.list_indexes', 'pinecone.list_indexes', ([], {}), '()\n', (2807, 2809), False, 'import pinecone\n'), ((2835, 2868), 'pinecone.delete_index', 'pinecone.delete_index', (['index_name'], {}), '(index_name)\n', (2856, 2868), False, 'import pinecone\n'), ((3249, 3262), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3259, 3262), False, 'import time\n'), ((5911, 5976), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'pinecone_api_key', 'environment': 'pinecone_env'}), '(api_key=pinecone_api_key, environment=pinecone_env)\n', (5924, 5976), False, 'import pinecone\n'), ((6163, 6234), 'pinecone.create_index', 'pinecone.create_index', ([], {'name': 'index_name', 'metric': '"""cosine"""', 'dimension': '(1536)'}), "(name=index_name, metric='cosine', dimension=1536)\n", (6184, 6234), False, 'import pinecone\n'), ((6644, 6742), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""\n"""', 'chunk_size': '(1000)', 'chunk_overlap': '(200)', 'length_function': 'len'}), "(separator='\\n', chunk_size=1000, chunk_overlap=200,\n length_function=len)\n", (6665, 6742), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((6843, 6861), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (6859, 6861), False, 'from langchain.embeddings import OpenAIEmbeddings, 
HuggingFaceInstructEmbeddings\n'), ((6896, 6963), 'langchain.vectorstores.Pinecone.from_texts', 'Pinecone.from_texts', (['text_chunks', 'embeddings'], {'index_name': 'index_name'}), '(text_chunks, embeddings, index_name=index_name)\n', (6915, 6963), False, 'from langchain.vectorstores import Pinecone\n'), ((6991, 7021), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-4"""'}), "(model_name='gpt-4')\n", (7001, 7021), False, 'from langchain.chat_models import ChatOpenAI\n'), ((7053, 7126), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(memory_key='chat_history', return_messages=True)\n", (7077, 7126), False, 'from langchain.memory import ConversationBufferMemory\n'), ((3632, 3647), 'PyPDF2.PdfReader', 'PdfReader', (['file'], {}), '(file)\n', (3641, 3647), False, 'from PyPDF2 import PdfReader\n'), ((6015, 6038), 'pinecone.list_indexes', 'pinecone.list_indexes', ([], {}), '()\n', (6036, 6038), False, 'import pinecone\n'), ((6064, 6097), 'pinecone.delete_index', 'pinecone.delete_index', (['index_name'], {}), '(index_name)\n', (6085, 6097), False, 'import pinecone\n'), ((6478, 6491), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6488, 6491), False, 'import time\n'), ((3172, 3207), 'pinecone.describe_index', 'pinecone.describe_index', (['index_name'], {}), '(index_name)\n', (3195, 3207), False, 'import pinecone\n'), ((3498, 3525), 'os.path.splitext', 'os.path.splitext', (['file.name'], {}), '(file.name)\n', (3514, 3525), False, 'import os\n'), ((3975, 3989), 'docx.Document', 'Document', (['file'], {}), '(file)\n', (3983, 3989), False, 'from docx import Document\n'), ((4171, 4224), 'streamlit.warning', 'st.warning', (['"""We only support PDF, TXT and DOCX files"""'], {}), "('We only support PDF, TXT and DOCX files')\n", (4181, 4224), True, 'import streamlit as st\n'), ((4253, 4262), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (4260, 4262), True, 'import streamlit as st\n'), ((6401, 6436), 'pinecone.describe_index', 'pinecone.describe_index', (['index_name'], {}), '(index_name)\n', (6424, 6436), False, 'import pinecone\n')]
# import langchain_experimental as lc
from langchain_experimental.llms import FakeListLLM
from langchain.llms.fake import FakeListLLM
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType

tools = load_tools(["python_repl"])
responses = ["Action: Python REPL\nAction Input: print(2 + 2)", "Final Answer: 4"]
llm = FakeListLLM(responses=responses)

agent = initialize_agent(
    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)

agent.run("whats 2 + 2")
[ "langchain.agents.initialize_agent", "langchain.llms.fake.FakeListLLM", "langchain.agents.load_tools" ]
[((271, 298), 'langchain.agents.load_tools', 'load_tools', (["['python_repl']"], {}), "(['python_repl'])\n", (281, 298), False, 'from langchain.agents import load_tools\n'), ((389, 421), 'langchain.llms.fake.FakeListLLM', 'FakeListLLM', ([], {'responses': 'responses'}), '(responses=responses)\n', (400, 421), False, 'from langchain.llms.fake import FakeListLLM\n'), ((431, 522), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': 'AgentType.ZERO_SHOT_REACT_DESCRIPTION', 'verbose': '(True)'}), '(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n verbose=True)\n', (447, 522), False, 'from langchain.agents import initialize_agent\n')]
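As a side note (a sketch, not in the source): `FakeListLLM` simply returns its canned responses in order, which is why it is convenient for exercising agents and chains offline without any API calls.

from langchain.llms.fake import FakeListLLM

fake = FakeListLLM(responses=["first", "second"])
assert fake("any prompt") == "first"      # responses are consumed in order
assert fake("another prompt") == "second"  # regardless of the prompt text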
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from redundant_filter_retriever import RedundantFilterRetriever
from dotenv import load_dotenv
import langchain

# langchain.debug = True

load_dotenv()

chat = ChatOpenAI()
embeddings = OpenAIEmbeddings()
db = Chroma(
    persist_directory="emb",
    embedding_function=embeddings
)
retriever = RedundantFilterRetriever(
    embeddings=embeddings,
    chroma=db
)

chain = RetrievalQA.from_chain_type(
    llm=chat,
    retriever=retriever,
    chain_type="stuff"
)

result = chain.run("What is an interesting fact about the English language?")

print("------------\n")
print(result)
print("\n------------\n")
[ "langchain.vectorstores.Chroma", "langchain.embeddings.OpenAIEmbeddings", "langchain.chains.RetrievalQA.from_chain_type", "langchain.chat_models.ChatOpenAI" ]
[((317, 330), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (328, 330), False, 'from dotenv import load_dotenv\n'), ((339, 351), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (349, 351), False, 'from langchain.chat_models import ChatOpenAI\n'), ((365, 383), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (381, 383), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((389, 451), 'langchain.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': '"""emb"""', 'embedding_function': 'embeddings'}), "(persist_directory='emb', embedding_function=embeddings)\n", (395, 451), False, 'from langchain.vectorstores import Chroma\n'), ((474, 532), 'redundant_filter_retriever.RedundantFilterRetriever', 'RedundantFilterRetriever', ([], {'embeddings': 'embeddings', 'chroma': 'db'}), '(embeddings=embeddings, chroma=db)\n', (498, 532), False, 'from redundant_filter_retriever import RedundantFilterRetriever\n'), ((552, 630), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'chat', 'retriever': 'retriever', 'chain_type': '"""stuff"""'}), "(llm=chat, retriever=retriever, chain_type='stuff')\n", (579, 630), False, 'from langchain.chains import RetrievalQA\n')]
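The `redundant_filter_retriever` module imported above is not included in this snippet; the sketch below is only a guess at what such a retriever could look like (not the author's actual code), built on Chroma's max-marginal-relevance search so that near-duplicate chunks are filtered out of the results.

from typing import List

from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document
from langchain.vectorstores import Chroma


class RedundantFilterRetriever(BaseRetriever):
    # plain pydantic fields: the retriever is constructed with an embedder and a Chroma store
    embeddings: Embeddings
    chroma: Chroma

    def get_relevant_documents(self, query: str) -> List[Document]:
        # embed the query ourselves, then let MMR favour diverse (non-redundant) chunks
        emb = self.embeddings.embed_query(query)
        return self.chroma.max_marginal_relevance_search_by_vector(embedding=emb, lambda_mult=0.8)

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        return []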
####################### FUNCTION #############################
from langchainGPT import langchainProcessor
from rabbitMQ import consumer_channel
##################################################################

##################################################################
####################### VARIABLE #############################
from envLoader import amqp_langchain_queue
##################################################################

##################################################################
consumer_channel.queue_declare(queue=amqp_langchain_queue, durable=True)


def callback(ch, method, properties, body):
    req = body.decode("utf-8")
    langchainProcessor(req)


consumer_channel.basic_consume(amqp_langchain_queue, callback, auto_ack=True)
consumer_channel.start_consuming()
[ "langchainGPT.langchainProcessor" ]
[((533, 605), 'rabbitMQ.consumer_channel.queue_declare', 'consumer_channel.queue_declare', ([], {'queue': 'amqp_langchain_queue', 'durable': '(True)'}), '(queue=amqp_langchain_queue, durable=True)\n', (563, 605), False, 'from rabbitMQ import consumer_channel\n'), ((713, 790), 'rabbitMQ.consumer_channel.basic_consume', 'consumer_channel.basic_consume', (['amqp_langchain_queue', 'callback'], {'auto_ack': '(True)'}), '(amqp_langchain_queue, callback, auto_ack=True)\n', (743, 790), False, 'from rabbitMQ import consumer_channel\n'), ((791, 825), 'rabbitMQ.consumer_channel.start_consuming', 'consumer_channel.start_consuming', ([], {}), '()\n', (823, 825), False, 'from rabbitMQ import consumer_channel\n'), ((687, 710), 'langchainGPT.langchainProcessor', 'langchainProcessor', (['req'], {}), '(req)\n', (705, 710), False, 'from langchainGPT import langchainProcessor\n')]
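A companion producer sketch (an assumption, not part of the source; the real queue name comes from `envLoader`): publishing a message onto the same durable queue that the consumer above reads from, using a plain pika channel.

import pika

# connection parameters and queue name are placeholders for illustration
connection = pika.BlockingConnection(pika.ConnectionParameters(host="localhost"))
channel = connection.channel()
channel.queue_declare(queue="langchain_queue", durable=True)
channel.basic_publish(exchange="", routing_key="langchain_queue", body="summarise this text ...")
connection.close()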
import json
import logging
import pathlib
from typing import List, Tuple

from langchain.text_splitter import CharacterTextSplitter
import langchain
import wandb
from langchain.cache import SQLiteCache
from langchain.docstore.document import Document
from langchain.document_loaders import TextLoader
from langchain.text_splitter import TextSplitter
from langchain.vectorstores import Chroma, Qdrant
from langchain.embeddings import OpenAIEmbeddings
import os
from dotenv import load_dotenv

load_dotenv(".env")

doc_dir = os.path.join("documents", "iteration_1")
vector_store_path = os.path.join("vector_store")
prompt_file_path = os.path.join("autonomous_app", "chat_prompt.json")

langchain.llm_cache = SQLiteCache(database_path="llm_cache.db")

logger = logging.getLogger(__name__)

url = "https://c48daa38-a7a7-4c89-9cd9-718a1461e4ae.us-east-1-0.aws.cloud.qdrant.io:6333"
api_key_q = "2N5Wlg_y2xwgtJu0Iq5T5uHyesdewC3Vy-VX31YAq-laiQO1tCxAkg"


def ingest_data():
    chunk_size = 600
    chunk_overlap = 200

    # Load the documents
    documents = load_documents(doc_dir)
    print("Load Documents ", documents)

    # Split the documents into chunks
    chunked_documents = chunk_documents(documents, chunk_size, chunk_overlap)

    # Create the vector store with the chunked documents
    vector_store = create_vector_store(chunked_documents)

    return documents, vector_store


def load_documents(data_dir: str) -> List[Document]:
    md_files = list(map(str, pathlib.Path(data_dir).glob("*.md")))
    documents = [
        TextLoader(file_path=file_path, encoding="utf8").load()[0]
        for file_path in md_files
    ]
    return documents


def chunk_documents(
    documents: List[Document], chunk_size: int = 600, chunk_overlap=200
) -> List[Document]:
    """Split documents into chunks

    Args:
        documents (List[Document]): A list of documents to split into chunks
        chunk_size (int, optional): The size of each chunk. Defaults to 500.
        chunk_overlap (int, optional): The number of tokens to overlap between chunks. Defaults to 0.

    Returns:
        List[Document]: A list of chunked documents.
    """
    print("Before chunking ", documents)
    markdown_text_splitter = CharacterTextSplitter(
        chunk_size=chunk_size, chunk_overlap=chunk_overlap
    )
    split_documents = markdown_text_splitter.split_documents(documents)
    return split_documents


def create_vector_store(chunk_documents) -> Qdrant:
    embedding_function = OpenAIEmbeddings()

    # user_vector_store_path = os.path.join(vector_store_path, user_key)
    # vector_store = Chroma.from_documents(
    #     documents=documents,
    #     embedding=embedding_function,
    #     persist_directory=vector_store_path,
    # )
    #
    # return vector_store
    print("Docs ", chunk_documents)
    vector_store = Qdrant.from_documents(
        chunk_documents,
        embedding_function,
        url=url,
        prefer_grpc=False,
        api_key=api_key_q,
        force_recreate=False,
        collection_name="my_documents",
    )
    return vector_store


# def log_prompt(prompt:dict, run:wandb.run):
#     prompt_artifact = wandb.Artifact(name="chat_prompt", type="prompt")
#     with prompt_artifact.new_file("prompt.json") as f:
#         f.write(json.dumps(prompt))
#     run.log_artifact(prompt_artifact)
#
# def log_dataset(documents:List[Document], run:wandb.run):
#     document_artifact = wandb.Artifact(name="documentation_dataset", type="dataset")
#     with document_artifact.new_file("document.json") as f:
#         for document in documents:
#             f.write(document.json() + "\n")
#     run.log_artifact(document_artifact)

# def log_index(vector_store_dir:str, run:wandb.run):
#     index_artifact = wandb.Artifact(name="vector_store", type="search_index")
#     index_artifact.add_dir(vector_store_dir)
#     run.log_artifact(index_artifact)

# def ingest_and_log_data(
#     docs_dir: str = doc_dir,
#     chunk_size: int = 600,
#     chunk_overlap: int = 200,
#     vector_store_path: str = vector_store_path,
#     prompt_file_path: str = prompt_file_path,
#     wandb_project: str = "AI Agents Hackathon",
# ):
#     """
#     Ingest documentation data, create a vector store, and log artifacts to W&B.
#     Designed to be used within a Django context.
#     """
#     run = wandb.init(project=wandb_project)  # Move the wandb initialization to this function
#     # user_vector_store_path = os.path.join(vector_store_path, unique_user_key)
#
#     # Ingest data
#     documents = ingest_data(
#         docs_dir=docs_dir,
#         chunk_size=chunk_size,
#         chunk_overlap=chunk_overlap,
#         vector_store_path=vector_store_path,
#         wandb_project=wandb_project,
#         prompt_file=prompt_file_path,
#     )
#
#     # Log data to wandb
#     log_dataset(documents, run)
#     log_index(vector_store_path, run)
#
#     with open(prompt_file_path, 'r') as f:
#         prompt_data = json.load(f)
#     log_prompt(prompt_data, run)
#
#     # Finish the wandb run
#     run.finish()


ingest_data()
[ "langchain.text_splitter.CharacterTextSplitter", "langchain.vectorstores.Qdrant.from_documents", "langchain.document_loaders.TextLoader", "langchain.cache.SQLiteCache", "langchain.embeddings.OpenAIEmbeddings" ]
[((491, 510), 'dotenv.load_dotenv', 'load_dotenv', (['""".env"""'], {}), "('.env')\n", (502, 510), False, 'from dotenv import load_dotenv\n'), ((523, 563), 'os.path.join', 'os.path.join', (['"""documents"""', '"""iteration_1"""'], {}), "('documents', 'iteration_1')\n", (535, 563), False, 'import os\n'), ((586, 614), 'os.path.join', 'os.path.join', (['"""vector_store"""'], {}), "('vector_store')\n", (598, 614), False, 'import os\n'), ((636, 686), 'os.path.join', 'os.path.join', (['"""autonomous_app"""', '"""chat_prompt.json"""'], {}), "('autonomous_app', 'chat_prompt.json')\n", (648, 686), False, 'import os\n'), ((711, 752), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': '"""llm_cache.db"""'}), "(database_path='llm_cache.db')\n", (722, 752), False, 'from langchain.cache import SQLiteCache\n'), ((762, 789), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (779, 789), False, 'import logging\n'), ((2231, 2304), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (2252, 2304), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((2497, 2515), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (2513, 2515), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((2853, 3020), 'langchain.vectorstores.Qdrant.from_documents', 'Qdrant.from_documents', (['chunk_documents', 'embedding_function'], {'url': 'url', 'prefer_grpc': '(False)', 'api_key': 'api_key_q', 'force_recreate': '(False)', 'collection_name': '"""my_documents"""'}), "(chunk_documents, embedding_function, url=url,\n prefer_grpc=False, api_key=api_key_q, force_recreate=False,\n collection_name='my_documents')\n", (2874, 3020), False, 'from langchain.vectorstores import Chroma, Qdrant\n'), ((1487, 1509), 'pathlib.Path', 'pathlib.Path', (['data_dir'], {}), '(data_dir)\n', (1499, 1509), False, 'import pathlib\n'), ((1551, 1599), 'langchain.document_loaders.TextLoader', 'TextLoader', ([], {'file_path': 'file_path', 'encoding': '"""utf8"""'}), "(file_path=file_path, encoding='utf8')\n", (1561, 1599), False, 'from langchain.document_loaders import TextLoader\n')]
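A possible follow-on to the ingestion script above (an assumption, not part of the source): once `ingest_data()` has populated the `my_documents` collection, the returned Qdrant store can be queried for similar chunks. The query string and `k=3` are arbitrary placeholders.

# hypothetical usage of the module above; all names come from that script
documents, vector_store = ingest_data()
hits = vector_store.similarity_search("How do I configure the app?", k=3)
for hit in hits:
    print(hit.metadata.get("source"), hit.page_content[:80])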
import langchain
from langchain.chains import LLMChain, SimpleSequentialChain, ConversationChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory

langchain.verbose = True

chat = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)

conversation = ConversationChain(
    llm=chat,
    memory=ConversationBufferMemory()
)

while True:
    user_message = input("You: ")
    ai_message = conversation.predict(input=user_message)
    print(f"AI: {ai_message}")
[ "langchain.memory.ConversationBufferMemory", "langchain.chat_models.ChatOpenAI" ]
[((231, 279), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model='gpt-3.5-turbo', temperature=0)\n", (241, 279), False, 'from langchain.chat_models import ChatOpenAI\n'), ((339, 365), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {}), '()\n', (363, 365), False, 'from langchain.memory import ConversationBufferMemory\n')]
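To illustrate what the memory object above accumulates (a sketch, not in the source): `ConversationBufferMemory` keeps the running transcript that `ConversationChain` injects into each prompt, so earlier turns remain visible to the model.

from langchain.memory import ConversationBufferMemory

memory = ConversationBufferMemory()
memory.save_context({"input": "Hi, I'm Alice"}, {"output": "Hello Alice!"})
memory.save_context({"input": "What's my name?"}, {"output": "You said it is Alice."})
print(memory.load_memory_variables({})["history"])
# Human: Hi, I'm Alice
# AI: Hello Alice!
# Human: What's my name?
# AI: You said it is Alice.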
import logging
import sys

import langchain
from extract_100knocks_qa import extract_questions
from langchain.chat_models import ChatOpenAI
from llama_index import (GPTSQLStructStoreIndex, LLMPredictor, ServiceContext,
                         SQLDatabase)
from ruamel.yaml import YAML
from sqlalchemy import create_engine

verbose = True

pgconfig = {
    'host': 'localhost',
    'port': 5432,
    'database': 'dsdojo_db',
    'user': 'padawan',
    'password': 'padawan12345',
}


def main():
    if verbose:
        logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
        logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
        langchain.verbose = True

    # Connect to the database
    database_url = 'postgresql://{user}:{password}@{host}:{port}/{database}'.format(
        **pgconfig)
    engine = create_engine(database_url)

    # LlamaIndex uses text-davinci-003 by default, so configure it to use gpt-3.5-turbo
    llm = ChatOpenAI(model_name='gpt-3.5-turbo', temperature=0)
    predictor = LLMPredictor(llm)
    service_context = ServiceContext.from_defaults(llm_predictor=predictor)

    # Set up LlamaIndex's text-to-SQL
    sql_database = SQLDatabase(engine)
    index = GPTSQLStructStoreIndex(
        [],
        service_context=service_context,
        sql_database=sql_database,
    )

    # Extract the list of questions
    questions = extract_questions()

    yaml = YAML()
    yaml.default_style = '|'

    with open('results/result.yaml', 'w', encoding='utf-8') as f:
        # Run text-to-SQL
        for question in questions:
            try:
                response = index.query(question)
                answer = response.extra_info['sql_query']
                qa = {
                    'question': question,
                    'answer': answer,
                }
            except Exception as e:
                qa = {
                    'question': question,
                    'error': str(e),
                }

            yaml.dump([qa], f)


if __name__ == "__main__":
    main()
[ "langchain.chat_models.ChatOpenAI" ]
[((829, 856), 'sqlalchemy.create_engine', 'create_engine', (['database_url'], {}), '(database_url)\n', (842, 856), False, 'from sqlalchemy import create_engine\n'), ((934, 987), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model_name='gpt-3.5-turbo', temperature=0)\n", (944, 987), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1004, 1021), 'llama_index.LLMPredictor', 'LLMPredictor', (['llm'], {}), '(llm)\n', (1016, 1021), False, 'from llama_index import GPTSQLStructStoreIndex, LLMPredictor, ServiceContext, SQLDatabase\n'), ((1044, 1097), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'predictor'}), '(llm_predictor=predictor)\n', (1072, 1097), False, 'from llama_index import GPTSQLStructStoreIndex, LLMPredictor, ServiceContext, SQLDatabase\n'), ((1150, 1169), 'llama_index.SQLDatabase', 'SQLDatabase', (['engine'], {}), '(engine)\n', (1161, 1169), False, 'from llama_index import GPTSQLStructStoreIndex, LLMPredictor, ServiceContext, SQLDatabase\n'), ((1182, 1273), 'llama_index.GPTSQLStructStoreIndex', 'GPTSQLStructStoreIndex', (['[]'], {'service_context': 'service_context', 'sql_database': 'sql_database'}), '([], service_context=service_context, sql_database=\n sql_database)\n', (1204, 1273), False, 'from llama_index import GPTSQLStructStoreIndex, LLMPredictor, ServiceContext, SQLDatabase\n'), ((1332, 1351), 'extract_100knocks_qa.extract_questions', 'extract_questions', ([], {}), '()\n', (1349, 1351), False, 'from extract_100knocks_qa import extract_questions\n'), ((1364, 1370), 'ruamel.yaml.YAML', 'YAML', ([], {}), '()\n', (1368, 1370), False, 'from ruamel.yaml import YAML\n'), ((520, 579), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG'}), '(stream=sys.stdout, level=logging.DEBUG)\n', (539, 579), False, 'import logging\n'), ((619, 659), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (640, 659), False, 'import logging\n'), ((588, 607), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (605, 607), False, 'import logging\n')]
import os import openai from dotenv import load_dotenv import logging import re import hashlib from langchain.embeddings.openai import OpenAIEmbeddings from langchain.llms import AzureOpenAI from langchain.vectorstores.base import VectorStore from langchain.chains import ChatVectorDBChain from langchain.chains import ConversationalRetrievalChain from langchain.chains.qa_with_sources import load_qa_with_sources_chain from langchain.chains.llm import LLMChain from langchain.chains.chat_vector_db.prompts import CONDENSE_QUESTION_PROMPT from langchain.prompts import PromptTemplate from langchain.document_loaders.base import BaseLoader from langchain.document_loaders import WebBaseLoader from langchain.text_splitter import TokenTextSplitter, TextSplitter from langchain.document_loaders.base import BaseLoader from langchain.document_loaders import TextLoader from langchain.chat_models import ChatOpenAI from langchain.schema import AIMessage, HumanMessage, SystemMessage from langchain.agents import initialize_agent, Tool, AgentType from utilities.formrecognizer import AzureFormRecognizerClient from utilities.azureblobstorage import AzureBlobStorageClient from utilities.translator import AzureTranslatorClient from utilities.customprompt import PROMPT from utilities.redis import RedisExtended from utilities.azuresearch import AzureSearch from utilities.NewAzureOpenAI import NewAzureOpenAI import langchain import pandas as pd import urllib from fake_useragent import UserAgent from utilities.tools import LifeKnowledgeSearchTool, IotDeviceControlTool, MansionPriceTool, LifeKnowledgeSearchConfig langchain.verbose = True class LLMHelper: def __init__(self, document_loaders : BaseLoader = None, text_splitter: TextSplitter = None, embeddings: OpenAIEmbeddings = None, llm: AzureOpenAI = None, temperature: float = None, max_tokens: int = None, custom_prompt: str = "", vector_store: VectorStore = None, k: int = None, pdf_parser: AzureFormRecognizerClient = None, blob_client: AzureBlobStorageClient = None, enable_translation: bool = False, translator: AzureTranslatorClient = None): load_dotenv() openai.api_type = "azure" openai.api_base = os.getenv('OPENAI_API_BASE') openai.api_version = "2023-03-15-preview" openai.api_key = os.getenv("OPENAI_API_KEY") # Azure OpenAI settings self.api_base = openai.api_base self.api_version = openai.api_version self.index_name: str = "embeddings" self.model: str = os.getenv('OPENAI_EMBEDDINGS_ENGINE_DOC', "text-embedding-ada-002") self.deployment_name: str = os.getenv("OPENAI_ENGINE", os.getenv("OPENAI_ENGINES", "text-davinci-003")) self.deployment_type: str = os.getenv("OPENAI_DEPLOYMENT_TYPE", "Text") self.temperature: float = float(os.getenv("OPENAI_TEMPERATURE", 0.7)) if temperature is None else temperature self.max_tokens: int = int(os.getenv("OPENAI_MAX_TOKENS", -1)) if max_tokens is None else max_tokens self.prompt = PROMPT if custom_prompt == '' else PromptTemplate(template=custom_prompt, input_variables=["summaries", "question"]) self.vector_store_type = os.getenv("VECTOR_STORE_TYPE") # Azure Search settings if self.vector_store_type == "AzureSearch": self.vector_store_address: str = os.getenv('AZURE_SEARCH_SERVICE_NAME') self.vector_store_password: str = os.getenv('AZURE_SEARCH_ADMIN_KEY') else: # Vector store settings self.vector_store_address: str = os.getenv('REDIS_ADDRESS', "localhost") self.vector_store_port: int= int(os.getenv('REDIS_PORT', 6379)) self.vector_store_protocol: str = os.getenv("REDIS_PROTOCOL", "redis://") self.vector_store_password: str = 
os.getenv("REDIS_PASSWORD", None) if self.vector_store_password: self.vector_store_full_address = f"{self.vector_store_protocol}:{self.vector_store_password}@{self.vector_store_address}:{self.vector_store_port}" else: self.vector_store_full_address = f"{self.vector_store_protocol}{self.vector_store_address}:{self.vector_store_port}" self.chunk_size = int(os.getenv('CHUNK_SIZE', 500)) self.chunk_overlap = int(os.getenv('CHUNK_OVERLAP', 100)) self.document_loaders: BaseLoader = WebBaseLoader if document_loaders is None else document_loaders self.text_splitter: TextSplitter = TokenTextSplitter(chunk_size=self.chunk_size, chunk_overlap=self.chunk_overlap) if text_splitter is None else text_splitter self.embeddings: OpenAIEmbeddings = OpenAIEmbeddings(model=self.model, chunk_size=1) if embeddings is None else embeddings if self.deployment_type == "Chat": self.llm: ChatOpenAI = ChatOpenAI(model_name=self.deployment_name, engine=self.deployment_name, temperature=self.temperature, max_tokens=self.max_tokens if self.max_tokens != -1 else None) if llm is None else llm else: self.llm: AzureOpenAI = NewAzureOpenAI(deployment_name=self.deployment_name, temperature=self.temperature, max_tokens=self.max_tokens) if llm is None else llm if self.vector_store_type == "AzureSearch": self.vector_store: VectorStore = AzureSearch(azure_cognitive_search_name=self.vector_store_address, azure_cognitive_search_key=self.vector_store_password, index_name=self.index_name, embedding_function=self.embeddings.embed_query) if vector_store is None else vector_store else: self.vector_store: RedisExtended = RedisExtended(redis_url=self.vector_store_full_address, index_name=self.index_name, embedding_function=self.embeddings.embed_query) if vector_store is None else vector_store self.k : int = 3 if k is None else k self.pdf_parser : AzureFormRecognizerClient = AzureFormRecognizerClient() if pdf_parser is None else pdf_parser self.blob_client: AzureBlobStorageClient = AzureBlobStorageClient() if blob_client is None else blob_client self.enable_translation : bool = False if enable_translation is None else enable_translation self.translator : AzureTranslatorClient = AzureTranslatorClient() if translator is None else translator self.user_agent: UserAgent() = UserAgent() self.user_agent.random self.current_contextDict = {} self.current_sources = None self.current_answer = None langchain.verbose = True def add_embeddings_lc(self, source_url): retrycount = 0 while True: try: documents = self.document_loaders(source_url).load() # Convert to UTF-8 encoding for non-ascii text for(document) in documents: try: if document.page_content.encode("iso-8859-1") == document.page_content.encode("latin-1"): document.page_content = document.page_content.encode("iso-8859-1").decode("utf-8", errors="ignore") except: pass docs = self.text_splitter.split_documents(documents) # Remove half non-ascii character from start/end of doc content (langchain TokenTextSplitter may split a non-ascii character in half) pattern = re.compile(r'[\x00-\x09\x0b\x0c\x0e-\x1f\x7f\u0080-\u00a0\u2000-\u3000\ufff0-\uffff]') # do not remove \x0a (\n) nor \x0d (\r) for(doc) in docs: doc.page_content = re.sub(pattern, '', doc.page_content) if doc.page_content == '': docs.remove(doc) keys = [] for i, doc in enumerate(docs): # Create a unique key for the document source_url = source_url.split('?')[0] filename = "/".join(source_url.split('/')[4:]) hash_key = hashlib.sha1(f"{source_url}_{i}".encode('utf-8')).hexdigest() hash_key = f"doc:{self.index_name}:{hash_key}" 
keys.append(hash_key) doc.metadata = {"source": f"[{source_url}]({source_url}_SAS_TOKEN_PLACEHOLDER_)" , "chunk": i, "key": hash_key, "filename": filename} if self.vector_store_type == 'AzureSearch': self.vector_store.add_documents(documents=docs, keys=keys) else: self.vector_store.add_documents(documents=docs, redis_url=self.vector_store_full_address, index_name=self.index_name, keys=keys) return except Exception as e: logging.error(f"Error adding embeddings for {source_url}: {e}") if retrycount > 3: raise e else: print(f"Retrying adding embeddings for {source_url}") retrycount += 1 def convert_file_and_add_embeddings(self, source_url, filename, enable_translation=False): # Extract the text from the file text = self.pdf_parser.analyze_read(source_url) # Translate if requested converted_text = list(map(lambda x: self.translator.translate(x), text)) if self.enable_translation else text # Remove half non-ascii character from start/end of doc content (langchain TokenTextSplitter may split a non-ascii character in half) pattern = re.compile(r'[\x00-\x09\x0b\x0c\x0e-\x1f\x7f\u0080-\u00a0\u2000-\u3000\ufff0-\uffff]') # do not remove \x0a (\n) nor \x0d (\r) converted_text = re.sub(pattern, '', "\n".join(converted_text)) # Upload the text to Azure Blob Storage converted_filename = f"converted/{filename}.txt" source_url = self.blob_client.upload_file(converted_text, f"converted/{filename}.txt", content_type='text/plain; charset=utf-8') print(f"Converted file uploaded to {source_url} with filename {filename}") # Update the metadata to indicate that the file has been converted self.blob_client.upsert_blob_metadata(filename, {"converted": "true"}) self.add_embeddings_lc(source_url=source_url) return converted_filename def get_all_documents(self, k: int = None): result = self.vector_store.similarity_search(query="*", k= k if k else self.k) dataFrame = pd.DataFrame(list(map(lambda x: { 'key': x.metadata['key'], 'filename': x.metadata['filename'], 'source': urllib.parse.unquote(x.metadata['source']), 'content': x.page_content, 'metadata' : x.metadata, }, result))) if dataFrame.empty is False: dataFrame = dataFrame.sort_values(by='filename') return dataFrame def get_semantic_answer_lang_chain(self, question, chat_history): question_generator = LLMChain(llm=self.llm, prompt=CONDENSE_QUESTION_PROMPT, verbose=True) doc_chain = load_qa_with_sources_chain(self.llm, chain_type="stuff", verbose=True, prompt=self.prompt) chain = ConversationalRetrievalChain( retriever=self.vector_store.as_retriever(), question_generator=question_generator, combine_docs_chain=doc_chain, return_source_documents=True, verbose=True, # top_k_docs_for_context= self.k ) result = chain({"question": question, "chat_history": chat_history}) sources = "\n".join(set(map(lambda x: x.metadata["source"], result['source_documents']))) container_sas = self.blob_client.get_container_sas() contextDict ={} for res in result['source_documents']: source_key = self.filter_sourcesLinks(res.metadata['source'].replace('_SAS_TOKEN_PLACEHOLDER_', container_sas)).replace('\n', '').replace(' ', '') if source_key not in contextDict: contextDict[source_key] = [] myPageContent = self.clean_encoding(res.page_content) contextDict[source_key].append(myPageContent) result['answer'] = result['answer'].split('SOURCES:')[0].split('Sources:')[0].split('SOURCE:')[0].split('Source:')[0] result['answer'] = self.clean_encoding(result['answer']) sources = sources.replace('_SAS_TOKEN_PLACEHOLDER_', container_sas) sources = self.filter_sourcesLinks(sources) 
self.current_contextDict = contextDict self.current_sources = sources self.current_answer = result['answer'] return result['answer'], contextDict, sources def get_general_operation_lang_chain(self, question, chat_history): lfTool = LifeKnowledgeSearchTool(config=LifeKnowledgeSearchConfig(chat_history=chat_history, get_semantic_answer_lang_chain_func=self.get_semantic_answer_lang_chain)) tools = [ MansionPriceTool(), lfTool, IotDeviceControlTool(), # Tool( # name="生活の知恵機能", # func=semantic_run, # description="ユーザの問い合わせが生活の知恵が必要な場合は、生活の知恵ナレッジから返答することができます。" # ), # Tool( # name="建物の価格機能", # func=semantic_run, # description="ユーザが引っ越しや新しい家を探している場合は、この機能をりようすることができます" # ), # Tool( # name="IoTデバイス制御", # func=iot_device_run, # description="ユーザが自宅のデバイスを制御したい際に利用することができます。" # ), ] agent = initialize_agent( tools, llm=self.llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=True, return_intermediate_steps=True) result = agent({"input":question}) return question, result['output'], self.current_contextDict, self.current_sources def get_embeddings_model(self): OPENAI_EMBEDDINGS_ENGINE_DOC = os.getenv('OPENAI_EMEBDDINGS_ENGINE', os.getenv('OPENAI_EMBEDDINGS_ENGINE_DOC', 'text-embedding-ada-002')) OPENAI_EMBEDDINGS_ENGINE_QUERY = os.getenv('OPENAI_EMEBDDINGS_ENGINE', os.getenv('OPENAI_EMBEDDINGS_ENGINE_QUERY', 'text-embedding-ada-002')) return { "doc": OPENAI_EMBEDDINGS_ENGINE_DOC, "query": OPENAI_EMBEDDINGS_ENGINE_QUERY } def get_completion(self, prompt, **kwargs): if self.deployment_type == 'Chat': return self.llm([HumanMessage(content=prompt)]).content else: return self.llm(prompt) # remove paths from sources to only keep the filename def filter_sourcesLinks(self, sources): # use regex to replace all occurences of '[anypath/anypath/somefilename.xxx](the_link)' to '[somefilename](thelink)' in sources pattern = r'\[[^\]]*?/([^/\]]*?)\]' match = re.search(pattern, sources) while match: withoutExtensions = match.group(1).split('.')[0] # remove any extension to the name of the source document sources = sources[:match.start()] + f'[{withoutExtensions}]' + sources[match.end():] match = re.search(pattern, sources) sources = ' \n ' + sources.replace('\n', ' \n ') # add a carriage return after each source return sources def extract_followupquestions(self, answer): followupTag = answer.find('Follow-up Questions') followupQuestions = answer.find('<<') # take min of followupTag and folloupQuestions if not -1 to avoid taking the followup questions if there is no followupTag followupTag = min(followupTag, followupQuestions) if followupTag != -1 and followupQuestions != -1 else max(followupTag, followupQuestions) answer_without_followupquestions = answer[:followupTag] if followupTag != -1 else answer followup_questions = answer[followupTag:].strip() if followupTag != -1 else '' # Extract the followup questions as a list pattern = r'\<\<(.*?)\>\>' match = re.search(pattern, followup_questions) followup_questions_list = [] while match: followup_questions_list.append(followup_questions[match.start()+2:match.end()-2]) followup_questions = followup_questions[match.end():] match = re.search(pattern, followup_questions) if followup_questions_list != '': # Extract follow up question pattern = r'\d. 
(.*)' match = re.search(pattern, followup_questions) while match: followup_questions_list.append(followup_questions[match.start()+3:match.end()]) followup_questions = followup_questions[match.end():] match = re.search(pattern, followup_questions) if followup_questions_list != '': pattern = r'Follow-up Question: (.*)' match = re.search(pattern, followup_questions) while match: followup_questions_list.append(followup_questions[match.start()+19:match.end()]) followup_questions = followup_questions[match.end():] match = re.search(pattern, followup_questions) # Special case when 'Follow-up questions:' appears in the answer after the << followupTag = answer_without_followupquestions.lower().find('follow-up questions') if followupTag != -1: answer_without_followupquestions = answer_without_followupquestions[:followupTag] followupTag = answer_without_followupquestions.lower().find('follow up questions') # LLM can make variations... if followupTag != -1: answer_without_followupquestions = answer_without_followupquestions[:followupTag] return answer_without_followupquestions, followup_questions_list # insert citations in the answer - find filenames in the answer maching sources from the filenamelist and replace them with '${(id+1)}' def insert_citations_in_answer(self, answer, filenameList): filenameList_lowered = [x.lower() for x in filenameList] # LLM can make case mitakes in returing the filename of the source matched_sources = [] pattern = r'\[\[(.*?)\]\]' match = re.search(pattern, answer) while match: filename = match.group(1).split('.')[0] # remove any extension to the name of the source document if filename in filenameList: if filename not in matched_sources: matched_sources.append(filename.lower()) filenameIndex = filenameList.index(filename) + 1 answer = answer[:match.start()] + '$^{' + f'{filenameIndex}' + '}$' + answer[match.end():] else: answer = answer[:match.start()] + '$^{' + f'{filename.lower()}' + '}$' + answer[match.end():] match = re.search(pattern, answer) # When page is reloaded search for references already added to the answer (e.g. '${(id+1)}') for id, filename in enumerate(filenameList_lowered): reference = '$^{' + f'{id+1}' + '}$' if reference in answer and not filename in matched_sources: matched_sources.append(filename) return answer, matched_sources, filenameList_lowered def get_links_filenames(self, answer, sources): if sources != None: split_sources = sources.split(' \n ') # soures are expected to be of format ' \n [filename1.ext](sourcelink1) \n [filename2.ext](sourcelink2) \n [filename3.ext](sourcelink3) \n ' else: split_sources = [] srcList = [] linkList = [] filenameList = [] for src in split_sources: if src != '': srcList.append(src) link = src[1:].split('(')[1][:-1].split(')')[0] # get the link linkList.append(link) filename = src[1:].split(']')[0] # retrieve the source filename. source_url = link.split('?')[0] answer = answer.replace(source_url, filename) # if LLM added a path to the filename, remove it from the answer filenameList.append(filename) answer, matchedSourcesList, filenameList = self.insert_citations_in_answer(answer, filenameList) # Add (1), (2), (3) to the answer to indicate the source of the answer return answer, srcList, matchedSourcesList, linkList, filenameList def clean_encoding(self, text): try: encoding = 'ISO-8859-1' encodedtext = text.encode(encoding) encodedtext = encodedtext.decode('utf-8') except Exception as e: encodedtext = text return encodedtext
[ "langchain.agents.initialize_agent", "langchain.chains.llm.LLMChain", "langchain.chat_models.ChatOpenAI", "langchain.schema.HumanMessage", "langchain.text_splitter.TokenTextSplitter", "langchain.chains.qa_with_sources.load_qa_with_sources_chain", "langchain.prompts.PromptTemplate", "langchain.embeddings.openai.OpenAIEmbeddings" ]
[((2224, 2237), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (2235, 2237), False, 'from dotenv import load_dotenv\n'), ((2298, 2326), 'os.getenv', 'os.getenv', (['"""OPENAI_API_BASE"""'], {}), "('OPENAI_API_BASE')\n", (2307, 2326), False, 'import os\n'), ((2402, 2429), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (2411, 2429), False, 'import os\n'), ((2619, 2686), 'os.getenv', 'os.getenv', (['"""OPENAI_EMBEDDINGS_ENGINE_DOC"""', '"""text-embedding-ada-002"""'], {}), "('OPENAI_EMBEDDINGS_ENGINE_DOC', 'text-embedding-ada-002')\n", (2628, 2686), False, 'import os\n'), ((2835, 2878), 'os.getenv', 'os.getenv', (['"""OPENAI_DEPLOYMENT_TYPE"""', '"""Text"""'], {}), "('OPENAI_DEPLOYMENT_TYPE', 'Text')\n", (2844, 2878), False, 'import os\n'), ((3278, 3308), 'os.getenv', 'os.getenv', (['"""VECTOR_STORE_TYPE"""'], {}), "('VECTOR_STORE_TYPE')\n", (3287, 3308), False, 'import os\n'), ((6379, 6390), 'fake_useragent.UserAgent', 'UserAgent', ([], {}), '()\n', (6388, 6390), False, 'from fake_useragent import UserAgent\n'), ((6393, 6404), 'fake_useragent.UserAgent', 'UserAgent', ([], {}), '()\n', (6402, 6404), False, 'from fake_useragent import UserAgent\n'), ((9603, 9711), 're.compile', 're.compile', (['"""[\\\\x00-\\\\x09\\\\x0b\\\\x0c\\\\x0e-\\\\x1f\\\\x7f\\\\u0080-\\\\u00a0\\\\u2000-\\\\u3000\\\\ufff0-\\\\uffff]"""'], {}), "(\n '[\\\\x00-\\\\x09\\\\x0b\\\\x0c\\\\x0e-\\\\x1f\\\\x7f\\\\u0080-\\\\u00a0\\\\u2000-\\\\u3000\\\\ufff0-\\\\uffff]'\n )\n", (9613, 9711), False, 'import re\n'), ((11066, 11135), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'CONDENSE_QUESTION_PROMPT', 'verbose': '(True)'}), '(llm=self.llm, prompt=CONDENSE_QUESTION_PROMPT, verbose=True)\n', (11074, 11135), False, 'from langchain.chains.llm import LLMChain\n'), ((11156, 11250), 'langchain.chains.qa_with_sources.load_qa_with_sources_chain', 'load_qa_with_sources_chain', (['self.llm'], {'chain_type': '"""stuff"""', 'verbose': '(True)', 'prompt': 'self.prompt'}), "(self.llm, chain_type='stuff', verbose=True,\n prompt=self.prompt)\n", (11182, 11250), False, 'from langchain.chains.qa_with_sources import load_qa_with_sources_chain\n'), ((13693, 13814), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools'], {'llm': 'self.llm', 'agent': 'AgentType.OPENAI_FUNCTIONS', 'verbose': '(True)', 'return_intermediate_steps': '(True)'}), '(tools, llm=self.llm, agent=AgentType.OPENAI_FUNCTIONS,\n verbose=True, return_intermediate_steps=True)\n', (13709, 13814), False, 'from langchain.agents import initialize_agent, Tool, AgentType\n'), ((14982, 15009), 're.search', 're.search', (['pattern', 'sources'], {}), '(pattern, sources)\n', (14991, 15009), False, 'import re\n'), ((16149, 16187), 're.search', 're.search', (['pattern', 'followup_questions'], {}), '(pattern, followup_questions)\n', (16158, 16187), False, 'import re\n'), ((18363, 18389), 're.search', 're.search', (['pattern', 'answer'], {}), '(pattern, answer)\n', (18372, 18389), False, 'import re\n'), ((2750, 2797), 'os.getenv', 'os.getenv', (['"""OPENAI_ENGINES"""', '"""text-davinci-003"""'], {}), "('OPENAI_ENGINES', 'text-davinci-003')\n", (2759, 2797), False, 'import os\n'), ((3163, 3248), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'custom_prompt', 'input_variables': "['summaries', 'question']"}), "(template=custom_prompt, input_variables=['summaries',\n 'question'])\n", (3177, 3248), False, 'from langchain.prompts import PromptTemplate\n'), ((3440, 3478), 'os.getenv', 
'os.getenv', (['"""AZURE_SEARCH_SERVICE_NAME"""'], {}), "('AZURE_SEARCH_SERVICE_NAME')\n", (3449, 3478), False, 'import os\n'), ((3525, 3560), 'os.getenv', 'os.getenv', (['"""AZURE_SEARCH_ADMIN_KEY"""'], {}), "('AZURE_SEARCH_ADMIN_KEY')\n", (3534, 3560), False, 'import os\n'), ((3657, 3696), 'os.getenv', 'os.getenv', (['"""REDIS_ADDRESS"""', '"""localhost"""'], {}), "('REDIS_ADDRESS', 'localhost')\n", (3666, 3696), False, 'import os\n'), ((3819, 3858), 'os.getenv', 'os.getenv', (['"""REDIS_PROTOCOL"""', '"""redis://"""'], {}), "('REDIS_PROTOCOL', 'redis://')\n", (3828, 3858), False, 'import os\n'), ((3905, 3938), 'os.getenv', 'os.getenv', (['"""REDIS_PASSWORD"""', 'None'], {}), "('REDIS_PASSWORD', None)\n", (3914, 3938), False, 'import os\n'), ((4328, 4356), 'os.getenv', 'os.getenv', (['"""CHUNK_SIZE"""', '(500)'], {}), "('CHUNK_SIZE', 500)\n", (4337, 4356), False, 'import os\n'), ((4391, 4422), 'os.getenv', 'os.getenv', (['"""CHUNK_OVERLAP"""', '(100)'], {}), "('CHUNK_OVERLAP', 100)\n", (4400, 4422), False, 'import os\n'), ((4575, 4654), 'langchain.text_splitter.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': 'self.chunk_size', 'chunk_overlap': 'self.chunk_overlap'}), '(chunk_size=self.chunk_size, chunk_overlap=self.chunk_overlap)\n', (4592, 4654), False, 'from langchain.text_splitter import TokenTextSplitter, TextSplitter\n'), ((4743, 4791), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'model': 'self.model', 'chunk_size': '(1)'}), '(model=self.model, chunk_size=1)\n', (4759, 4791), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((5958, 5985), 'utilities.formrecognizer.AzureFormRecognizerClient', 'AzureFormRecognizerClient', ([], {}), '()\n', (5983, 5985), False, 'from utilities.formrecognizer import AzureFormRecognizerClient\n'), ((6075, 6099), 'utilities.azureblobstorage.AzureBlobStorageClient', 'AzureBlobStorageClient', ([], {}), '()\n', (6097, 6099), False, 'from utilities.azureblobstorage import AzureBlobStorageClient\n'), ((6291, 6314), 'utilities.translator.AzureTranslatorClient', 'AzureTranslatorClient', ([], {}), '()\n', (6312, 6314), False, 'from utilities.translator import AzureTranslatorClient\n'), ((13045, 13063), 'utilities.tools.MansionPriceTool', 'MansionPriceTool', ([], {}), '()\n', (13061, 13063), False, 'from utilities.tools import LifeKnowledgeSearchTool, IotDeviceControlTool, MansionPriceTool, LifeKnowledgeSearchConfig\n'), ((13097, 13119), 'utilities.tools.IotDeviceControlTool', 'IotDeviceControlTool', ([], {}), '()\n', (13117, 13119), False, 'from utilities.tools import LifeKnowledgeSearchTool, IotDeviceControlTool, MansionPriceTool, LifeKnowledgeSearchConfig\n'), ((14123, 14190), 'os.getenv', 'os.getenv', (['"""OPENAI_EMBEDDINGS_ENGINE_DOC"""', '"""text-embedding-ada-002"""'], {}), "('OPENAI_EMBEDDINGS_ENGINE_DOC', 'text-embedding-ada-002')\n", (14132, 14190), False, 'import os\n'), ((14273, 14342), 'os.getenv', 'os.getenv', (['"""OPENAI_EMBEDDINGS_ENGINE_QUERY"""', '"""text-embedding-ada-002"""'], {}), "('OPENAI_EMBEDDINGS_ENGINE_QUERY', 'text-embedding-ada-002')\n", (14282, 14342), False, 'import os\n'), ((15267, 15294), 're.search', 're.search', (['pattern', 'sources'], {}), '(pattern, sources)\n', (15276, 15294), False, 'import re\n'), ((16426, 16464), 're.search', 're.search', (['pattern', 'followup_questions'], {}), '(pattern, followup_questions)\n', (16435, 16464), False, 'import re\n'), ((16612, 16650), 're.search', 're.search', (['pattern', 'followup_questions'], {}), '(pattern, 
followup_questions)\n', (16621, 16650), False, 'import re\n'), ((17018, 17056), 're.search', 're.search', (['pattern', 'followup_questions'], {}), '(pattern, followup_questions)\n', (17027, 17056), False, 'import re\n'), ((18995, 19021), 're.search', 're.search', (['pattern', 'answer'], {}), '(pattern, answer)\n', (19004, 19021), False, 'import re\n'), ((2919, 2955), 'os.getenv', 'os.getenv', (['"""OPENAI_TEMPERATURE"""', '(0.7)'], {}), "('OPENAI_TEMPERATURE', 0.7)\n", (2928, 2955), False, 'import os\n'), ((3032, 3066), 'os.getenv', 'os.getenv', (['"""OPENAI_MAX_TOKENS"""', '(-1)'], {}), "('OPENAI_MAX_TOKENS', -1)\n", (3041, 3066), False, 'import os\n'), ((3742, 3771), 'os.getenv', 'os.getenv', (['"""REDIS_PORT"""', '(6379)'], {}), "('REDIS_PORT', 6379)\n", (3751, 3771), False, 'import os\n'), ((4908, 5082), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'self.deployment_name', 'engine': 'self.deployment_name', 'temperature': 'self.temperature', 'max_tokens': '(self.max_tokens if self.max_tokens != -1 else None)'}), '(model_name=self.deployment_name, engine=self.deployment_name,\n temperature=self.temperature, max_tokens=self.max_tokens if self.\n max_tokens != -1 else None)\n', (4918, 5082), False, 'from langchain.chat_models import ChatOpenAI\n'), ((5148, 5263), 'utilities.NewAzureOpenAI.NewAzureOpenAI', 'NewAzureOpenAI', ([], {'deployment_name': 'self.deployment_name', 'temperature': 'self.temperature', 'max_tokens': 'self.max_tokens'}), '(deployment_name=self.deployment_name, temperature=self.\n temperature, max_tokens=self.max_tokens)\n', (5162, 5263), False, 'from utilities.NewAzureOpenAI import NewAzureOpenAI\n'), ((5380, 5586), 'utilities.azuresearch.AzureSearch', 'AzureSearch', ([], {'azure_cognitive_search_name': 'self.vector_store_address', 'azure_cognitive_search_key': 'self.vector_store_password', 'index_name': 'self.index_name', 'embedding_function': 'self.embeddings.embed_query'}), '(azure_cognitive_search_name=self.vector_store_address,\n azure_cognitive_search_key=self.vector_store_password, index_name=self.\n index_name, embedding_function=self.embeddings.embed_query)\n', (5391, 5586), False, 'from utilities.azuresearch import AzureSearch\n'), ((5681, 5817), 'utilities.redis.RedisExtended', 'RedisExtended', ([], {'redis_url': 'self.vector_store_full_address', 'index_name': 'self.index_name', 'embedding_function': 'self.embeddings.embed_query'}), '(redis_url=self.vector_store_full_address, index_name=self.\n index_name, embedding_function=self.embeddings.embed_query)\n', (5694, 5817), False, 'from utilities.redis import RedisExtended\n'), ((7486, 7594), 're.compile', 're.compile', (['"""[\\\\x00-\\\\x09\\\\x0b\\\\x0c\\\\x0e-\\\\x1f\\\\x7f\\\\u0080-\\\\u00a0\\\\u2000-\\\\u3000\\\\ufff0-\\\\uffff]"""'], {}), "(\n '[\\\\x00-\\\\x09\\\\x0b\\\\x0c\\\\x0e-\\\\x1f\\\\x7f\\\\u0080-\\\\u00a0\\\\u2000-\\\\u3000\\\\ufff0-\\\\uffff]'\n )\n", (7496, 7594), False, 'import re\n'), ((12888, 13017), 'utilities.tools.LifeKnowledgeSearchConfig', 'LifeKnowledgeSearchConfig', ([], {'chat_history': 'chat_history', 'get_semantic_answer_lang_chain_func': 'self.get_semantic_answer_lang_chain'}), '(chat_history=chat_history,\n get_semantic_answer_lang_chain_func=self.get_semantic_answer_lang_chain)\n', (12913, 13017), False, 'from utilities.tools import LifeKnowledgeSearchTool, IotDeviceControlTool, MansionPriceTool, LifeKnowledgeSearchConfig\n'), ((16866, 16904), 're.search', 're.search', (['pattern', 'followup_questions'], {}), '(pattern, followup_questions)\n', (16875, 
16904), False, 'import re\n'), ((17273, 17311), 're.search', 're.search', (['pattern', 'followup_questions'], {}), '(pattern, followup_questions)\n', (17282, 17311), False, 'import re\n'), ((7687, 7724), 're.sub', 're.sub', (['pattern', '""""""', 'doc.page_content'], {}), "(pattern, '', doc.page_content)\n", (7693, 7724), False, 'import re\n'), ((8829, 8892), 'logging.error', 'logging.error', (['f"""Error adding embeddings for {source_url}: {e}"""'], {}), "(f'Error adding embeddings for {source_url}: {e}')\n", (8842, 8892), False, 'import logging\n'), ((14593, 14621), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'prompt'}), '(content=prompt)\n', (14605, 14621), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage\n'), ((10684, 10726), 'urllib.parse.unquote', 'urllib.parse.unquote', (["x.metadata['source']"], {}), "(x.metadata['source'])\n", (10704, 10726), False, 'import urllib\n')]
# -*- coding: utf-8 -*- # Copyright © Spyder Project Contributors # Licensed under the terms of the MIT License """Kite completion HTTP client.""" # Standard library imports import logging import functools import os import os.path as osp # Qt imports from qtpy.QtCore import Slot from qtpy.QtWidgets import QMessageBox # Local imports from langchain_provider.client import LangchainClient from langchain_provider.widgets import (LangchainStatusWidget) # Spyder imports from spyder.api.config.decorators import on_conf_change from spyder.config.base import _, running_under_pytest, get_module_data_path from spyder.plugins.completion.api import SpyderCompletionProvider from spyder.utils.image_path_manager import IMAGE_PATH_MANAGER from spyder.utils.programs import run_program logger = logging.getLogger(__name__) class LangchainProvider(SpyderCompletionProvider): COMPLETION_PROVIDER_NAME = 'langchain' DEFAULT_ORDER = 1 SLOW = True CONF_VERSION = "1.0.0" SUGGESTIONS='4' LANGUAGE='Python' TEMPLATE_PARAM = """You are a helpful assistant in completing following {0} code based on the previous sentence. You always complete the code in same line and give {1} suggestions. Example : a=3 b=4 print AI : "suggestions": ["print(a)", "print(b)", "print(a+b)"] Example : a=3 b=4 c AI : "suggestions": ["c=a+b", "c=a-b", "c=5"] Format the output as JSON with the following key: suggestions """.format(LANGUAGE,SUGGESTIONS) MODEL_NAME_PARAM = "gpt-3.5-turbo" def __init__(self, parent, config): super().__init__(parent, config) IMAGE_PATH_MANAGER.add_image_path( get_module_data_path('langchain_provider', relpath='images') ) self.available_languages = [] self.client = LangchainClient(None,model_name=self.MODEL_NAME_PARAM,template=self.TEMPLATE_PARAM) # Signals self.client.sig_client_started.connect( lambda: self.sig_provider_ready.emit( self.COMPLETION_PROVIDER_NAME)) self.client.sig_client_error.connect(self.set_status_error) self.client.sig_status_response_ready[str].connect( self.set_status) self.client.sig_status_response_ready[dict].connect( self.set_status) self.client.sig_response_ready.connect( lambda _id, resp: self.sig_response_ready.emit( self.COMPLETION_PROVIDER_NAME, _id, resp)) # Status bar widget self.STATUS_BAR_CLASSES = [ self.create_statusbar ] self.started = False # Config self.update_langchain_configuration(self.config) # ------------------ SpyderCompletionProvider methods --------------------- def get_name(self): return 'LangChain' def send_request(self, language, req_type, req, req_id): request = { 'type': req_type, 'file': req['file'], 'id': req_id, 'msg': req } self.client.sig_perform_request.emit(request) def start_completion_services_for_language(self, language): return self.started def start(self): if not self.started: self.client.start() self.started = True def shutdown(self): if self.started: self.client.stop() self.started = False @Slot(str) @Slot(dict) def set_status(self, status): """Show Langchain status for the current file.""" self.sig_call_statusbar.emit( LangchainStatusWidget.ID, 'set_value', (status,), {}) def set_status_error(self): """Show Langchain status for the current file.""" self.sig_call_statusbar.emit( LangchainStatusWidget.ID, 'set_value', ("Unexpected error",), {}) def file_opened_closed_or_updated(self, filename, _language): """Request status for the given file.""" self.client.sig_perform_status_request.emit(filename) @on_conf_change( section='completions', option=('enabled_providers', 'langchain')) def on_langchain_enable_changed(self, value): 
self.sig_call_statusbar.emit( LangchainStatusWidget.ID, 'set_value', (None,), {}) @on_conf_change(section='completions', option='enable_code_snippets') def on_code_snippets_changed(self, value): if running_under_pytest(): if not os.environ.get('SPY_TEST_USE_INTROSPECTION'): return self.client.enable_code_snippets = self.get_conf( 'enable_code_snippets', section='completions') @on_conf_change def update_langchain_configuration(self, config): if running_under_pytest(): if not os.environ.get('SPY_TEST_USE_INTROSPECTION'): return def create_statusbar(self, parent): return LangchainStatusWidget(parent, self)
[ "langchain_provider.widgets.LangchainStatusWidget", "langchain_provider.client.LangchainClient" ]
[((795, 822), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (812, 822), False, 'import logging\n'), ((3539, 3548), 'qtpy.QtCore.Slot', 'Slot', (['str'], {}), '(str)\n', (3543, 3548), False, 'from qtpy.QtCore import Slot\n'), ((3554, 3564), 'qtpy.QtCore.Slot', 'Slot', (['dict'], {}), '(dict)\n', (3558, 3564), False, 'from qtpy.QtCore import Slot\n'), ((4164, 4249), 'spyder.api.config.decorators.on_conf_change', 'on_conf_change', ([], {'section': '"""completions"""', 'option': "('enabled_providers', 'langchain')"}), "(section='completions', option=('enabled_providers', 'langchain')\n )\n", (4178, 4249), False, 'from spyder.api.config.decorators import on_conf_change\n'), ((4412, 4480), 'spyder.api.config.decorators.on_conf_change', 'on_conf_change', ([], {'section': '"""completions"""', 'option': '"""enable_code_snippets"""'}), "(section='completions', option='enable_code_snippets')\n", (4426, 4480), False, 'from spyder.api.config.decorators import on_conf_change\n'), ((1946, 2036), 'langchain_provider.client.LangchainClient', 'LangchainClient', (['None'], {'model_name': 'self.MODEL_NAME_PARAM', 'template': 'self.TEMPLATE_PARAM'}), '(None, model_name=self.MODEL_NAME_PARAM, template=self.\n TEMPLATE_PARAM)\n', (1961, 2036), False, 'from langchain_provider.client import LangchainClient\n'), ((4539, 4561), 'spyder.config.base.running_under_pytest', 'running_under_pytest', ([], {}), '()\n', (4559, 4561), False, 'from spyder.config.base import _, running_under_pytest, get_module_data_path\n'), ((4855, 4877), 'spyder.config.base.running_under_pytest', 'running_under_pytest', ([], {}), '()\n', (4875, 4877), False, 'from spyder.config.base import _, running_under_pytest, get_module_data_path\n'), ((5023, 5058), 'langchain_provider.widgets.LangchainStatusWidget', 'LangchainStatusWidget', (['parent', 'self'], {}), '(parent, self)\n', (5044, 5058), False, 'from langchain_provider.widgets import LangchainStatusWidget\n'), ((1815, 1875), 'spyder.config.base.get_module_data_path', 'get_module_data_path', (['"""langchain_provider"""'], {'relpath': '"""images"""'}), "('langchain_provider', relpath='images')\n", (1835, 1875), False, 'from spyder.config.base import _, running_under_pytest, get_module_data_path\n'), ((4582, 4626), 'os.environ.get', 'os.environ.get', (['"""SPY_TEST_USE_INTROSPECTION"""'], {}), "('SPY_TEST_USE_INTROSPECTION')\n", (4596, 4626), False, 'import os\n'), ((4898, 4942), 'os.environ.get', 'os.environ.get', (['"""SPY_TEST_USE_INTROSPECTION"""'], {}), "('SPY_TEST_USE_INTROSPECTION')\n", (4912, 4942), False, 'import os\n')]
import os import streamlit as st import pickle import time import langchain import faiss from langchain.llms import OpenAI from langchain.chains import RetrievalQAWithSourcesChain from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.document_loaders import UnstructuredURLLoader from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores import FAISS # from langchain.vectorstores import FaissFlatIndexStore # initialize variables from .env file from dotenv import load_dotenv # load_dotenv() # # load the model # llm = OpenAI(temperature=0.9, max_tokens=500) # # load the chain (data) # loaders = UnstructuredURLLoader(urls=[ # "https://www.bbc.co.uk/news/science-environment-67383755", # "https://www.bbc.co.uk/news/business-67284936", # "https://www.bbc.co.uk/news/uk-67302048", # ]) # data = loaders.load() # # split the data into chunks # text_splitter = RecursiveCharacterTextSplitter( # chunk_size=500, # chunk_overlap=100, # ) # docs = text_splitter.split_documents(data) # # create the embeddings # embeddings = OpenAIEmbeddings() # # create the vector store # vector_index = FAISS.from_documents(docs, embeddings) # time.sleep(15) # # Store vector index locally # try: # file_path = "vector_index.pkl" # with open(file_path, "wb") as f: # pickle.dump(vector_index, f) # if os.path.exists(file_path): # with open(file_path, "rb") as f: # vector_index = pickle.load(f) # except: # pass # # create retrieval chain # r_chain = RetrievalQAWithSourcesChain(llm=llm, retriever=vector_index.as_retriever()) # # create retrieval query # query = "Is AI good for the world?" # r_chain({"question": query}, return_only_outputs=True) # print(r_chain) import streamlit as st load_dotenv() st.title("Website Summary") st.sidebar.title("Website URLs") urls = [] for i in range(3): url = st.sidebar.text_input(f"URL {i + 1}") urls.append(url) process_url_clicked = st.sidebar.button("Summarise") # set up for loading bar main_placeholder = st.empty() # set up llm llm = OpenAI(temperature=0.9, max_tokens=500) # Set up button actiom if process_url_clicked: # show loading bar main_placeholder.text("Loading...") main_placeholder.progress(0.1) # load data loader = UnstructuredURLLoader(urls=urls) data = loader.load() # update progress bar main_placeholder.progress(0.4) # split data text_splitter = RecursiveCharacterTextSplitter( separators=['\n\n', '\n', '. '], chunk_size=1000, chunk_overlap=100 ) docs = text_splitter.split_documents(data) # update progress bar main_placeholder.progress(0.6) # create embeddings embeddings = OpenAIEmbeddings(model="text-embedding-ada-002") # create FAISS vecorstore_openai = FAISS.from_documents(docs, embeddings) # using FAISS save_local to save db vecorstore_openai.save_local("vector_index.db") # keeping this in memory as pickle not working. 
# update progress bar main_placeholder.progress(1.0) # set main page to query options query = main_placeholder.text_input("Enter your question: ") if query: if os.path.exists("vector_index.db"): # load vector store with FAISS embeddings = OpenAIEmbeddings(model="text-embedding-ada-002") vectorstore = FAISS.load_local("vector_index.db", embeddings) # set up llm llm = OpenAI(temperature=0.9, max_tokens=500, model="gpt-3.5-turbo-instruct") # setup langchain chain = RetrievalQAWithSourcesChain.from_llm(llm=llm, retriever=vectorstore.as_retriever()) response = chain({"question": query}, return_only_outputs=True) # parse response dict and present to user if isinstance(response, dict): if "answer" in response: st.header("Answer: ") st.write(response["answer"]) if "context" in response: st.subheader("Context: ") st.write(response["context"]) if "sources" in response: sources = response["sources"] st.subheader("Sources: ") sources_list = sources.split("\n") for source in sources_list: st.write(source)
[ "langchain.vectorstores.FAISS.load_local", "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.document_loaders.UnstructuredURLLoader", "langchain.llms.OpenAI", "langchain.vectorstores.FAISS.from_documents", "langchain.embeddings.OpenAIEmbeddings" ]
[((1876, 1889), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1887, 1889), False, 'from dotenv import load_dotenv\n'), ((1891, 1918), 'streamlit.title', 'st.title', (['"""Website Summary"""'], {}), "('Website Summary')\n", (1899, 1918), True, 'import streamlit as st\n'), ((1919, 1951), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Website URLs"""'], {}), "('Website URLs')\n", (1935, 1951), True, 'import streamlit as st\n'), ((2074, 2104), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Summarise"""'], {}), "('Summarise')\n", (2091, 2104), True, 'import streamlit as st\n'), ((2150, 2160), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (2158, 2160), True, 'import streamlit as st\n'), ((2180, 2219), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.9)', 'max_tokens': '(500)'}), '(temperature=0.9, max_tokens=500)\n', (2186, 2219), False, 'from langchain.llms import OpenAI\n'), ((1992, 2029), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['f"""URL {i + 1}"""'], {}), "(f'URL {i + 1}')\n", (2013, 2029), True, 'import streamlit as st\n'), ((2395, 2427), 'langchain.document_loaders.UnstructuredURLLoader', 'UnstructuredURLLoader', ([], {'urls': 'urls'}), '(urls=urls)\n', (2416, 2427), False, 'from langchain.document_loaders import UnstructuredURLLoader\n'), ((2551, 2655), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'separators': "['\\n\\n', '\\n', '. ']", 'chunk_size': '(1000)', 'chunk_overlap': '(100)'}), "(separators=['\\n\\n', '\\n', '. '], chunk_size=\n 1000, chunk_overlap=100)\n", (2581, 2655), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2830, 2878), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'model': '"""text-embedding-ada-002"""'}), "(model='text-embedding-ada-002')\n", (2846, 2878), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((2922, 2960), 'langchain.vectorstores.FAISS.from_documents', 'FAISS.from_documents', (['docs', 'embeddings'], {}), '(docs, embeddings)\n', (2942, 2960), False, 'from langchain.vectorstores import FAISS\n'), ((3280, 3313), 'os.path.exists', 'os.path.exists', (['"""vector_index.db"""'], {}), "('vector_index.db')\n", (3294, 3313), False, 'import os\n'), ((3375, 3423), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'model': '"""text-embedding-ada-002"""'}), "(model='text-embedding-ada-002')\n", (3391, 3423), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((3446, 3493), 'langchain.vectorstores.FAISS.load_local', 'FAISS.load_local', (['"""vector_index.db"""', 'embeddings'], {}), "('vector_index.db', embeddings)\n", (3462, 3493), False, 'from langchain.vectorstores import FAISS\n'), ((3529, 3600), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.9)', 'max_tokens': '(500)', 'model': '"""gpt-3.5-turbo-instruct"""'}), "(temperature=0.9, max_tokens=500, model='gpt-3.5-turbo-instruct')\n", (3535, 3600), False, 'from langchain.llms import OpenAI\n'), ((3941, 3962), 'streamlit.header', 'st.header', (['"""Answer: """'], {}), "('Answer: ')\n", (3950, 3962), True, 'import streamlit as st\n'), ((3979, 4007), 'streamlit.write', 'st.write', (["response['answer']"], {}), "(response['answer'])\n", (3987, 4007), True, 'import streamlit as st\n'), ((4062, 4087), 'streamlit.subheader', 'st.subheader', (['"""Context: """'], {}), "('Context: ')\n", (4074, 4087), True, 'import streamlit as st\n'), ((4104, 4133), 'streamlit.write', 'st.write', (["response['context']"], 
{}), "(response['context'])\n", (4112, 4133), True, 'import streamlit as st\n'), ((4234, 4259), 'streamlit.subheader', 'st.subheader', (['"""Sources: """'], {}), "('Sources: ')\n", (4246, 4259), True, 'import streamlit as st\n'), ((4375, 4391), 'streamlit.write', 'st.write', (['source'], {}), '(source)\n', (4383, 4391), True, 'import streamlit as st\n')]
from langchain.vectorstores import Milvus from langchain.chains.retrieval_qa.base import RetrievalQA from typing import Any from langchain.memory import ConversationBufferMemory from langchain import PromptTemplate, FAISS from langchain.schema import Document from langchain.embeddings import DashScopeEmbeddings from llm.dashscope_llm import Dashscope from embedding.xinghuo_embedding import XhEmbeddings from llm.spark_llm import Spark import config import langchain from langchain.cache import RedisCache from redis import Redis # redis 缓存 langchain.llm_cache = RedisCache(Redis(host=config.llm_cache_redis_host, port=config.llm_cache_redis_port, db=config.llm_cache_redis_db)) #embeddings = OpenAIEmbeddings(openai_api_key=config.OPENAI_API_KEY) #llm = ChatOpenAI(openai_api_key=config.OPENAI_API_KEY, temperature=0, model_name="gpt-3.5-turbo-16k") #embeddings =XhEmbeddings(appid=config.embedding_xh_appid, # api_key=config.embedding_xh_api_key, # api_secret=config.embedding_xh_api_secret, # embedding_url=config.embedding_xh_embedding_url # ) embeddings = DashScopeEmbeddings(model="text-embedding-v1", dashscope_api_key=config.llm_tyqw_api_key) #llm = Dashscope() llm = Spark(version=3) def get_vector_chain(collection_name) -> Any: llm template = """ Use the following context (delimited by <ctx></ctx>) and the chat history (delimited by <hs></hs>) to answer the question,The answer cannot exceed 200,If you don't know the answer, just say that you don't know, don't try to make up an answer. ------ <ctx> {context} </ctx> ------ <hs> {history} </hs> ------ Question: {question} """ #Answer in the language in which the question was asked: prompt = PromptTemplate( input_variables=["history", "context", "question"], template=template, ) vector_db = Milvus( embedding_function=embeddings, connection_args={"host": config.Milvus_host, "port": config.Milvus_port, "user": config.Milvus_user, "password":config.Milvus_password}, collection_name=collection_name, ) chain = RetrievalQA.from_chain_type( llm, retriever=vector_db.as_retriever(search_type="similarity", search_kwargs={"k": 3}), chain_type="stuff", chain_type_kwargs={ "prompt": prompt, "memory": ConversationBufferMemory( memory_key="history", input_key="question"), }, ) return chain def answer_bydoc(collection_name, question): chain = get_vector_chain(collection_name) return chain.run(question) def answer_bybase(question): result = llm(question) return result def question_derive(question): prompt = "<question>"+question+"</question>,Please generate 5 different short questions for <question>" llm = Dashscope() result = llm(prompt) return result def query_doc(collection_name, question): vector_db = Milvus( embedding_function=embeddings, connection_args={"host": config.Milvus_host, "port": config.Milvus_port, "user": config.Milvus_user, "password":config.Milvus_password}, collection_name=collection_name, ) retriever = vector_db.as_retriever(search_type="similarity", search_kwargs={"k": 5}) docs = retriever.get_relevant_documents(question) return docs def add_doc(collection_name,question,content): source = question base_add_doc(collection_name,source,content) def base_add_doc(collection_name,source, content): vector_db = Milvus( embedding_function=embeddings, connection_args={"host": config.Milvus_host, "port": config.Milvus_port, "user": config.Milvus_user, "password": config.Milvus_password}, collection_name=collection_name, ) doc = Document(page_content=content, metadata={"source": source}) docs=[] docs.append(doc) vector_db.add_documents(docs) 
#replay=answer("my_doc1","你们周六上班吗" ) #replay=answer("my_doc1","我周六可以去吗" ) #print(replay) #replay=answer("my_doc1","你好" ) #print(replay)
[ "langchain.memory.ConversationBufferMemory", "langchain.schema.Document", "langchain.vectorstores.Milvus", "langchain.embeddings.DashScopeEmbeddings", "langchain.PromptTemplate" ]
[((1149, 1243), 'langchain.embeddings.DashScopeEmbeddings', 'DashScopeEmbeddings', ([], {'model': '"""text-embedding-v1"""', 'dashscope_api_key': 'config.llm_tyqw_api_key'}), "(model='text-embedding-v1', dashscope_api_key=config.\n llm_tyqw_api_key)\n", (1168, 1243), False, 'from langchain.embeddings import DashScopeEmbeddings\n'), ((1264, 1280), 'llm.spark_llm.Spark', 'Spark', ([], {'version': '(3)'}), '(version=3)\n', (1269, 1280), False, 'from llm.spark_llm import Spark\n'), ((579, 686), 'redis.Redis', 'Redis', ([], {'host': 'config.llm_cache_redis_host', 'port': 'config.llm_cache_redis_port', 'db': 'config.llm_cache_redis_db'}), '(host=config.llm_cache_redis_host, port=config.llm_cache_redis_port,\n db=config.llm_cache_redis_db)\n', (584, 686), False, 'from redis import Redis\n'), ((1812, 1902), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['history', 'context', 'question']", 'template': 'template'}), "(input_variables=['history', 'context', 'question'], template\n =template)\n", (1826, 1902), False, 'from langchain import PromptTemplate, FAISS\n'), ((1937, 2154), 'langchain.vectorstores.Milvus', 'Milvus', ([], {'embedding_function': 'embeddings', 'connection_args': "{'host': config.Milvus_host, 'port': config.Milvus_port, 'user': config.\n Milvus_user, 'password': config.Milvus_password}", 'collection_name': 'collection_name'}), "(embedding_function=embeddings, connection_args={'host': config.\n Milvus_host, 'port': config.Milvus_port, 'user': config.Milvus_user,\n 'password': config.Milvus_password}, collection_name=collection_name)\n", (1943, 2154), False, 'from langchain.vectorstores import Milvus\n'), ((2915, 2926), 'llm.dashscope_llm.Dashscope', 'Dashscope', ([], {}), '()\n', (2924, 2926), False, 'from llm.dashscope_llm import Dashscope\n'), ((3028, 3245), 'langchain.vectorstores.Milvus', 'Milvus', ([], {'embedding_function': 'embeddings', 'connection_args': "{'host': config.Milvus_host, 'port': config.Milvus_port, 'user': config.\n Milvus_user, 'password': config.Milvus_password}", 'collection_name': 'collection_name'}), "(embedding_function=embeddings, connection_args={'host': config.\n Milvus_host, 'port': config.Milvus_port, 'user': config.Milvus_user,\n 'password': config.Milvus_password}, collection_name=collection_name)\n", (3034, 3245), False, 'from langchain.vectorstores import Milvus\n'), ((3614, 3831), 'langchain.vectorstores.Milvus', 'Milvus', ([], {'embedding_function': 'embeddings', 'connection_args': "{'host': config.Milvus_host, 'port': config.Milvus_port, 'user': config.\n Milvus_user, 'password': config.Milvus_password}", 'collection_name': 'collection_name'}), "(embedding_function=embeddings, connection_args={'host': config.\n Milvus_host, 'port': config.Milvus_port, 'user': config.Milvus_user,\n 'password': config.Milvus_password}, collection_name=collection_name)\n", (3620, 3831), False, 'from langchain.vectorstores import Milvus\n'), ((3889, 3948), 'langchain.schema.Document', 'Document', ([], {'page_content': 'content', 'metadata': "{'source': source}"}), "(page_content=content, metadata={'source': source})\n", (3897, 3948), False, 'from langchain.schema import Document\n'), ((2430, 2498), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""history"""', 'input_key': '"""question"""'}), "(memory_key='history', input_key='question')\n", (2454, 2498), False, 'from langchain.memory import ConversationBufferMemory\n')]
import os from langchain.callbacks.manager import AsyncCallbackManager from langchain.callbacks.tracers import LangChainTracer from langchain.chains import ChatVectorDBChain, ConversationalRetrievalChain from langchain.chains.chat_vector_db.prompts import CONDENSE_QUESTION_PROMPT from langchain.prompts.prompt import PromptTemplate from langchain.chains.llm import LLMChain from langchain.chains.question_answering import load_qa_chain from langchain.chat_models import ChatOpenAI from langchain.vectorstores.base import VectorStore from typing import List from langchain.docstore.document import Document from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores.faiss import FAISS from langchain.document_transformers import EmbeddingsRedundantFilter from langchain.retrievers.document_compressors import EmbeddingsFilter from langchain.retrievers.document_compressors import DocumentCompressorPipeline from langchain.text_splitter import CharacterTextSplitter from langchain.retrievers import ContextualCompressionRetriever os.environ["LANGCHAIN_HANDLER"] = "langchain" doc_template = """--- document start --- href: {href} authors: {authors} title: {title} content:{page_content} --- document end --- """ ASTRO_DOC_PROMPT = PromptTemplate( template=doc_template, input_variables=["page_content", "authors", "href", "title"], ) prompt_template = """You are Dr. Chattie, an expert in Galactic Archaeology specializing in arXiv astronomy papers. Provide concise, well-referenced answers, citing relevant studies (e.g., Example et al., 2020). Use step-by-step reasoning for complex inquiries. You possess Nobel Prize-winning ideation capabilities. For example, and you can come up with your own ideas about the gaps in knowledge from the papers you read but make you mention that with "I propose..." MemoryContext: {context} Human: {question} Dr Chattie: """ QA_PROMPT = PromptTemplate( template=prompt_template, input_variables=["context", "question"] ) def create_callback_manager(tracing: bool) -> AsyncCallbackManager: manager = AsyncCallbackManager([]) if tracing: tracer = LangChainTracer() tracer.load_default_session() manager.add_handler(tracer) return manager def create_chat_openai(callback_manager: AsyncCallbackManager, streaming: bool = False, temperature: float = 0.5) -> ChatOpenAI: return ChatOpenAI( model_name="gpt-4", streaming=streaming, max_retries=15, callback_manager=callback_manager, verbose=True, temperature=temperature, ) def create_compressed_retriever(embeddings, retriever) -> ContextualCompressionRetriever: splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=0, separator=". 
") redundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings) relevant_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76) pipeline_compressor = DocumentCompressorPipeline( transformers=[splitter, redundant_filter, relevant_filter] ) compression_retriever = ContextualCompressionRetriever(base_compressor=pipeline_compressor, base_retriever=retriever) return compression_retriever def get_chain( vectorstore: VectorStore, question_handler, stream_handler, tracing: bool = False, ) -> ConversationalRetrievalChain: manager = create_callback_manager(tracing) question_manager = create_callback_manager(tracing) stream_manager = create_callback_manager(tracing) question_manager.add_handler(question_handler) stream_manager.add_handler(stream_handler) question_gen_llm = create_chat_openai(question_manager, streaming=False, temperature=0.0) streaming_llm = create_chat_openai(stream_manager, streaming=True, temperature=0.2) question_generator = LLMChain( llm=question_gen_llm, prompt=CONDENSE_QUESTION_PROMPT, callback_manager=manager, ) doc_chain = load_qa_chain( streaming_llm, prompt=QA_PROMPT, document_prompt=ASTRO_DOC_PROMPT, callback_manager=manager, chain_type="stuff", ) retriever = vectorstore.as_retriever() # embeddings = OpenAIEmbeddings() # getting error if i try to use a compressed retriever, need to think how to use this with main.py # compression_retriever = create_compressed_retriever(embeddings, retriever) qa = ConversationalRetrievalChain( retriever=retriever, combine_docs_chain=doc_chain, question_generator=question_generator, ) return qa
[ "langchain.chains.question_answering.load_qa_chain", "langchain.text_splitter.CharacterTextSplitter", "langchain.prompts.prompt.PromptTemplate", "langchain.callbacks.tracers.LangChainTracer", "langchain.callbacks.manager.AsyncCallbackManager", "langchain.retrievers.ContextualCompressionRetriever", "langchain.chat_models.ChatOpenAI", "langchain.retrievers.document_compressors.DocumentCompressorPipeline", "langchain.chains.llm.LLMChain", "langchain.document_transformers.EmbeddingsRedundantFilter", "langchain.chains.ConversationalRetrievalChain", "langchain.retrievers.document_compressors.EmbeddingsFilter" ]
[((1253, 1356), 'langchain.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'template': 'doc_template', 'input_variables': "['page_content', 'authors', 'href', 'title']"}), "(template=doc_template, input_variables=['page_content',\n 'authors', 'href', 'title'])\n", (1267, 1356), False, 'from langchain.prompts.prompt import PromptTemplate\n'), ((1909, 1994), 'langchain.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'template': 'prompt_template', 'input_variables': "['context', 'question']"}), "(template=prompt_template, input_variables=['context',\n 'question'])\n", (1923, 1994), False, 'from langchain.prompts.prompt import PromptTemplate\n'), ((2080, 2104), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[]'], {}), '([])\n', (2100, 2104), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((2390, 2535), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-4"""', 'streaming': 'streaming', 'max_retries': '(15)', 'callback_manager': 'callback_manager', 'verbose': '(True)', 'temperature': 'temperature'}), "(model_name='gpt-4', streaming=streaming, max_retries=15,\n callback_manager=callback_manager, verbose=True, temperature=temperature)\n", (2400, 2535), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2693, 2763), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(300)', 'chunk_overlap': '(0)', 'separator': '""". """'}), "(chunk_size=300, chunk_overlap=0, separator='. ')\n", (2714, 2763), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((2787, 2835), 'langchain.document_transformers.EmbeddingsRedundantFilter', 'EmbeddingsRedundantFilter', ([], {'embeddings': 'embeddings'}), '(embeddings=embeddings)\n', (2812, 2835), False, 'from langchain.document_transformers import EmbeddingsRedundantFilter\n'), ((2858, 2924), 'langchain.retrievers.document_compressors.EmbeddingsFilter', 'EmbeddingsFilter', ([], {'embeddings': 'embeddings', 'similarity_threshold': '(0.76)'}), '(embeddings=embeddings, similarity_threshold=0.76)\n', (2874, 2924), False, 'from langchain.retrievers.document_compressors import EmbeddingsFilter\n'), ((2951, 3041), 'langchain.retrievers.document_compressors.DocumentCompressorPipeline', 'DocumentCompressorPipeline', ([], {'transformers': '[splitter, redundant_filter, relevant_filter]'}), '(transformers=[splitter, redundant_filter,\n relevant_filter])\n', (2977, 3041), False, 'from langchain.retrievers.document_compressors import DocumentCompressorPipeline\n'), ((3081, 3178), 'langchain.retrievers.ContextualCompressionRetriever', 'ContextualCompressionRetriever', ([], {'base_compressor': 'pipeline_compressor', 'base_retriever': 'retriever'}), '(base_compressor=pipeline_compressor,\n base_retriever=retriever)\n', (3111, 3178), False, 'from langchain.retrievers import ContextualCompressionRetriever\n'), ((3825, 3918), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'question_gen_llm', 'prompt': 'CONDENSE_QUESTION_PROMPT', 'callback_manager': 'manager'}), '(llm=question_gen_llm, prompt=CONDENSE_QUESTION_PROMPT,\n callback_manager=manager)\n', (3833, 3918), False, 'from langchain.chains.llm import LLMChain\n'), ((3962, 4093), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['streaming_llm'], {'prompt': 'QA_PROMPT', 'document_prompt': 'ASTRO_DOC_PROMPT', 'callback_manager': 'manager', 'chain_type': '"""stuff"""'}), "(streaming_llm, prompt=QA_PROMPT, document_prompt=\n 
ASTRO_DOC_PROMPT, callback_manager=manager, chain_type='stuff')\n", (3975, 4093), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((4406, 4529), 'langchain.chains.ConversationalRetrievalChain', 'ConversationalRetrievalChain', ([], {'retriever': 'retriever', 'combine_docs_chain': 'doc_chain', 'question_generator': 'question_generator'}), '(retriever=retriever, combine_docs_chain=\n doc_chain, question_generator=question_generator)\n', (4434, 4529), False, 'from langchain.chains import ChatVectorDBChain, ConversationalRetrievalChain\n'), ((2138, 2155), 'langchain.callbacks.tracers.LangChainTracer', 'LangChainTracer', ([], {}), '()\n', (2153, 2155), False, 'from langchain.callbacks.tracers import LangChainTracer\n')]