method_name: string, lengths 1 to 78
method_body: string, lengths 3 to 9.66k
full_code: string, lengths 31 to 10.7k
docstring: string, lengths 4 to 4.74k
configurable_alternatives
from langchain_core.runnables.configurable import RunnableConfigurableAlternatives
return RunnableConfigurableAlternatives(
    which=which, default=self, alternatives=kwargs,
    default_key=default_key, prefix_keys=prefix_keys)

def configurable_alternatives(self, which: ConfigurableField, *,
        default_key: str = 'default', prefix_keys: bool = False,
        **kwargs: Union[Runnable[Input, Output], Callable[[], Runnable[Input, Output]]]
        ) -> RunnableSerializable[Input, Output]:
    from langchain_core.runnables.configurable import RunnableConfigurableAlternatives
    return RunnableConfigurableAlternatives(
        which=which, default=self, alternatives=kwargs,
        default_key=default_key, prefix_keys=prefix_keys)
null
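A brief usage sketch of configurable_alternatives as defined above; the ChatAnthropic/ChatOpenAI classes, the model name, and the with_config call are illustrative assumptions rather than part of this row.

from langchain_anthropic import ChatAnthropic      # assumed available
from langchain_core.runnables import ConfigurableField
from langchain_openai import ChatOpenAI            # assumed available

# Register an "openai" alternative alongside the default Anthropic model.
model = ChatAnthropic(model='claude-3-haiku-20240307').configurable_alternatives(
    ConfigurableField(id='llm'),   # `which`: the config key used to pick an alternative
    default_key='anthropic',       # name under which the default (self) is exposed
    openai=ChatOpenAI(),           # alternative selectable via the key 'openai'
)

# Select the alternative at invocation time instead of building a new chain.
model.with_config(configurable={'llm': 'openai'}).invoke('Hello')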
as_import_path
"""Path of the file as a LangChain import exclude langchain top namespace.""" if isinstance(file, str): file = Path(file) path = get_relative_path(file, relative_to=relative_to) if file.is_file(): path = path[:-len(file.suffix)] import_path = path.replace(SEPARATOR, '.') if suffix: import_path += '.' + suffix return import_path
def as_import_path(file: Union[Path, str], *, suffix: Optional[str]=None, relative_to: Path=PACKAGE_DIR) ->str: """Path of the file as a LangChain import exclude langchain top namespace.""" if isinstance(file, str): file = Path(file) path = get_relative_path(file, relative_to=relative_to) if file.is_file(): path = path[:-len(file.suffix)] import_path = path.replace(SEPARATOR, '.') if suffix: import_path += '.' + suffix return import_path
Path of the file as a LangChain import, excluding the langchain top-level namespace.
on_chat_model_start
assert all(isinstance(m, BaseMessage) for m in chain(*messages))
self.on_chat_model_start_common()

def on_chat_model_start(self, serialized: Dict[str, Any],
        messages: List[List[BaseMessage]], *, run_id: UUID,
        parent_run_id: Optional[UUID] = None, **kwargs: Any) -> Any:
    assert all(isinstance(m, BaseMessage) for m in chain(*messages))
    self.on_chat_model_start_common()
null
_convert_message_to_dict
"""Converts message to a dict according to role""" content = cast(str, message.content) if isinstance(message, HumanMessage): return {'role': 'user', 'content': ContentFormatterBase. escape_special_characters(content)} elif isinstance(message, AIMessage): return {'role': 'assistant', 'content': ContentFormatterBase. escape_special_characters(content)} elif isinstance(message, SystemMessage): return {'role': 'system', 'content': ContentFormatterBase. escape_special_characters(content)} elif isinstance(message, ChatMessage ) and message.role in LlamaContentFormatter.SUPPORTED_ROLES: return {'role': message.role, 'content': ContentFormatterBase. escape_special_characters(content)} else: supported = ','.join([role for role in LlamaContentFormatter. SUPPORTED_ROLES]) raise ValueError( f"""Received unsupported role. Supported roles for the LLaMa Foundation Model: {supported}""" )
@staticmethod def _convert_message_to_dict(message: BaseMessage) ->Dict: """Converts message to a dict according to role""" content = cast(str, message.content) if isinstance(message, HumanMessage): return {'role': 'user', 'content': ContentFormatterBase. escape_special_characters(content)} elif isinstance(message, AIMessage): return {'role': 'assistant', 'content': ContentFormatterBase. escape_special_characters(content)} elif isinstance(message, SystemMessage): return {'role': 'system', 'content': ContentFormatterBase. escape_special_characters(content)} elif isinstance(message, ChatMessage ) and message.role in LlamaContentFormatter.SUPPORTED_ROLES: return {'role': message.role, 'content': ContentFormatterBase. escape_special_characters(content)} else: supported = ','.join([role for role in LlamaContentFormatter. SUPPORTED_ROLES]) raise ValueError( f"""Received unsupported role. Supported roles for the LLaMa Foundation Model: {supported}""" )
Converts message to a dict according to role
_alias
self.write(t.name)
if t.asname:
    self.write(' as ' + t.asname)

def _alias(self, t):
    self.write(t.name)
    if t.asname:
        self.write(' as ' + t.asname)
null
setUp
self.loader = CubeSemanticLoader(cube_api_url='http://example.com',
                                 cube_api_token='test_token')

def setUp(self) -> None:
    self.loader = CubeSemanticLoader(cube_api_url='http://example.com',
                                     cube_api_token='test_token')
null
lookup
"""Look up based on prompt and llm_string."""
@abstractmethod
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
    """Look up based on prompt and llm_string."""
Look up based on prompt and llm_string.
_identifying_params
"""Get the identifying parameters.""" return {**{'model_name': self.model_name}, **self._default_params}
@property def _identifying_params(self) ->Mapping[str, Any]: """Get the identifying parameters.""" return {**{'model_name': self.model_name}, **self._default_params}
Get the identifying parameters.
_stream_response_to_generation_chunk
"""Convert a stream response to a generation chunk.""" if not stream_response['choices']: return GenerationChunk(text='') return GenerationChunk(text=stream_response['choices'][0]['text'], generation_info=dict(finish_reason=stream_response['choices'][0].get( 'finish_reason', None), logprobs=stream_response['choices'][0].get( 'logprobs', None)))
def _stream_response_to_generation_chunk(stream_response: Dict[str, Any] ) ->GenerationChunk: """Convert a stream response to a generation chunk.""" if not stream_response['choices']: return GenerationChunk(text='') return GenerationChunk(text=stream_response['choices'][0]['text'], generation_info=dict(finish_reason=stream_response['choices'][0]. get('finish_reason', None), logprobs=stream_response['choices'][0]. get('logprobs', None)))
Convert a stream response to a generation chunk.
text
return RedisText(field)
@staticmethod
def text(field: str) -> 'RedisText':
    return RedisText(field)
null
test_transform_keeps_order
bs_transformer = BeautifulSoupTransformer() multiple_tags_html = ( '<h1>First heading.</h1><p>First paragraph.</p><h1>Second heading.</h1><p>Second paragraph.</p>' ) documents = [Document(page_content=multiple_tags_html)] docs_transformed_p_then_h1 = bs_transformer.transform_documents(documents, tags_to_extract=['p', 'h1']) assert docs_transformed_p_then_h1[0 ].page_content == 'First heading. First paragraph. Second heading. Second paragraph.' documents = [Document(page_content=multiple_tags_html)] docs_transformed_h1_then_p = bs_transformer.transform_documents(documents, tags_to_extract=['h1', 'p']) assert docs_transformed_h1_then_p[0 ].page_content == 'First heading. First paragraph. Second heading. Second paragraph.'
@pytest.mark.requires('bs4') def test_transform_keeps_order() ->None: bs_transformer = BeautifulSoupTransformer() multiple_tags_html = ( '<h1>First heading.</h1><p>First paragraph.</p><h1>Second heading.</h1><p>Second paragraph.</p>' ) documents = [Document(page_content=multiple_tags_html)] docs_transformed_p_then_h1 = bs_transformer.transform_documents(documents, tags_to_extract=['p', 'h1']) assert docs_transformed_p_then_h1[0 ].page_content == 'First heading. First paragraph. Second heading. Second paragraph.' documents = [Document(page_content=multiple_tags_html)] docs_transformed_h1_then_p = bs_transformer.transform_documents(documents, tags_to_extract=['h1', 'p']) assert docs_transformed_h1_then_p[0 ].page_content == 'First heading. First paragraph. Second heading. Second paragraph.'
null
_invocation_params
params = self._default_params
if self.model_kwargs:
    params.update(self.model_kwargs)
if self.stop_sequences is not None and stop_sequences is not None:
    raise ValueError('`stop` found in both the input and default params.')
elif self.stop_sequences is not None:
    params['stop'] = self.stop_sequences
else:
    params['stop'] = stop_sequences
return {**params, **kwargs}

def _invocation_params(self, stop_sequences: Optional[List[str]], **kwargs: Any) -> dict:
    params = self._default_params
    if self.model_kwargs:
        params.update(self.model_kwargs)
    if self.stop_sequences is not None and stop_sequences is not None:
        raise ValueError('`stop` found in both the input and default params.')
    elif self.stop_sequences is not None:
        params['stop'] = self.stop_sequences
    else:
        params['stop'] = stop_sequences
    return {**params, **kwargs}
null
test_awadb_add_texts
"""Test end to end adding of texts.""" texts = ['foo', 'bar', 'baz'] docsearch = AwaDB.from_texts(table_name='test_awadb', texts=texts, embedding=FakeEmbeddings()) docsearch.add_texts(['foo']) output = docsearch.similarity_search('foo', k=2) assert output == [Document(page_content='foo'), Document(page_content='foo')]
def test_awadb_add_texts() ->None: """Test end to end adding of texts.""" texts = ['foo', 'bar', 'baz'] docsearch = AwaDB.from_texts(table_name='test_awadb', texts=texts, embedding=FakeEmbeddings()) docsearch.add_texts(['foo']) output = docsearch.similarity_search('foo', k=2) assert output == [Document(page_content='foo'), Document(page_content= 'foo')]
Test end to end adding of texts.
test_openai_callback_agent
from langchain.agents import AgentType, initialize_agent, load_tools llm = OpenAI(temperature=0) tools = load_tools(['serpapi', 'llm-math'], llm=llm) agent = initialize_agent(tools, llm, agent=AgentType. ZERO_SHOT_REACT_DESCRIPTION, verbose=True) with get_openai_callback() as cb: agent.run( "Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?" ) print(f'Total Tokens: {cb.total_tokens}') print(f'Prompt Tokens: {cb.prompt_tokens}') print(f'Completion Tokens: {cb.completion_tokens}') print(f'Total Cost (USD): ${cb.total_cost}')
def test_openai_callback_agent() ->None: from langchain.agents import AgentType, initialize_agent, load_tools llm = OpenAI(temperature=0) tools = load_tools(['serpapi', 'llm-math'], llm=llm) agent = initialize_agent(tools, llm, agent=AgentType. ZERO_SHOT_REACT_DESCRIPTION, verbose=True) with get_openai_callback() as cb: agent.run( "Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?" ) print(f'Total Tokens: {cb.total_tokens}') print(f'Prompt Tokens: {cb.prompt_tokens}') print(f'Completion Tokens: {cb.completion_tokens}') print(f'Total Cost (USD): ${cb.total_cost}')
null
__init__
"""Initialize with bilibili url. Args: video_urls: List of bilibili urls. """ self.video_urls = video_urls
def __init__(self, video_urls: List[str]): """Initialize with bilibili url. Args: video_urls: List of bilibili urls. """ self.video_urls = video_urls
Initialize with bilibili url. Args: video_urls: List of bilibili urls.
save_local
"""Save ScaNN index, docstore, and index_to_docstore_id to disk. Args: folder_path: folder path to save index, docstore, and index_to_docstore_id to. """ path = Path(folder_path) scann_path = path / '{index_name}.scann'.format(index_name=index_name) scann_path.mkdir(exist_ok=True, parents=True) self.index.serialize(str(scann_path)) with open(path / '{index_name}.pkl'.format(index_name=index_name), 'wb') as f: pickle.dump((self.docstore, self.index_to_docstore_id), f)
def save_local(self, folder_path: str, index_name: str='index') ->None: """Save ScaNN index, docstore, and index_to_docstore_id to disk. Args: folder_path: folder path to save index, docstore, and index_to_docstore_id to. """ path = Path(folder_path) scann_path = path / '{index_name}.scann'.format(index_name=index_name) scann_path.mkdir(exist_ok=True, parents=True) self.index.serialize(str(scann_path)) with open(path / '{index_name}.pkl'.format(index_name=index_name), 'wb' ) as f: pickle.dump((self.docstore, self.index_to_docstore_id), f)
Save ScaNN index, docstore, and index_to_docstore_id to disk. Args: folder_path: folder path to save index, docstore, and index_to_docstore_id to.
similarity_search_by_vector
"""The most k similar documents and scores of the specified query. Args: embeddings: embedding vector of the query. k: The k most similar documents to the text query. min_score: the score of similar documents to the text query Returns: The k most similar documents to the specified text query. 0 is dissimilar, 1 is the most similar. """ embed = np.array(embedding) if self.flag: query_data = {'query': {'sum': [{'field': 'text_embedding', 'feature': (embed / np.linalg.norm(embed)).tolist()}]}, 'size': k, 'fields': [ 'text', 'metadata']} query_result = self.vearch.search(self.using_db_name, self. using_table_name, query_data) res = query_result['hits']['hits'] else: query_data = {'vector': [{'field': 'text_embedding', 'feature': embed / np.linalg.norm(embed)}], 'fields': [], 'is_brute_search': 1, 'retrieval_param': {'metric_type': 'InnerProduct', 'nprobe': 20}, 'topn': k} query_result = self.vearch.search(query_data) res = query_result[0]['result_items'] docs = [] for item in res: content = '' meta_data = {} if self.flag: item = item['_source'] for item_key in item: if item_key == 'text': content = item[item_key] continue if item_key == 'metadata': meta_data['source'] = item[item_key] continue docs.append(Document(page_content=content, metadata=meta_data)) return docs
def similarity_search_by_vector(self, embedding: List[float], k: int= DEFAULT_TOPN, **kwargs: Any) ->List[Document]: """The most k similar documents and scores of the specified query. Args: embeddings: embedding vector of the query. k: The k most similar documents to the text query. min_score: the score of similar documents to the text query Returns: The k most similar documents to the specified text query. 0 is dissimilar, 1 is the most similar. """ embed = np.array(embedding) if self.flag: query_data = {'query': {'sum': [{'field': 'text_embedding', 'feature': (embed / np.linalg.norm(embed)).tolist()}]}, 'size': k, 'fields': ['text', 'metadata']} query_result = self.vearch.search(self.using_db_name, self. using_table_name, query_data) res = query_result['hits']['hits'] else: query_data = {'vector': [{'field': 'text_embedding', 'feature': embed / np.linalg.norm(embed)}], 'fields': [], 'is_brute_search': 1, 'retrieval_param': {'metric_type': 'InnerProduct', 'nprobe': 20}, 'topn': k} query_result = self.vearch.search(query_data) res = query_result[0]['result_items'] docs = [] for item in res: content = '' meta_data = {} if self.flag: item = item['_source'] for item_key in item: if item_key == 'text': content = item[item_key] continue if item_key == 'metadata': meta_data['source'] = item[item_key] continue docs.append(Document(page_content=content, metadata=meta_data)) return docs
Return the k most similar documents, with scores, for the specified query. Args: embedding: embedding vector of the query. k: number of most similar documents to return. min_score: minimum similarity score for returned documents. Returns: The k most similar documents to the specified text query; 0 is dissimilar, 1 is the most similar.
_get_memories_until_limit
"""Reduce the number of tokens in the documents.""" result = [] for doc in self.memory_retriever.memory_stream[::-1]: if consumed_tokens >= self.max_tokens_limit: break consumed_tokens += self.llm.get_num_tokens(doc.page_content) if consumed_tokens < self.max_tokens_limit: result.append(doc) return self.format_memories_simple(result)
def _get_memories_until_limit(self, consumed_tokens: int) ->str: """Reduce the number of tokens in the documents.""" result = [] for doc in self.memory_retriever.memory_stream[::-1]: if consumed_tokens >= self.max_tokens_limit: break consumed_tokens += self.llm.get_num_tokens(doc.page_content) if consumed_tokens < self.max_tokens_limit: result.append(doc) return self.format_memories_simple(result)
Reduce the number of tokens in the documents.
test_pdfminer_loader
"""Test PDFMiner loader.""" file_path = Path(__file__).parent.parent / 'examples/hello.pdf' loader = PDFMinerLoader(str(file_path)) docs = loader.load() assert len(docs) == 1 file_path = Path(__file__).parent.parent / 'examples/layout-parser-paper.pdf' loader = PDFMinerLoader(str(file_path)) docs = loader.load() assert len(docs) == 1 file_path = Path(__file__).parent.parent / 'examples/hello.pdf' loader = PDFMinerLoader(str(file_path), concatenate_pages=True) docs = loader.load() assert len(docs) == 1 file_path = Path(__file__).parent.parent / 'examples/layout-parser-paper.pdf' loader = PDFMinerLoader(str(file_path), concatenate_pages=False) docs = loader.load() assert len(docs) == 16
def test_pdfminer_loader() ->None: """Test PDFMiner loader.""" file_path = Path(__file__).parent.parent / 'examples/hello.pdf' loader = PDFMinerLoader(str(file_path)) docs = loader.load() assert len(docs) == 1 file_path = Path(__file__ ).parent.parent / 'examples/layout-parser-paper.pdf' loader = PDFMinerLoader(str(file_path)) docs = loader.load() assert len(docs) == 1 file_path = Path(__file__).parent.parent / 'examples/hello.pdf' loader = PDFMinerLoader(str(file_path), concatenate_pages=True) docs = loader.load() assert len(docs) == 1 file_path = Path(__file__ ).parent.parent / 'examples/layout-parser-paper.pdf' loader = PDFMinerLoader(str(file_path), concatenate_pages=False) docs = loader.load() assert len(docs) == 16
Test PDFMiner loader.
_import_tongyi
from langchain_community.llms.tongyi import Tongyi
return Tongyi

def _import_tongyi() -> Any:
    from langchain_community.llms.tongyi import Tongyi
    return Tongyi
null
test_confluence_loader_initialization
ConfluenceLoader(self.CONFLUENCE_URL, username=self.MOCK_USERNAME, api_key=self.MOCK_API_TOKEN)
mock_confluence.assert_called_once_with(url=self.CONFLUENCE_URL,
    username='[email protected]', password='api_token', cloud=True)

def test_confluence_loader_initialization(self, mock_confluence: MagicMock) -> None:
    ConfluenceLoader(self.CONFLUENCE_URL, username=self.MOCK_USERNAME, api_key=self.MOCK_API_TOKEN)
    mock_confluence.assert_called_once_with(url=self.CONFLUENCE_URL,
        username='[email protected]', password='api_token', cloud=True)
null
raise_deprecation
"""Raise deprecation warning if callback_manager is used.""" if values.get('callback_manager') is not None: warnings.warn( 'callback_manager is deprecated. Please use callbacks instead.', DeprecationWarning) values['callbacks'] = values.pop('callback_manager', None) return values
@root_validator() def raise_deprecation(cls, values: Dict) ->Dict: """Raise deprecation warning if callback_manager is used.""" if values.get('callback_manager') is not None: warnings.warn( 'callback_manager is deprecated. Please use callbacks instead.', DeprecationWarning) values['callbacks'] = values.pop('callback_manager', None) return values
Raise deprecation warning if callback_manager is used.
_import_baiducloud_vector_search
from langchain_community.vectorstores.baiducloud_vector_search import BESVectorStore
return BESVectorStore

def _import_baiducloud_vector_search() -> Any:
    from langchain_community.vectorstores.baiducloud_vector_search import BESVectorStore
    return BESVectorStore
null
_response_to_result
"""Converts a PaLM API response into a LangChain ChatResult.""" llm_output = {} if response.prompt_feedback: try: prompt_feedback = type(response.prompt_feedback).to_dict(response. prompt_feedback, use_integers_for_enums=False) llm_output['prompt_feedback'] = prompt_feedback except Exception as e: logger.debug(f'Unable to convert prompt_feedback to dict: {e}') generations: List[ChatGeneration] = [] role_map = {'model': ai_msg_t, 'user': human_msg_t} for candidate in response.candidates: content = candidate.content parts_content = _parts_to_content(content.parts) if content.role not in role_map: logger.warning( f'Unrecognized role: {content.role}. Treating as a ChatMessage.') msg = chat_msg_t(content=parts_content, role=content.role) else: msg = role_map[content.role](content=parts_content) generation_info = {} if candidate.finish_reason: generation_info['finish_reason'] = candidate.finish_reason.name if candidate.safety_ratings: generation_info['safety_ratings'] = [type(rating).to_dict(rating) for rating in candidate.safety_ratings] generations.append(generation_t(message=msg, generation_info= generation_info)) if not response.candidates: logger.warning( f"""Gemini produced an empty response. Continuing with empty message Feedback: {response.prompt_feedback}""" ) generations = [generation_t(message=ai_msg_t(content=''), generation_info={})] return ChatResult(generations=generations, llm_output=llm_output)
def _response_to_result(response: genai.types.GenerateContentResponse, ai_msg_t: Type[BaseMessage]=AIMessage, human_msg_t: Type[BaseMessage]= HumanMessage, chat_msg_t: Type[BaseMessage]=ChatMessage, generation_t: Type[ChatGeneration]=ChatGeneration) ->ChatResult: """Converts a PaLM API response into a LangChain ChatResult.""" llm_output = {} if response.prompt_feedback: try: prompt_feedback = type(response.prompt_feedback).to_dict(response .prompt_feedback, use_integers_for_enums=False) llm_output['prompt_feedback'] = prompt_feedback except Exception as e: logger.debug(f'Unable to convert prompt_feedback to dict: {e}') generations: List[ChatGeneration] = [] role_map = {'model': ai_msg_t, 'user': human_msg_t} for candidate in response.candidates: content = candidate.content parts_content = _parts_to_content(content.parts) if content.role not in role_map: logger.warning( f'Unrecognized role: {content.role}. Treating as a ChatMessage.' ) msg = chat_msg_t(content=parts_content, role=content.role) else: msg = role_map[content.role](content=parts_content) generation_info = {} if candidate.finish_reason: generation_info['finish_reason'] = candidate.finish_reason.name if candidate.safety_ratings: generation_info['safety_ratings'] = [type(rating).to_dict( rating) for rating in candidate.safety_ratings] generations.append(generation_t(message=msg, generation_info= generation_info)) if not response.candidates: logger.warning( f"""Gemini produced an empty response. Continuing with empty message Feedback: {response.prompt_feedback}""" ) generations = [generation_t(message=ai_msg_t(content=''), generation_info={})] return ChatResult(generations=generations, llm_output=llm_output)
Converts a PaLM API response into a LangChain ChatResult.
_identifying_params
return self._default_params
@property
def _identifying_params(self) -> Dict[str, Any]:
    return self._default_params
null
_import_playwright_ClickTool
from langchain_community.tools.playwright import ClickTool
return ClickTool

def _import_playwright_ClickTool() -> Any:
    from langchain_community.tools.playwright import ClickTool
    return ClickTool
null
load
"""Load documents.""" return list(self.lazy_load())
def load(self) ->List[Document]: """Load documents.""" return list(self.lazy_load())
Load documents.
test_results
"""Test that call gives the correct answer.""" search = BingSearchAPIWrapper() results = search.results("Obama's first name", num_results=5) result_contents = '\n'.join(f"{result['title']}: {result['snippet']}" for result in results) assert 'Barack Hussein Obama' in result_contents
def test_results() ->None: """Test that call gives the correct answer.""" search = BingSearchAPIWrapper() results = search.results("Obama's first name", num_results=5) result_contents = '\n'.join(f"{result['title']}: {result['snippet']}" for result in results) assert 'Barack Hussein Obama' in result_contents
Test that call gives the correct answer.
_is_delta_sync_index
"""Return True if the index is a delta-sync index.""" return self.index_type == 'DELTA_SYNC'
def _is_delta_sync_index(self) ->bool: """Return True if the index is a delta-sync index.""" return self.index_type == 'DELTA_SYNC'
Return True if the index is a delta-sync index.
main
pass
@app.callback()
def main(version: bool = typer.Option(False, '--version', '-v',
        help='Print the current CLI version.', callback=version_callback, is_eager=True)):
    pass
null
__init__
self._commands: List[str] = []
self.output = output

def __init__(self, output: str = '') -> None:
    self._commands: List[str] = []
    self.output = output
null
yield_keys
"""Yield keys in the store.""" if prefix: pattern = self._get_prefixed_key(prefix) else: pattern = self._get_prefixed_key('*') scan_iter = cast(Iterator[bytes], self.client.scan_iter(match=pattern)) for key in scan_iter: decoded_key = key.decode('utf-8') if self.namespace: relative_key = decoded_key[len(self.namespace) + 1:] yield relative_key else: yield decoded_key
def yield_keys(self, *, prefix: Optional[str]=None) ->Iterator[str]: """Yield keys in the store.""" if prefix: pattern = self._get_prefixed_key(prefix) else: pattern = self._get_prefixed_key('*') scan_iter = cast(Iterator[bytes], self.client.scan_iter(match=pattern)) for key in scan_iter: decoded_key = key.decode('utf-8') if self.namespace: relative_key = decoded_key[len(self.namespace) + 1:] yield relative_key else: yield decoded_key
Yield keys in the store.
update_with_delayed_score
""" Updates the learned policy with the score provided. Will raise an error if selection_scorer is set, and force_score=True was not provided during the method call """ if self._can_use_selection_scorer() and not force_score: raise RuntimeError( 'The selection scorer is set, and force_score was not set to True. Please set force_score=True to use this function.' ) if self.metrics: self.metrics.on_feedback(score) event: TEvent = chain_response['selection_metadata'] self._call_after_scoring_before_learning(event=event, score=score) self.active_policy.learn(event=event) self.active_policy.log(event=event)
def update_with_delayed_score(self, score: float, chain_response: Dict[str, Any], force_score: bool=False) ->None: """ Updates the learned policy with the score provided. Will raise an error if selection_scorer is set, and force_score=True was not provided during the method call """ if self._can_use_selection_scorer() and not force_score: raise RuntimeError( 'The selection scorer is set, and force_score was not set to True. Please set force_score=True to use this function.' ) if self.metrics: self.metrics.on_feedback(score) event: TEvent = chain_response['selection_metadata'] self._call_after_scoring_before_learning(event=event, score=score) self.active_policy.learn(event=event) self.active_policy.log(event=event)
Updates the learned policy with the score provided. Will raise an error if selection_scorer is set, and force_score=True was not provided during the method call
wrong_output_format
assert 'foo' in inputs
assert 'baz' in inputs
return 'hehe'

def wrong_output_format(inputs: dict) -> str:
    assert 'foo' in inputs
    assert 'baz' in inputs
    return 'hehe'
null
test_non_zero_distance
eval_chain = StringDistanceEvalChain(distance=distance, normalize_score=normalize_score)
prediction = 'I like to eat apples.'
reference = 'I like apples.'
result = eval_chain.evaluate_strings(prediction=prediction, reference=reference)
assert 'score' in result
assert 0 < result['score']
if normalize_score:
    assert result['score'] < 1.0

@pytest.mark.requires('rapidfuzz')
@pytest.mark.parametrize('distance', valid_distances)
@pytest.mark.parametrize('normalize_score', [True, False])
def test_non_zero_distance(distance: StringDistance, normalize_score: bool) -> None:
    eval_chain = StringDistanceEvalChain(distance=distance, normalize_score=normalize_score)
    prediction = 'I like to eat apples.'
    reference = 'I like apples.'
    result = eval_chain.evaluate_strings(prediction=prediction, reference=reference)
    assert 'score' in result
    assert 0 < result['score']
    if normalize_score:
        assert result['score'] < 1.0
null
_permute
"""Sort texts in ascending order, and delivers a lambda expr, which can sort a same length list https://github.com/UKPLab/sentence-transformers/blob/ c5f93f70eca933c78695c5bc686ceda59651ae3b/sentence_transformers/SentenceTransformer.py#L156 Args: texts (List[str]): _description_ sorter (Callable, optional): _description_. Defaults to len. Returns: Tuple[List[str], Callable]: _description_ Example: ``` texts = ["one","three","four"] perm_texts, undo = self._permute(texts) texts == undo(perm_texts) ``` """ if len(texts) == 1: return texts, lambda t: t length_sorted_idx = np.argsort([(-sorter(sen)) for sen in texts]) texts_sorted = [texts[idx] for idx in length_sorted_idx] return texts_sorted, lambda unsorted_embeddings: [unsorted_embeddings[idx] for idx in np.argsort(length_sorted_idx)]
@staticmethod def _permute(texts: List[str], sorter: Callable=len) ->Tuple[List[str], Callable]: """Sort texts in ascending order, and delivers a lambda expr, which can sort a same length list https://github.com/UKPLab/sentence-transformers/blob/ c5f93f70eca933c78695c5bc686ceda59651ae3b/sentence_transformers/SentenceTransformer.py#L156 Args: texts (List[str]): _description_ sorter (Callable, optional): _description_. Defaults to len. Returns: Tuple[List[str], Callable]: _description_ Example: ``` texts = ["one","three","four"] perm_texts, undo = self._permute(texts) texts == undo(perm_texts) ``` """ if len(texts) == 1: return texts, lambda t: t length_sorted_idx = np.argsort([(-sorter(sen)) for sen in texts]) texts_sorted = [texts[idx] for idx in length_sorted_idx] return texts_sorted, lambda unsorted_embeddings: [unsorted_embeddings[ idx] for idx in np.argsort(length_sorted_idx)]
Sort texts in ascending order of length and return a lambda expression that restores a same-length list to the original order. https://github.com/UKPLab/sentence-transformers/blob/c5f93f70eca933c78695c5bc686ceda59651ae3b/sentence_transformers/SentenceTransformer.py#L156 Args: texts (List[str]): _description_ sorter (Callable, optional): _description_. Defaults to len. Returns: Tuple[List[str], Callable]: _description_ Example: ``` texts = ["one","three","four"] perm_texts, undo = self._permute(texts) texts == undo(perm_texts) ```
__init__
super().__init__()
self.file_path = path if isinstance(path, Path) else Path(path)

def __init__(self, path: Union[Path, str]) -> None:
    super().__init__()
    self.file_path = path if isinstance(path, Path) else Path(path)
null
aggregate_lines_to_chunks
"""Combine lines with common metadata into chunks Args: lines: Line of text / associated header metadata """ aggregated_chunks: List[LineType] = [] for line in lines: if aggregated_chunks and aggregated_chunks[-1]['metadata'] == line[ 'metadata']: aggregated_chunks[-1]['content'] += ' \n' + line['content'] elif aggregated_chunks and aggregated_chunks[-1]['metadata'] != line[ 'metadata'] and len(aggregated_chunks[-1]['metadata']) < len(line[ 'metadata']) and aggregated_chunks[-1]['content'].split('\n')[-1][0 ] == '#' and not self.strip_headers: aggregated_chunks[-1]['content'] += ' \n' + line['content'] aggregated_chunks[-1]['metadata'] = line['metadata'] else: aggregated_chunks.append(line) return [Document(page_content=chunk['content'], metadata=chunk['metadata']) for chunk in aggregated_chunks]
def aggregate_lines_to_chunks(self, lines: List[LineType]) ->List[Document]: """Combine lines with common metadata into chunks Args: lines: Line of text / associated header metadata """ aggregated_chunks: List[LineType] = [] for line in lines: if aggregated_chunks and aggregated_chunks[-1]['metadata'] == line[ 'metadata']: aggregated_chunks[-1]['content'] += ' \n' + line['content'] elif aggregated_chunks and aggregated_chunks[-1]['metadata'] != line[ 'metadata'] and len(aggregated_chunks[-1]['metadata']) < len(line ['metadata']) and aggregated_chunks[-1]['content'].split('\n')[-1][ 0] == '#' and not self.strip_headers: aggregated_chunks[-1]['content'] += ' \n' + line['content'] aggregated_chunks[-1]['metadata'] = line['metadata'] else: aggregated_chunks.append(line) return [Document(page_content=chunk['content'], metadata=chunk[ 'metadata']) for chunk in aggregated_chunks]
Combine lines with common metadata into chunks Args: lines: Line of text / associated header metadata
_identifying_params
"""Get the identifying parameters.""" return {'base_url': self.base_url, **{}, **self._default_params}
@property def _identifying_params(self) ->Mapping[str, Any]: """Get the identifying parameters.""" return {'base_url': self.base_url, **{}, **self._default_params}
Get the identifying parameters.
similarity_search_with_relevance_scores
"""Perform similarity retrieval based on text with scores. Args: query: Vectorize text for retrieval.,should not be empty. k: top n. search_filter: Additional filtering conditions. Returns: document_list: List of documents. """ embedding: List[float] = self.embedding.embed_query(query) return self.create_results_with_score(self.inner_embedding_query(embedding= embedding, search_filter=search_filter, k=k))
def similarity_search_with_relevance_scores(self, query: str, k: int=4, search_filter: Optional[dict]=None, **kwargs: Any) ->List[Tuple[ Document, float]]: """Perform similarity retrieval based on text with scores. Args: query: Vectorize text for retrieval.,should not be empty. k: top n. search_filter: Additional filtering conditions. Returns: document_list: List of documents. """ embedding: List[float] = self.embedding.embed_query(query) return self.create_results_with_score(self.inner_embedding_query( embedding=embedding, search_filter=search_filter, k=k))
Perform similarity retrieval based on text, with scores. Args: query: Text to vectorize for retrieval; should not be empty. k: top n. search_filter: Additional filtering conditions. Returns: document_list: List of documents.
create_client
try:
    from zep_python import ZepClient
except ImportError:
    raise ImportError(
        'Could not import zep-python package. Please install it with `pip install zep-python`.'
    )
values['zep_client'] = values.get('zep_client', ZepClient(
    base_url=values['url'], api_key=values.get('api_key')))
return values

@root_validator(pre=True)
def create_client(cls, values: dict) -> dict:
    try:
        from zep_python import ZepClient
    except ImportError:
        raise ImportError(
            'Could not import zep-python package. Please install it with `pip install zep-python`.'
        )
    values['zep_client'] = values.get('zep_client', ZepClient(
        base_url=values['url'], api_key=values.get('api_key')))
    return values
null
similarity_search
vector = self._embedding.embed_query(query)
return self.similarity_search_by_vector(vector, k=k, filter=filter, **kwargs)

def similarity_search(self, query: str, k: int = 4,
        filter: Optional[Dict[str, Any]] = None, **kwargs: Any) -> List[Document]:
    vector = self._embedding.embed_query(query)
    return self.similarity_search_by_vector(vector, k=k, filter=filter, **kwargs)
null
add_texts
"""Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of unique IDs. refresh_indices: bool to refresh ElasticSearch indices Returns: List of ids from adding the texts into the vectorstore. """ try: from elasticsearch.exceptions import NotFoundError from elasticsearch.helpers import bulk except ImportError: raise ImportError( 'Could not import elasticsearch python package. Please install it with `pip install elasticsearch`.' ) requests = [] ids = ids or [str(uuid.uuid4()) for _ in texts] embeddings = self.embedding.embed_documents(list(texts)) dim = len(embeddings[0]) mapping = _default_text_mapping(dim) try: self.client.indices.get(index=self.index_name) except NotFoundError: self.create_index(self.client, self.index_name, mapping) for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} request = {'_op_type': 'index', '_index': self.index_name, 'vector': embeddings[i], 'text': text, 'metadata': metadata, '_id': ids[i]} requests.append(request) bulk(self.client, requests) if refresh_indices: self.client.indices.refresh(index=self.index_name) return ids
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]= None, ids: Optional[List[str]]=None, refresh_indices: bool=True, ** kwargs: Any) ->List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of unique IDs. refresh_indices: bool to refresh ElasticSearch indices Returns: List of ids from adding the texts into the vectorstore. """ try: from elasticsearch.exceptions import NotFoundError from elasticsearch.helpers import bulk except ImportError: raise ImportError( 'Could not import elasticsearch python package. Please install it with `pip install elasticsearch`.' ) requests = [] ids = ids or [str(uuid.uuid4()) for _ in texts] embeddings = self.embedding.embed_documents(list(texts)) dim = len(embeddings[0]) mapping = _default_text_mapping(dim) try: self.client.indices.get(index=self.index_name) except NotFoundError: self.create_index(self.client, self.index_name, mapping) for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} request = {'_op_type': 'index', '_index': self.index_name, 'vector': embeddings[i], 'text': text, 'metadata': metadata, '_id': ids[i]} requests.append(request) bulk(self.client, requests) if refresh_indices: self.client.indices.refresh(index=self.index_name) return ids
Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of unique IDs. refresh_indices: bool to refresh ElasticSearch indices Returns: List of ids from adding the texts into the vectorstore.
_llm_type
"""Return type of chat model.""" return 'chat-anthropic-messages'
@property def _llm_type(self) ->str: """Return type of chat model.""" return 'chat-anthropic-messages'
Return type of chat model.
on_llm_end
self.saved_things['generation'] = args[0]
def on_llm_end(self, *args: Any, **kwargs: Any) -> Any:
    self.saved_things['generation'] = args[0]
null
test_formatting
result = convert_messages_to_prompt_anthropic(messages) assert result == expected
@pytest.mark.parametrize(('messages', 'expected'), [([HumanMessage(content= 'Hello')], """ Human: Hello Assistant:"""), ([HumanMessage(content= 'Hello'), AIMessage(content='Answer:')], """ Human: Hello Assistant: Answer:"""), ([SystemMessage(content= "You're an assistant"), HumanMessage(content='Hello'), AIMessage( content='Answer:')], """You're an assistant Human: Hello Assistant: Answer:""")]) def test_formatting(messages: List[BaseMessage], expected: str) ->None: result = convert_messages_to_prompt_anthropic(messages) assert result == expected
null
test_explicitly_no_scorer
llm, PROMPT = setup()
chain = pick_best_chain.PickBest.from_llm(
    llm=llm, prompt=PROMPT, selection_scorer=None,
    feature_embedder=pick_best_chain.PickBestFeatureEmbedder(
        auto_embed=False, model=MockEncoder()))
response = chain.run(User=rl_chain.BasedOn('Context'),
                     action=rl_chain.ToSelectFrom(['0', '1', '2']))
assert response['response'] == 'hey'
selection_metadata = response['selection_metadata']
assert selection_metadata.selected.score is None

@pytest.mark.requires('vowpal_wabbit_next', 'sentence_transformers')
def test_explicitly_no_scorer() -> None:
    llm, PROMPT = setup()
    chain = pick_best_chain.PickBest.from_llm(
        llm=llm, prompt=PROMPT, selection_scorer=None,
        feature_embedder=pick_best_chain.PickBestFeatureEmbedder(
            auto_embed=False, model=MockEncoder()))
    response = chain.run(User=rl_chain.BasedOn('Context'),
                         action=rl_chain.ToSelectFrom(['0', '1', '2']))
    assert response['response'] == 'hey'
    selection_metadata = response['selection_metadata']
    assert selection_metadata.selected.score is None
null
test_continue_on_failure_true
"""Test exception is not raised when continue_on_failure=True.""" loader = NewsURLLoader(['badurl.foobar']) loader.load()
def test_continue_on_failure_true() ->None: """Test exception is not raised when continue_on_failure=True.""" loader = NewsURLLoader(['badurl.foobar']) loader.load()
Test exception is not raised when continue_on_failure=True.
as_bytes_io
"""Read data as a byte stream.""" if isinstance(self.data, bytes): yield BytesIO(self.data) elif self.data is None and self.path: with open(str(self.path), 'rb') as f: yield f else: raise NotImplementedError(f'Unable to convert blob {self}')
@contextlib.contextmanager def as_bytes_io(self) ->Generator[Union[BytesIO, BufferedReader], None, None]: """Read data as a byte stream.""" if isinstance(self.data, bytes): yield BytesIO(self.data) elif self.data is None and self.path: with open(str(self.path), 'rb') as f: yield f else: raise NotImplementedError(f'Unable to convert blob {self}')
Read data as a byte stream.
combine_embeddings
"""Combine embeddings into final embeddings.""" return list(np.array(embeddings).mean(axis=0))
def combine_embeddings(self, embeddings: List[List[float]]) ->List[float]: """Combine embeddings into final embeddings.""" return list(np.array(embeddings).mean(axis=0))
Combine embeddings into final embeddings.
embed_query
"""Call out to Clarifai's embedding models. Args: text: The text to embed. Returns: Embeddings for the text. """ try: from clarifai.client.model import Model except ImportError: raise ImportError( 'Could not import clarifai python package. Please install it with `pip install clarifai`.' ) if self.pat is not None: pat = self.pat if self.model_url is not None: _model_init = Model(url=self.model_url, pat=pat) else: _model_init = Model(model_id=self.model_id, user_id=self.user_id, app_id=self.app_id, pat=pat) try: predict_response = _model_init.predict_by_bytes(bytes(text, 'utf-8'), input_type='text') embeddings = [list(op.data.embeddings[0].vector) for op in predict_response.outputs] except Exception as e: logger.error(f'Predict failed, exception: {e}') return embeddings[0]
def embed_query(self, text: str) ->List[float]: """Call out to Clarifai's embedding models. Args: text: The text to embed. Returns: Embeddings for the text. """ try: from clarifai.client.model import Model except ImportError: raise ImportError( 'Could not import clarifai python package. Please install it with `pip install clarifai`.' ) if self.pat is not None: pat = self.pat if self.model_url is not None: _model_init = Model(url=self.model_url, pat=pat) else: _model_init = Model(model_id=self.model_id, user_id=self.user_id, app_id=self.app_id, pat=pat) try: predict_response = _model_init.predict_by_bytes(bytes(text, 'utf-8' ), input_type='text') embeddings = [list(op.data.embeddings[0].vector) for op in predict_response.outputs] except Exception as e: logger.error(f'Predict failed, exception: {e}') return embeddings[0]
Call out to Clarifai's embedding models. Args: text: The text to embed. Returns: Embeddings for the text.
test_conversation_chain_errors_bad_variable
"""Test that conversation chain raise error with bad variable.""" llm = FakeLLM() prompt = PromptTemplate(input_variables=['foo'], template='{foo}') memory = ConversationBufferMemory(memory_key='foo') with pytest.raises(ValueError): ConversationChain(llm=llm, prompt=prompt, memory=memory, input_key='foo')
def test_conversation_chain_errors_bad_variable() ->None: """Test that conversation chain raise error with bad variable.""" llm = FakeLLM() prompt = PromptTemplate(input_variables=['foo'], template='{foo}') memory = ConversationBufferMemory(memory_key='foo') with pytest.raises(ValueError): ConversationChain(llm=llm, prompt=prompt, memory=memory, input_key= 'foo')
Test that conversation chain raise error with bad variable.
_import_tair
from langchain_community.vectorstores.tair import Tair
return Tair

def _import_tair() -> Any:
    from langchain_community.vectorstores.tair import Tair
    return Tair
null
_prepare_query
new_query, new_kwargs = self.structured_query_translator.visit_structured_query(
    structured_query)
if structured_query.limit is not None:
    new_kwargs['k'] = structured_query.limit
if self.use_original_query:
    new_query = query
search_kwargs = {**self.search_kwargs, **new_kwargs}
return new_query, search_kwargs

def _prepare_query(self, query: str,
        structured_query: StructuredQuery) -> Tuple[str, Dict[str, Any]]:
    new_query, new_kwargs = self.structured_query_translator.visit_structured_query(
        structured_query)
    if structured_query.limit is not None:
        new_kwargs['k'] = structured_query.limit
    if self.use_original_query:
        new_query = query
    search_kwargs = {**self.search_kwargs, **new_kwargs}
    return new_query, search_kwargs
null
format_tool_to_openai_function
"""Format tool into the OpenAI function API.""" if tool.args_schema: return convert_pydantic_to_openai_function(tool.args_schema, name=tool. name, description=tool.description) else: return {'name': tool.name, 'description': tool.description, 'parameters': {'properties': {'__arg1': {'title': '__arg1', 'type': 'string'}}, 'required': ['__arg1'], 'type': 'object'}}
def format_tool_to_openai_function(tool: BaseTool) ->FunctionDescription: """Format tool into the OpenAI function API.""" if tool.args_schema: return convert_pydantic_to_openai_function(tool.args_schema, name= tool.name, description=tool.description) else: return {'name': tool.name, 'description': tool.description, 'parameters': {'properties': {'__arg1': {'title': '__arg1', 'type': 'string'}}, 'required': ['__arg1'], 'type': 'object'}}
Format tool into the OpenAI function API.
_parse_response
if len(response) == 1:
    result = self._parse_json(response[0])
else:
    for entry in response:
        if entry.get('provider') == 'eden-ai':
            result = self._parse_json(entry)
return result

def _parse_response(self, response: list) -> str:
    if len(response) == 1:
        result = self._parse_json(response[0])
    else:
        for entry in response:
            if entry.get('provider') == 'eden-ai':
                result = self._parse_json(entry)
    return result
null
aggregate_elements_to_chunks
"""Combine elements with common metadata into chunks Args: elements: HTML element content with associated identifying info and metadata """ aggregated_chunks: List[ElementType] = [] for element in elements: if aggregated_chunks and aggregated_chunks[-1]['metadata'] == element[ 'metadata']: aggregated_chunks[-1]['content'] += ' \n' + element['content'] else: aggregated_chunks.append(element) return [Document(page_content=chunk['content'], metadata=chunk['metadata']) for chunk in aggregated_chunks]
def aggregate_elements_to_chunks(self, elements: List[ElementType]) ->List[ Document]: """Combine elements with common metadata into chunks Args: elements: HTML element content with associated identifying info and metadata """ aggregated_chunks: List[ElementType] = [] for element in elements: if aggregated_chunks and aggregated_chunks[-1]['metadata'] == element[ 'metadata']: aggregated_chunks[-1]['content'] += ' \n' + element['content'] else: aggregated_chunks.append(element) return [Document(page_content=chunk['content'], metadata=chunk[ 'metadata']) for chunk in aggregated_chunks]
Combine elements with common metadata into chunks Args: elements: HTML element content with associated identifying info and metadata
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() -> None:
    assert set(__all__) == set(EXPECTED_ALL)
null
test_not_an_ai
err = f'Expected an AI message got {str(SystemMessage)}'
with pytest.raises(TypeError, match=err):
    _parse_ai_message(SystemMessage(content='x'))

def test_not_an_ai(self) -> None:
    err = f'Expected an AI message got {str(SystemMessage)}'
    with pytest.raises(TypeError, match=err):
        _parse_ai_message(SystemMessage(content='x'))
null
test_duckdb_loader_metadata_columns
"""Test DuckDB loader.""" loader = DuckDBLoader('SELECT 1 AS a, 2 AS b', page_content_columns=['a'], metadata_columns=['b']) docs = loader.load() assert len(docs) == 1 assert docs[0].page_content == 'a: 1' assert docs[0].metadata == {'b': 2}
@unittest.skipIf(not duckdb_installed, 'duckdb not installed') def test_duckdb_loader_metadata_columns() ->None: """Test DuckDB loader.""" loader = DuckDBLoader('SELECT 1 AS a, 2 AS b', page_content_columns=[ 'a'], metadata_columns=['b']) docs = loader.load() assert len(docs) == 1 assert docs[0].page_content == 'a: 1' assert docs[0].metadata == {'b': 2}
Test DuckDB loader.
_import_spark_sql_tool_ListSparkSQLTool
from langchain_community.tools.spark_sql.tool import ListSparkSQLTool
return ListSparkSQLTool

def _import_spark_sql_tool_ListSparkSQLTool() -> Any:
    from langchain_community.tools.spark_sql.tool import ListSparkSQLTool
    return ListSparkSQLTool
null
_stream
message_dicts = self._create_message_dicts(messages) default_chunk_class = AIMessageChunk params = {'model': self.model, 'messages': message_dicts, 'stream': True, **self.model_kwargs, **kwargs} for chunk in completion_with_retry(self, self.use_retry, run_manager= run_manager, stop=stop, **params): choice = chunk.choices[0] chunk = _convert_delta_to_message_chunk(choice.delta, default_chunk_class) finish_reason = choice.finish_reason generation_info = dict(finish_reason=finish_reason ) if finish_reason is not None else None default_chunk_class = chunk.__class__ chunk = ChatGenerationChunk(message=chunk, generation_info=generation_info) yield chunk if run_manager: run_manager.on_llm_new_token(chunk.text, chunk=chunk)
def _stream(self, messages: List[BaseMessage], stop: Optional[List[str]]= None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any ) ->Iterator[ChatGenerationChunk]: message_dicts = self._create_message_dicts(messages) default_chunk_class = AIMessageChunk params = {'model': self.model, 'messages': message_dicts, 'stream': True, **self.model_kwargs, **kwargs} for chunk in completion_with_retry(self, self.use_retry, run_manager= run_manager, stop=stop, **params): choice = chunk.choices[0] chunk = _convert_delta_to_message_chunk(choice.delta, default_chunk_class) finish_reason = choice.finish_reason generation_info = dict(finish_reason=finish_reason ) if finish_reason is not None else None default_chunk_class = chunk.__class__ chunk = ChatGenerationChunk(message=chunk, generation_info= generation_info) yield chunk if run_manager: run_manager.on_llm_new_token(chunk.text, chunk=chunk)
null
test_qdrant_similarity_search_filters_with_qdrant_filters
"""Test end to end construction and search.""" from qdrant_client.http import models as rest texts = ['foo', 'bar', 'baz'] metadatas = [{'page': i, 'details': {'page': i + 1, 'pages': [i + 2, -1]}} for i in range(len(texts))] docsearch = Qdrant.from_texts(texts, ConsistentFakeEmbeddings(), metadatas= metadatas, location=':memory:', vector_name=vector_name) qdrant_filter = rest.Filter(must=[rest.FieldCondition(key='metadata.page', match=rest.MatchValue(value=1)), rest.FieldCondition(key= 'metadata.details.page', match=rest.MatchValue(value=2)), rest. FieldCondition(key='metadata.details.pages', match=rest.MatchAny(any=[3]))] ) output = docsearch.similarity_search('foo', k=1, filter=qdrant_filter) assert output == [Document(page_content='bar', metadata={'page': 1, 'details': {'page': 2, 'pages': [3, -1]}})]
@pytest.mark.parametrize('vector_name', [None, 'my-vector']) def test_qdrant_similarity_search_filters_with_qdrant_filters(vector_name: Optional[str]) ->None: """Test end to end construction and search.""" from qdrant_client.http import models as rest texts = ['foo', 'bar', 'baz'] metadatas = [{'page': i, 'details': {'page': i + 1, 'pages': [i + 2, -1 ]}} for i in range(len(texts))] docsearch = Qdrant.from_texts(texts, ConsistentFakeEmbeddings(), metadatas=metadatas, location=':memory:', vector_name=vector_name) qdrant_filter = rest.Filter(must=[rest.FieldCondition(key= 'metadata.page', match=rest.MatchValue(value=1)), rest. FieldCondition(key='metadata.details.page', match=rest.MatchValue( value=2)), rest.FieldCondition(key='metadata.details.pages', match= rest.MatchAny(any=[3]))]) output = docsearch.similarity_search('foo', k=1, filter=qdrant_filter) assert output == [Document(page_content='bar', metadata={'page': 1, 'details': {'page': 2, 'pages': [3, -1]}})]
Test end to end construction and search.
from_texts
"""Create ClickHouse wrapper with existing texts Args: embedding_function (Embeddings): Function to extract text embedding texts (Iterable[str]): List or tuple of strings to be added config (ClickHouseSettings, Optional): ClickHouse configuration text_ids (Optional[Iterable], optional): IDs for the texts. Defaults to None. batch_size (int, optional): Batchsize when transmitting data to ClickHouse. Defaults to 32. metadata (List[dict], optional): metadata to texts. Defaults to None. Other keyword arguments will pass into [clickhouse-connect](https://clickhouse.com/docs/en/integrations/python#clickhouse-connect-driver-api) Returns: ClickHouse Index """ ctx = cls(embedding, config, **kwargs) ctx.add_texts(texts, ids=text_ids, batch_size=batch_size, metadatas=metadatas) return ctx
@classmethod def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[Dict[Any, Any]]]=None, config: Optional[ ClickhouseSettings]=None, text_ids: Optional[Iterable[str]]=None, batch_size: int=32, **kwargs: Any) ->Clickhouse: """Create ClickHouse wrapper with existing texts Args: embedding_function (Embeddings): Function to extract text embedding texts (Iterable[str]): List or tuple of strings to be added config (ClickHouseSettings, Optional): ClickHouse configuration text_ids (Optional[Iterable], optional): IDs for the texts. Defaults to None. batch_size (int, optional): Batchsize when transmitting data to ClickHouse. Defaults to 32. metadata (List[dict], optional): metadata to texts. Defaults to None. Other keyword arguments will pass into [clickhouse-connect](https://clickhouse.com/docs/en/integrations/python#clickhouse-connect-driver-api) Returns: ClickHouse Index """ ctx = cls(embedding, config, **kwargs) ctx.add_texts(texts, ids=text_ids, batch_size=batch_size, metadatas= metadatas) return ctx
Create ClickHouse wrapper with existing texts. Args: embedding_function (Embeddings): Function to extract text embedding texts (Iterable[str]): List or tuple of strings to be added config (ClickHouseSettings, Optional): ClickHouse configuration text_ids (Optional[Iterable], optional): IDs for the texts. Defaults to None. batch_size (int, optional): Batch size when transmitting data to ClickHouse. Defaults to 32. metadata (List[dict], optional): metadata to texts. Defaults to None. Other keyword arguments will pass into [clickhouse-connect](https://clickhouse.com/docs/en/integrations/python#clickhouse-connect-driver-api) Returns: ClickHouse Index
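A minimal usage sketch based only on the from_texts signature above; the ClickhouseSettings host/port/table values and the FakeEmbeddings placeholder are illustrative assumptions about a running ClickHouse instance, not part of this row.

from langchain_community.embeddings import FakeEmbeddings  # placeholder embedding model
from langchain_community.vectorstores import Clickhouse, ClickhouseSettings

# Hypothetical connection settings; adjust host/port/table for a real deployment.
config = ClickhouseSettings(host='localhost', port=8123, table='langchain_demo')
docsearch = Clickhouse.from_texts(
    texts=['foo', 'bar', 'baz'],
    embedding=FakeEmbeddings(size=8),
    config=config,
    batch_size=32,
)
docs = docsearch.similarity_search('foo', k=2)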
__init__
"""Init the pipeline with an auxiliary function. The load function must be in global scope to be imported and run on the server, i.e. in a module and not a REPL or closure. Then, initialize the remote inference function. """ super().__init__(**kwargs) try: import runhouse as rh except ImportError: raise ImportError( 'Could not import runhouse python package. Please install it with `pip install runhouse`.' ) remote_load_fn = rh.function(fn=self.model_load_fn).to(self.hardware, reqs= self.model_reqs) _load_fn_kwargs = self.load_fn_kwargs or {} self.pipeline_ref = remote_load_fn.remote(**_load_fn_kwargs) self.client = rh.function(fn=self.inference_fn).to(self.hardware, reqs=self .model_reqs)
def __init__(self, **kwargs: Any): """Init the pipeline with an auxiliary function. The load function must be in global scope to be imported and run on the server, i.e. in a module and not a REPL or closure. Then, initialize the remote inference function. """ super().__init__(**kwargs) try: import runhouse as rh except ImportError: raise ImportError( 'Could not import runhouse python package. Please install it with `pip install runhouse`.' ) remote_load_fn = rh.function(fn=self.model_load_fn).to(self.hardware, reqs=self.model_reqs) _load_fn_kwargs = self.load_fn_kwargs or {} self.pipeline_ref = remote_load_fn.remote(**_load_fn_kwargs) self.client = rh.function(fn=self.inference_fn).to(self.hardware, reqs= self.model_reqs)
Init the pipeline with an auxiliary function. The load function must be in global scope to be imported and run on the server, i.e. in a module and not a REPL or closure. Then, initialize the remote inference function.
test_deeplakewith_persistence
"""Test end to end construction and search, with persistence.""" import deeplake dataset_path = './tests/persist_dir' if deeplake.exists(dataset_path): deeplake.delete(dataset_path) texts = ['foo', 'bar', 'baz'] docsearch = DeepLake.from_texts(dataset_path=dataset_path, texts=texts, embedding=FakeEmbeddings()) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')] docsearch = DeepLake(dataset_path=dataset_path, embedding_function= FakeEmbeddings()) output = docsearch.similarity_search('foo', k=1) docsearch.delete_dataset()
def test_deeplakewith_persistence() ->None: """Test end to end construction and search, with persistence.""" import deeplake dataset_path = './tests/persist_dir' if deeplake.exists(dataset_path): deeplake.delete(dataset_path) texts = ['foo', 'bar', 'baz'] docsearch = DeepLake.from_texts(dataset_path=dataset_path, texts=texts, embedding=FakeEmbeddings()) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')] docsearch = DeepLake(dataset_path=dataset_path, embedding_function= FakeEmbeddings()) output = docsearch.similarity_search('foo', k=1) docsearch.delete_dataset()
Test end to end construction and search, with persistence.
_sync_request_embed
response = requests.post(**self._kwargs_post_request(model=model, texts= batch_texts)) if response.status_code != 200: raise Exception( f'Infinity returned an unexpected response with status {response.status_code}: {response.text}' ) return [e['embedding'] for e in response.json()['data']]
def _sync_request_embed(self, model: str, batch_texts: List[str]) ->List[List [float]]: response = requests.post(**self._kwargs_post_request(model=model, texts =batch_texts)) if response.status_code != 200: raise Exception( f'Infinity returned an unexpected response with status {response.status_code}: {response.text}' ) return [e['embedding'] for e in response.json()['data']]
null
test_load_uses_page_content_column_to_create_document_text
sample_data_frame = sample_data_frame.rename(mapping={'text': 'dummy_test_column'}) loader = PolarsDataFrameLoader(sample_data_frame, page_content_column= 'dummy_test_column') docs = loader.load() assert docs[0].page_content == 'Hello' assert docs[1].page_content == 'World'
def test_load_uses_page_content_column_to_create_document_text( sample_data_frame: pl.DataFrame) ->None: sample_data_frame = sample_data_frame.rename(mapping={'text': 'dummy_test_column'}) loader = PolarsDataFrameLoader(sample_data_frame, page_content_column= 'dummy_test_column') docs = loader.load() assert docs[0].page_content == 'Hello' assert docs[1].page_content == 'World'
null
weighted_reciprocal_rank
""" Perform weighted Reciprocal Rank Fusion on multiple rank lists. You can find more details about RRF here: https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf Args: doc_lists: A list of rank lists, where each rank list contains unique items. Returns: list: The final aggregated list of items sorted by their weighted RRF scores in descending order. """ if len(doc_lists) != len(self.weights): raise ValueError( 'Number of rank lists must be equal to the number of weights.') all_documents = set() for doc_list in doc_lists: for doc in doc_list: all_documents.add(doc.page_content) rrf_score_dic = {doc: (0.0) for doc in all_documents} for doc_list, weight in zip(doc_lists, self.weights): for rank, doc in enumerate(doc_list, start=1): rrf_score = weight * (1 / (rank + self.c)) rrf_score_dic[doc.page_content] += rrf_score sorted_documents = sorted(rrf_score_dic.keys(), key=lambda x: rrf_score_dic [x], reverse=True) page_content_to_doc_map = {doc.page_content: doc for doc_list in doc_lists for doc in doc_list} sorted_docs = [page_content_to_doc_map[page_content] for page_content in sorted_documents] return sorted_docs
def weighted_reciprocal_rank(self, doc_lists: List[List[Document]]) ->List[ Document]: """ Perform weighted Reciprocal Rank Fusion on multiple rank lists. You can find more details about RRF here: https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf Args: doc_lists: A list of rank lists, where each rank list contains unique items. Returns: list: The final aggregated list of items sorted by their weighted RRF scores in descending order. """ if len(doc_lists) != len(self.weights): raise ValueError( 'Number of rank lists must be equal to the number of weights.') all_documents = set() for doc_list in doc_lists: for doc in doc_list: all_documents.add(doc.page_content) rrf_score_dic = {doc: (0.0) for doc in all_documents} for doc_list, weight in zip(doc_lists, self.weights): for rank, doc in enumerate(doc_list, start=1): rrf_score = weight * (1 / (rank + self.c)) rrf_score_dic[doc.page_content] += rrf_score sorted_documents = sorted(rrf_score_dic.keys(), key=lambda x: rrf_score_dic[x], reverse=True) page_content_to_doc_map = {doc.page_content: doc for doc_list in doc_lists for doc in doc_list} sorted_docs = [page_content_to_doc_map[page_content] for page_content in sorted_documents] return sorted_docs
Perform weighted Reciprocal Rank Fusion on multiple rank lists. You can find more details about RRF here: https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf Args: doc_lists: A list of rank lists, where each rank list contains unique items. Returns: list: The final aggregated list of items sorted by their weighted RRF scores in descending order.
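A minimal sketch (not part of the dataset; the rank lists, c=60, and equal weights are illustrative assumptions, not values read from the retriever) of how the weighted RRF score above is accumulated per document:

.. code-block:: python

    c = 60
    weights = [0.5, 0.5]
    doc_lists = [['a', 'b'], ['b', 'c', 'a']]
    scores = {doc: 0.0 for lst in doc_lists for doc in lst}
    for lst, weight in zip(doc_lists, weights):
        for rank, doc in enumerate(lst, start=1):
            scores[doc] += weight * (1 / (rank + c))
    # 'b' ranks first and second, so it edges out 'a'; 'c' appears only once.
    print(sorted(scores, key=scores.get, reverse=True))  # ['b', 'a', 'c']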
test_figma_file_loader
"""Test Figma file loader.""" loader = FigmaFileLoader(ACCESS_TOKEN, IDS, KEY) docs = loader.load() assert len(docs) == 1
def test_figma_file_loader() ->None: """Test Figma file loader.""" loader = FigmaFileLoader(ACCESS_TOKEN, IDS, KEY) docs = loader.load() assert len(docs) == 1
Test Figma file loader.
observation_prefix
"""Prefix to append the observation with.""" return 'Observation: '
@property def observation_prefix(self) ->str: """Prefix to append the observation with.""" return 'Observation: '
Prefix to append the observation with.
__exit__
self.close()
def __exit__(self, exception_type: Any, exception_value: Any, traceback: Any ) ->None: self.close()
null
test_loading_few_shot_prompt_from_yaml
"""Test loading few shot prompt from yaml.""" with change_directory(EXAMPLE_DIR): prompt = load_prompt('few_shot_prompt.yaml') expected_prompt = FewShotPromptTemplate(input_variables=['adjective'], prefix='Write antonyms for the following words.', example_prompt= PromptTemplate(input_variables=['input', 'output'], template= """Input: {input} Output: {output}"""), examples=[{'input': 'happy', 'output': 'sad'}, {'input': 'tall', 'output': 'short'}], suffix= """Input: {adjective} Output:""") assert prompt == expected_prompt
def test_loading_few_shot_prompt_from_yaml() ->None: """Test loading few shot prompt from yaml.""" with change_directory(EXAMPLE_DIR): prompt = load_prompt('few_shot_prompt.yaml') expected_prompt = FewShotPromptTemplate(input_variables=[ 'adjective'], prefix='Write antonyms for the following words.', example_prompt=PromptTemplate(input_variables=['input', 'output'], template="""Input: {input} Output: {output}"""), examples=[{'input': 'happy', 'output': 'sad'}, {'input': 'tall', 'output': 'short'}], suffix="""Input: {adjective} Output:""") assert prompt == expected_prompt
Test loading few shot prompt from yaml.
on_tool_start
"""Do nothing when tool starts.""" pass
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, ** kwargs: Any) ->None: """Do nothing when tool starts.""" pass
Do nothing when tool starts.
on_agent_action
"""Run on agent action.""" self.step += 1 self.tool_starts += 1 self.starts += 1 tool = action.tool tool_input = str(action.tool_input) log = action.log resp = self._init_resp() resp.update({'action': 'on_agent_action', 'log': log, 'tool': tool}) resp.update(self.get_custom_callback_meta()) if self.stream_logs: self._log_stream(tool_input, resp, self.step) resp.update({'tool_input': tool_input}) self.action_records.append(resp)
def on_agent_action(self, action: AgentAction, **kwargs: Any) ->Any: """Run on agent action.""" self.step += 1 self.tool_starts += 1 self.starts += 1 tool = action.tool tool_input = str(action.tool_input) log = action.log resp = self._init_resp() resp.update({'action': 'on_agent_action', 'log': log, 'tool': tool}) resp.update(self.get_custom_callback_meta()) if self.stream_logs: self._log_stream(tool_input, resp, self.step) resp.update({'tool_input': tool_input}) self.action_records.append(resp)
Run on agent action.
merge_chat_runs
"""Merge chat runs together. A chat run is a sequence of messages from the same sender. Args: chat_sessions: A list of chat sessions. Returns: A list of chat sessions with merged chat runs. """ for chat_session in chat_sessions: yield merge_chat_runs_in_session(chat_session)
def merge_chat_runs(chat_sessions: Iterable[ChatSession]) ->Iterator[
    ChatSession]:
    """Merge chat runs together.

    A chat run is a sequence of messages from the same sender.

    Args:
        chat_sessions: An iterable of chat sessions.

    Returns:
        An iterator of chat sessions with merged chat runs.
    """
    for chat_session in chat_sessions:
        yield merge_chat_runs_in_session(chat_session)
Merge chat runs together.

A chat run is a sequence of messages from the same sender.

Args:
    chat_sessions: An iterable of chat sessions.

Returns:
    An iterator of chat sessions with merged chat runs.
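A hedged usage sketch for merge_chat_runs (the messages are illustrative, and the example assumes the ChatSession TypedDict exported by langchain_core.chat_sessions): consecutive messages from the same sender form one chat run and are collapsed into a single message.

.. code-block:: python

    from langchain_core.chat_sessions import ChatSession
    from langchain_core.messages import AIMessage, HumanMessage

    session = ChatSession(messages=[HumanMessage(content='Hi'),
        HumanMessage(content='Are you still there?'),
        AIMessage(content='Hello!')])
    merged = list(merge_chat_runs([session]))
    # merged[0]['messages'] now holds two messages: the merged human run
    # followed by the untouched AI reply.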
ignore_agent
"""Whether to ignore agent callbacks.""" return False
@property def ignore_agent(self) ->bool: """Whether to ignore agent callbacks.""" return False
Whether to ignore agent callbacks.
_call
"""Call out to HuggingFace Hub's inference endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = hf("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} params = {**_model_kwargs, **kwargs} parameter_payload = {'inputs': prompt, 'parameters': params} headers = {'Authorization': f'Bearer {self.huggingfacehub_api_token}', 'Content-Type': 'application/json'} try: response = requests.post(self.endpoint_url, headers=headers, json= parameter_payload) except requests.exceptions.RequestException as e: raise ValueError(f'Error raised by inference endpoint: {e}') generated_text = response.json() if 'error' in generated_text: raise ValueError( f"Error raised by inference API: {generated_text['error']}") if self.task == 'text-generation': text = generated_text[0]['generated_text'] if text.startswith(prompt): text = text[len(prompt):] elif self.task == 'text2text-generation': text = generated_text[0]['generated_text'] elif self.task == 'summarization': text = generated_text[0]['summary_text'] else: raise ValueError( f'Got invalid task {self.task}, currently only {VALID_TASKS} are supported' ) if stop is not None: text = enforce_stop_tokens(text, stop) return text
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: """Call out to HuggingFace Hub's inference endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = hf("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} params = {**_model_kwargs, **kwargs} parameter_payload = {'inputs': prompt, 'parameters': params} headers = {'Authorization': f'Bearer {self.huggingfacehub_api_token}', 'Content-Type': 'application/json'} try: response = requests.post(self.endpoint_url, headers=headers, json= parameter_payload) except requests.exceptions.RequestException as e: raise ValueError(f'Error raised by inference endpoint: {e}') generated_text = response.json() if 'error' in generated_text: raise ValueError( f"Error raised by inference API: {generated_text['error']}") if self.task == 'text-generation': text = generated_text[0]['generated_text'] if text.startswith(prompt): text = text[len(prompt):] elif self.task == 'text2text-generation': text = generated_text[0]['generated_text'] elif self.task == 'summarization': text = generated_text[0]['summary_text'] else: raise ValueError( f'Got invalid task {self.task}, currently only {VALID_TASKS} are supported' ) if stop is not None: text = enforce_stop_tokens(text, stop) return text
Call out to HuggingFace Hub's inference endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = hf("Tell me a joke.")
vectara3
vectara3: Vectara = Vectara() texts = [ """ The way Grounded Generation with Vectara works is we only use valid responses from your data relative to the search query. This dramatically reduces hallucinations in Vectara's responses. You can try it out on your own on our newly launched AskNews demo to experience Grounded Generation, or register an account to ground generative summaries on your own data. """ , """ Generative AI promises to revolutionize how you can benefit from your data, but you need it to provide dependable information without the risk of data leakage. This is why today we're adding a fundamental capability to our platform to make generative AI safer to use. It enables you to ask your data questions and get reliable, accurate answers by retrieving and summarizing only the relevant information. We call it “Grounded Generation”. """ , """ We are incredibly excited to share another feature with this launch: Hybrid Search! Neural LLM systems are excellent at understanding the context and meaning of end-user queries, but they can still underperform when matching exact product SKUs, unusual names of people or companies, barcodes, and other text which identifies entities rather than conveying semantics. We're bridging this gap by introducing a lexical configuration that matches exact keywords, supports Boolean operators, and executes phrase searches, and incorporates the results into our neural search results. """ ] doc_ids = [] for text in texts: ids = vectara3.add_documents([Document(page_content=text, metadata={})]) doc_ids.extend(ids) yield vectara3 for doc_id in doc_ids: vectara3._delete_doc(doc_id)
@pytest.fixture(scope='function') def vectara3(): vectara3: Vectara = Vectara() texts = [ """ The way Grounded Generation with Vectara works is we only use valid responses from your data relative to the search query. This dramatically reduces hallucinations in Vectara's responses. You can try it out on your own on our newly launched AskNews demo to experience Grounded Generation, or register an account to ground generative summaries on your own data. """ , """ Generative AI promises to revolutionize how you can benefit from your data, but you need it to provide dependable information without the risk of data leakage. This is why today we're adding a fundamental capability to our platform to make generative AI safer to use. It enables you to ask your data questions and get reliable, accurate answers by retrieving and summarizing only the relevant information. We call it “Grounded Generation”. """ , """ We are incredibly excited to share another feature with this launch: Hybrid Search! Neural LLM systems are excellent at understanding the context and meaning of end-user queries, but they can still underperform when matching exact product SKUs, unusual names of people or companies, barcodes, and other text which identifies entities rather than conveying semantics. We're bridging this gap by introducing a lexical configuration that matches exact keywords, supports Boolean operators, and executes phrase searches, and incorporates the results into our neural search results. """ ] doc_ids = [] for text in texts: ids = vectara3.add_documents([Document(page_content=text, metadata={})] ) doc_ids.extend(ids) yield vectara3 for doc_id in doc_ids: vectara3._delete_doc(doc_id)
null
_Call
self.dispatch(t.func) self.write('(') comma = False for e in t.args: if comma: self.write(', ') else: comma = True self.dispatch(e) for e in t.keywords: if comma: self.write(', ') else: comma = True self.dispatch(e) self.write(')')
def _Call(self, t): self.dispatch(t.func) self.write('(') comma = False for e in t.args: if comma: self.write(', ') else: comma = True self.dispatch(e) for e in t.keywords: if comma: self.write(', ') else: comma = True self.dispatch(e) self.write(')')
null
process_output
""" Uses regex to remove the command from the output Args: output: a process' output string command: the executed command """ pattern = re.escape(command) + '\\s*\\n' output = re.sub(pattern, '', output, count=1) return output.strip()
def process_output(self, output: str, command: str) ->str: """ Uses regex to remove the command from the output Args: output: a process' output string command: the executed command """ pattern = re.escape(command) + '\\s*\\n' output = re.sub(pattern, '', output, count=1) return output.strip()
Uses regex to remove the command from the output Args: output: a process' output string command: the executed command
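A minimal sketch (toy strings, not from the dataset) of the regex used above to strip the echoed command from a process' output:

.. code-block:: python

    import re

    output = 'ls -la\ntotal 8\nREADME.md\n'
    command = 'ls -la'
    pattern = re.escape(command) + '\\s*\\n'
    # Only the first occurrence is removed, mirroring count=1 above.
    cleaned = re.sub(pattern, '', output, count=1).strip()
    print(cleaned)  # prints 'total 8' and 'README.md' on separate lines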
on_agent_action
self.on_agent_action_common()
def on_agent_action(self, *args: Any, **kwargs: Any) ->Any: self.on_agent_action_common()
null
from_function
"""Initialize tool from a function.""" if func is None and coroutine is None: raise ValueError('Function and/or coroutine must be provided') return cls(name=name, func=func, coroutine=coroutine, description= description, return_direct=return_direct, args_schema=args_schema, **kwargs )
@classmethod def from_function(cls, func: Optional[Callable], name: str, description: str, return_direct: bool=False, args_schema: Optional[Type[BaseModel]]= None, coroutine: Optional[Callable[..., Awaitable[Any]]]=None, **kwargs: Any) ->Tool: """Initialize tool from a function.""" if func is None and coroutine is None: raise ValueError('Function and/or coroutine must be provided') return cls(name=name, func=func, coroutine=coroutine, description= description, return_direct=return_direct, args_schema=args_schema, **kwargs)
Initialize tool from a function.
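A hypothetical usage sketch (the function, name, and description are illustrative; it assumes Tool is importable from langchain_core.tools):

.. code-block:: python

    from langchain_core.tools import Tool

    def word_count(text: str) -> str:
        return str(len(text.split()))

    tool = Tool.from_function(func=word_count, name='word_count',
        description='Counts the number of words in the input text.')
    print(tool.run('how many words is this'))  # 5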
get_title
return self.DocumentTitle or ''
def get_title(self) ->str: return self.DocumentTitle or ''
null
add_texts
"""Run more texts through the embeddings and add to the retriever. Args: texts: Iterable of strings to add to the retriever. refresh_indices: bool to refresh ElasticSearch indices Returns: List of ids from adding the texts into the retriever. """ try: from elasticsearch.helpers import bulk except ImportError: raise ValueError( 'Could not import elasticsearch python package. Please install it with `pip install elasticsearch`.' ) requests = [] ids = [] for i, text in enumerate(texts): _id = str(uuid.uuid4()) request = {'_op_type': 'index', '_index': self.index_name, 'content': text, '_id': _id} ids.append(_id) requests.append(request) bulk(self.client, requests) if refresh_indices: self.client.indices.refresh(index=self.index_name) return ids
def add_texts(self, texts: Iterable[str], refresh_indices: bool=True) ->List[
    str]:
    """Add more texts to the retriever's Elasticsearch index.

    Args:
        texts: Iterable of strings to add to the retriever.
        refresh_indices: Whether to refresh the Elasticsearch indices after adding.

    Returns:
        List of ids from adding the texts into the retriever.
    """
    try:
        from elasticsearch.helpers import bulk
    except ImportError:
        raise ValueError(
            'Could not import elasticsearch python package. Please install it with `pip install elasticsearch`.'
            )
    requests = []
    ids = []
    for i, text in enumerate(texts):
        _id = str(uuid.uuid4())
        request = {'_op_type': 'index', '_index': self.index_name,
            'content': text, '_id': _id}
        ids.append(_id)
        requests.append(request)
    bulk(self.client, requests)
    if refresh_indices:
        self.client.indices.refresh(index=self.index_name)
    return ids
Add more texts to the retriever's Elasticsearch index.

Args:
    texts: Iterable of strings to add to the retriever.
    refresh_indices: Whether to refresh the Elasticsearch indices after adding.

Returns:
    List of ids from adding the texts into the retriever.
from_texts
"""Return Marqo initialized from texts. Note that Marqo does not need embeddings, we retain the parameter to adhere to the Liskov substitution principle. This is a quick way to get started with marqo - simply provide your texts and metadatas and this will create an instance of the data store and index the provided data. To know the ids of your documents with this approach you will need to include them in under the key "_id" in your metadatas for each text Example: .. code-block:: python from langchain_community.vectorstores import Marqo datastore = Marqo(texts=['text'], index_name='my-first-index', url='http://localhost:8882') Args: texts (List[str]): A list of texts to index into marqo upon creation. embedding (Any, optional): Embeddings (not required). Defaults to None. index_name (str, optional): The name of the index to use, if none is provided then one will be created with a UUID. Defaults to None. url (str, optional): The URL for Marqo. Defaults to "http://localhost:8882". api_key (str, optional): The API key for Marqo. Defaults to "". metadatas (Optional[List[dict]], optional): A list of metadatas, to accompany the texts. Defaults to None. this is only used when a new index is being created. Defaults to "cpu". Can be "cpu" or "cuda". add_documents_settings (Optional[Dict[str, Any]], optional): Settings for adding documents, see https://docs.marqo.ai/0.0.16/API-Reference/documents/#query-parameters. Defaults to {}. index_settings (Optional[Dict[str, Any]], optional): Index settings if the index doesn't exist, see https://docs.marqo.ai/0.0.16/API-Reference/indexes/#index-defaults-object. Defaults to {}. Returns: Marqo: An instance of the Marqo vector store """ try: import marqo except ImportError: raise ImportError( 'Could not import marqo python package. Please install it with `pip install marqo`.' ) if not index_name: index_name = str(uuid.uuid4()) client = marqo.Client(url=url, api_key=api_key) try: client.create_index(index_name, settings_dict=index_settings or {}) if verbose: print(f'Created {index_name} successfully.') except Exception: if verbose: print(f'Index {index_name} exists.') instance: Marqo = cls(client, index_name, searchable_attributes= searchable_attributes, add_documents_settings=add_documents_settings or {}, page_content_builder=page_content_builder) instance.add_texts(texts, metadatas) return instance
@classmethod
def from_texts(cls, texts: List[str], embedding: Any=None, metadatas:
    Optional[List[dict]]=None, index_name: str='', url: str=
    'http://localhost:8882', api_key: str='', add_documents_settings:
    Optional[Dict[str, Any]]=None, searchable_attributes: Optional[List[
    str]]=None, page_content_builder: Optional[Callable[[Dict[str, str]],
    str]]=None, index_settings: Optional[Dict[str, Any]]=None, verbose:
    bool=True, **kwargs: Any) ->Marqo:
    """Return Marqo initialized from texts. Note that Marqo does not need
    embeddings, we retain the parameter to adhere to the Liskov substitution
    principle.

    This is a quick way to get started with marqo - simply provide your texts
    and metadatas and this will create an instance of the data store and
    index the provided data.

    To know the ids of your documents with this approach you will need to
    include them under the key "_id" in your metadatas for each text.

    Example:
    .. code-block:: python

            from langchain_community.vectorstores import Marqo

            datastore = Marqo(texts=['text'], index_name='my-first-index',
            url='http://localhost:8882')

    Args:
        texts (List[str]): A list of texts to index into marqo upon creation.
        embedding (Any, optional): Embeddings (not required). Defaults to None.
        index_name (str, optional): The name of the index to use, if none is
            provided then one will be created with a UUID. Defaults to None.
        url (str, optional): The URL for Marqo. Defaults to
            "http://localhost:8882".
        api_key (str, optional): The API key for Marqo. Defaults to "".
        metadatas (Optional[List[dict]], optional): A list of metadatas, to
            accompany the texts. Defaults to None.
        add_documents_settings (Optional[Dict[str, Any]], optional): Settings
            for adding documents, see
            https://docs.marqo.ai/0.0.16/API-Reference/documents/#query-parameters.
            Defaults to {}.
        index_settings (Optional[Dict[str, Any]], optional): Index settings if
            the index doesn't exist, see
            https://docs.marqo.ai/0.0.16/API-Reference/indexes/#index-defaults-object.
            Defaults to {}.

    Returns:
        Marqo: An instance of the Marqo vector store
    """
    try:
        import marqo
    except ImportError:
        raise ImportError(
            'Could not import marqo python package. Please install it with `pip install marqo`.'
            )
    if not index_name:
        index_name = str(uuid.uuid4())
    client = marqo.Client(url=url, api_key=api_key)
    try:
        client.create_index(index_name, settings_dict=index_settings or {})
        if verbose:
            print(f'Created {index_name} successfully.')
    except Exception:
        if verbose:
            print(f'Index {index_name} exists.')
    instance: Marqo = cls(client, index_name, searchable_attributes=
        searchable_attributes, add_documents_settings=
        add_documents_settings or {}, page_content_builder=
        page_content_builder)
    instance.add_texts(texts, metadatas)
    return instance
Return Marqo initialized from texts. Note that Marqo does not need
embeddings, we retain the parameter to adhere to the Liskov substitution
principle.

This is a quick way to get started with marqo - simply provide your texts
and metadatas and this will create an instance of the data store and
index the provided data.

To know the ids of your documents with this approach you will need to
include them under the key "_id" in your metadatas for each text.

Example:
.. code-block:: python

        from langchain_community.vectorstores import Marqo

        datastore = Marqo(texts=['text'], index_name='my-first-index',
        url='http://localhost:8882')

Args:
    texts (List[str]): A list of texts to index into marqo upon creation.
    embedding (Any, optional): Embeddings (not required). Defaults to None.
    index_name (str, optional): The name of the index to use, if none is
        provided then one will be created with a UUID. Defaults to None.
    url (str, optional): The URL for Marqo. Defaults to "http://localhost:8882".
    api_key (str, optional): The API key for Marqo. Defaults to "".
    metadatas (Optional[List[dict]], optional): A list of metadatas, to
        accompany the texts. Defaults to None.
    add_documents_settings (Optional[Dict[str, Any]], optional): Settings
        for adding documents, see
        https://docs.marqo.ai/0.0.16/API-Reference/documents/#query-parameters.
        Defaults to {}.
    index_settings (Optional[Dict[str, Any]], optional): Index settings if
        the index doesn't exist, see
        https://docs.marqo.ai/0.0.16/API-Reference/indexes/#index-defaults-object.
        Defaults to {}.

Returns:
    Marqo: An instance of the Marqo vector store
_parse_response
formatted_list: list = [] if len(response) == 1: self._parse_json_multilevel(response[0]['extracted_data'][0], formatted_list) else: for entry in response: if entry.get('provider') == 'eden-ai': self._parse_json_multilevel(entry['extracted_data'][0], formatted_list) return '\n'.join(formatted_list)
def _parse_response(self, response: list) ->str: formatted_list: list = [] if len(response) == 1: self._parse_json_multilevel(response[0]['extracted_data'][0], formatted_list) else: for entry in response: if entry.get('provider') == 'eden-ai': self._parse_json_multilevel(entry['extracted_data'][0], formatted_list) return '\n'.join(formatted_list)
null
validate_environment
"""Validate that open_clip and torch libraries are installed.""" try: import open_clip model_name = values.get('model_name', cls.__fields__['model_name'].default) checkpoint = values.get('checkpoint', cls.__fields__['checkpoint'].default) model, _, preprocess = open_clip.create_model_and_transforms(model_name =model_name, pretrained=checkpoint) tokenizer = open_clip.get_tokenizer(model_name) values['model'] = model values['preprocess'] = preprocess values['tokenizer'] = tokenizer except ImportError: raise ImportError( 'Please ensure both open_clip and torch libraries are installed. pip install open_clip_torch torch' ) return values
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate that open_clip and torch libraries are installed.""" try: import open_clip model_name = values.get('model_name', cls.__fields__['model_name']. default) checkpoint = values.get('checkpoint', cls.__fields__['checkpoint']. default) model, _, preprocess = open_clip.create_model_and_transforms(model_name =model_name, pretrained=checkpoint) tokenizer = open_clip.get_tokenizer(model_name) values['model'] = model values['preprocess'] = preprocess values['tokenizer'] = tokenizer except ImportError: raise ImportError( 'Please ensure both open_clip and torch libraries are installed. pip install open_clip_torch torch' ) return values
Validate that open_clip and torch libraries are installed.
test_call
"""Test that call gives correct answer.""" search = SearchApiAPIWrapper() output = search.run('What is the capital of Lithuania?') assert 'Vilnius' in output
def test_call() ->None: """Test that call gives correct answer.""" search = SearchApiAPIWrapper() output = search.run('What is the capital of Lithuania?') assert 'Vilnius' in output
Test that call gives correct answer.
compress_documents
"""Filter documents based on similarity of their embeddings to the query.""" stateful_documents = get_stateful_documents(documents) embedded_documents = _get_embeddings_from_stateful_docs(self.embeddings, stateful_documents) embedded_query = self.embeddings.embed_query(query) similarity = self.similarity_fn([embedded_query], embedded_documents)[0] included_idxs = np.arange(len(embedded_documents)) if self.k is not None: included_idxs = np.argsort(similarity)[::-1][:self.k] if self.similarity_threshold is not None: similar_enough = np.where(similarity[included_idxs] > self. similarity_threshold) included_idxs = included_idxs[similar_enough] for i in included_idxs: stateful_documents[i].state['query_similarity_score'] = similarity[i] return [stateful_documents[i] for i in included_idxs]
def compress_documents(self, documents: Sequence[Document], query: str, callbacks: Optional[Callbacks]=None) ->Sequence[Document]: """Filter documents based on similarity of their embeddings to the query.""" stateful_documents = get_stateful_documents(documents) embedded_documents = _get_embeddings_from_stateful_docs(self.embeddings, stateful_documents) embedded_query = self.embeddings.embed_query(query) similarity = self.similarity_fn([embedded_query], embedded_documents)[0] included_idxs = np.arange(len(embedded_documents)) if self.k is not None: included_idxs = np.argsort(similarity)[::-1][:self.k] if self.similarity_threshold is not None: similar_enough = np.where(similarity[included_idxs] > self. similarity_threshold) included_idxs = included_idxs[similar_enough] for i in included_idxs: stateful_documents[i].state['query_similarity_score'] = similarity[i] return [stateful_documents[i] for i in included_idxs]
Filter documents based on similarity of their embeddings to the query.
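A minimal NumPy sketch (toy similarity scores, not from the dataset) of the top-k and threshold selection applied above:

.. code-block:: python

    import numpy as np

    similarity = np.array([0.91, 0.4, 0.75, 0.62])
    k, similarity_threshold = 3, 0.6
    included_idxs = np.argsort(similarity)[::-1][:k]  # best k first
    similar_enough = np.where(similarity[included_idxs] > similarity_threshold)
    included_idxs = included_idxs[similar_enough]
    print(included_idxs)  # [0 2 3]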
similarity_search_by_vector
raise NotImplementedError
def similarity_search_by_vector(self, embedding: List[float], k: int=4, ** kwargs: Any) ->List[Document]: raise NotImplementedError
null
_resolve_schema_references
""" Resolves the $ref keys in a JSON schema object using the provided definitions. """ if isinstance(schema, list): for i, item in enumerate(schema): schema[i] = _resolve_schema_references(item, definitions) elif isinstance(schema, dict): if '$ref' in schema: ref_key = schema.pop('$ref').split('/')[-1] ref = definitions.get(ref_key, {}) schema.update(ref) else: for key, value in schema.items(): schema[key] = _resolve_schema_references(value, definitions) return schema
def _resolve_schema_references(schema: Any, definitions: Dict[str, Any]) ->Any: """ Resolves the $ref keys in a JSON schema object using the provided definitions. """ if isinstance(schema, list): for i, item in enumerate(schema): schema[i] = _resolve_schema_references(item, definitions) elif isinstance(schema, dict): if '$ref' in schema: ref_key = schema.pop('$ref').split('/')[-1] ref = definitions.get(ref_key, {}) schema.update(ref) else: for key, value in schema.items(): schema[key] = _resolve_schema_references(value, definitions) return schema
Resolves the $ref keys in a JSON schema object using the provided definitions.
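A minimal sketch (toy schema, not taken from any real spec) of the in-place $ref substitution performed above:

.. code-block:: python

    definitions = {'Pet': {'type': 'object', 'properties': {'name': {
        'type': 'string'}}}}
    schema = {'type': 'array', 'items': {'$ref': '#/definitions/Pet'}}
    resolved = _resolve_schema_references(schema, definitions)
    # resolved == {'type': 'array', 'items': {'type': 'object',
    #     'properties': {'name': {'type': 'string'}}}}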
embed_documents
return [self._get_embedding() for _ in texts]
def embed_documents(self, texts: List[str]) ->List[List[float]]: return [self._get_embedding() for _ in texts]
null
_parse_json
result = [] label_info = [] for found_obj in json_data['items']: label_str = f"{found_obj['label']} - Confidence {found_obj['confidence']}" x_min = found_obj.get('x_min') x_max = found_obj.get('x_max') y_min = found_obj.get('y_min') y_max = found_obj.get('y_max') if self.show_positions and all([x_min, x_max, y_min, y_max]): label_str += f""",at the position x_min: {x_min}, x_max: {x_max}, y_min: {y_min}, y_max: {y_max}""" label_info.append(label_str) result.append('\n'.join(label_info)) return '\n\n'.join(result)
def _parse_json(self, json_data: dict) ->str: result = [] label_info = [] for found_obj in json_data['items']: label_str = ( f"{found_obj['label']} - Confidence {found_obj['confidence']}") x_min = found_obj.get('x_min') x_max = found_obj.get('x_max') y_min = found_obj.get('y_min') y_max = found_obj.get('y_max') if self.show_positions and all([x_min, x_max, y_min, y_max]): label_str += f""",at the position x_min: {x_min}, x_max: {x_max}, y_min: {y_min}, y_max: {y_max}""" label_info.append(label_str) result.append('\n'.join(label_info)) return '\n\n'.join(result)
null
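A toy input (invented; shaped only by the keys the parser above reads) and roughly the line it would produce when show_positions is enabled:

.. code-block:: python

    json_data = {'items': [{'label': 'cat', 'confidence': 0.98, 'x_min':
        0.1, 'x_max': 0.4, 'y_min': 0.2, 'y_max': 0.7}]}
    # The parser would report something like:
    # cat - Confidence 0.98,at the position x_min: 0.1, x_max: 0.4,
    # y_min: 0.2, y_max: 0.7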
_import_slack_send_message
from langchain_community.tools.slack.send_message import SlackSendMessage return SlackSendMessage
def _import_slack_send_message() ->Any: from langchain_community.tools.slack.send_message import SlackSendMessage return SlackSendMessage
null
_get_mock_quip_loader
quip_loader = QuipLoader(self.API_URL, access_token=self.ACCESS_TOKEN, request_timeout=60) quip_loader.quip_client = mock_quip return quip_loader
def _get_mock_quip_loader(self, mock_quip: MagicMock) ->QuipLoader: quip_loader = QuipLoader(self.API_URL, access_token=self.ACCESS_TOKEN, request_timeout=60) quip_loader.quip_client = mock_quip return quip_loader
null
nested_element
"""Get nested element from path.""" if len(path) == 0: return AddableDict({elem.tag: elem.text}) else: return AddableDict({path[0]: [nested_element(path[1:], elem)]})
def nested_element(path: List[str], elem: ET.Element) ->Any: """Get nested element from path.""" if len(path) == 0: return AddableDict({elem.tag: elem.text}) else: return AddableDict({path[0]: [nested_element(path[1:], elem)]})
Get nested element from path.
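A minimal sketch (toy XML element) of the nested AddableDict structure the helper above builds from a path:

.. code-block:: python

    import xml.etree.ElementTree as ET

    elem = ET.fromstring('<name>Alice</name>')
    nested_element(['person', 'contact'], elem)
    # -> {'person': [{'contact': [{'name': 'Alice'}]}]}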
from_texts
"""Create Rockset wrapper with existing texts. This is intended as a quicker way to get started. """ assert client is not None, 'Rockset Client cannot be None' assert collection_name, 'Collection name cannot be empty' assert text_key, 'Text key name cannot be empty' assert embedding_key, 'Embedding key cannot be empty' rockset = cls(client, embedding, collection_name, text_key, embedding_key) rockset.add_texts(texts, metadatas, ids, batch_size) return rockset
@classmethod def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]]=None, client: Any=None, collection_name: str='', text_key: str='', embedding_key: str='', ids: Optional[List[str]]=None, batch_size: int=32, **kwargs: Any) ->Rockset: """Create Rockset wrapper with existing texts. This is intended as a quicker way to get started. """ assert client is not None, 'Rockset Client cannot be None' assert collection_name, 'Collection name cannot be empty' assert text_key, 'Text key name cannot be empty' assert embedding_key, 'Embedding key cannot be empty' rockset = cls(client, embedding, collection_name, text_key, embedding_key) rockset.add_texts(texts, metadatas, ids, batch_size) return rockset
Create Rockset wrapper with existing texts. This is intended as a quicker way to get started.
_get_language_model
if isinstance(llm_like, BaseLanguageModel): return llm_like elif isinstance(llm_like, RunnableBinding): return _get_language_model(llm_like.bound) elif isinstance(llm_like, RunnableWithFallbacks): return _get_language_model(llm_like.runnable) elif isinstance(llm_like, (RunnableBranch, DynamicRunnable)): return _get_language_model(llm_like.default) else: raise ValueError( f'Unable to extract BaseLanguageModel from llm_like object of type {type(llm_like)}' )
def _get_language_model(llm_like: Runnable) ->BaseLanguageModel: if isinstance(llm_like, BaseLanguageModel): return llm_like elif isinstance(llm_like, RunnableBinding): return _get_language_model(llm_like.bound) elif isinstance(llm_like, RunnableWithFallbacks): return _get_language_model(llm_like.runnable) elif isinstance(llm_like, (RunnableBranch, DynamicRunnable)): return _get_language_model(llm_like.default) else: raise ValueError( f'Unable to extract BaseLanguageModel from llm_like object of type {type(llm_like)}' )
null
on_agent_action
"""Run on agent action.""" self.metrics['step'] += 1 self.metrics['tool_starts'] += 1 self.metrics['starts'] += 1 tool_starts = self.metrics['tool_starts'] resp: Dict[str, Any] = {} resp.update({'action': 'on_agent_action', 'tool': action.tool, 'tool_input': action.tool_input, 'log': action.log}) resp.update(self.metrics) self.jsonf(resp, self.temp_dir, f'agent_action_{tool_starts}')
def on_agent_action(self, action: AgentAction, **kwargs: Any) ->Any: """Run on agent action.""" self.metrics['step'] += 1 self.metrics['tool_starts'] += 1 self.metrics['starts'] += 1 tool_starts = self.metrics['tool_starts'] resp: Dict[str, Any] = {} resp.update({'action': 'on_agent_action', 'tool': action.tool, 'tool_input': action.tool_input, 'log': action.log}) resp.update(self.metrics) self.jsonf(resp, self.temp_dir, f'agent_action_{tool_starts}')
Run on agent action.