date_collected (stringclasses 1) | repo_name (stringlengths 6-116) | file_name (stringlengths 2-220) | file_contents (stringlengths 13-357k) | prompts (sequence)
---|---|---|---|---|
2024-01-10 | apecloud/llama_index | llama_index~optimization~optimizer.py | """Optimization related classes and functions."""
import logging
from abc import abstractmethod
from typing import Callable, List, Optional
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.indices.query.embedding_utils import get_top_k_embeddings
from llama_index.indices.query.schema import QueryBundle
logger = logging.getLogger(__name__)
class BaseTokenUsageOptimizer:
"""Base class for optimizers that should be overwritten."""
@abstractmethod
def optimize(self, query_bundle: QueryBundle, text: str) -> str:
"""Optimize the input text given the query."""
raise NotImplementedError("Not implemented yet.")
class SentenceEmbeddingOptimizer(BaseTokenUsageOptimizer):
"""Optimization of a text chunk given the query by shortening the input text."""
def __init__(
self,
embed_model: Optional[BaseEmbedding] = None,
percentile_cutoff: Optional[float] = None,
threshold_cutoff: Optional[float] = None,
tokenizer_fn: Optional[Callable[[str], List[str]]] = None,
):
"""Optimizer class that is passed into BaseGPTIndexQuery.
Should be set like this:
.. code-block:: python
from llama_index.optimization.optimizer import SentenceEmbeddingOptimizer
# percentile_cutoff=0.5 means that the top 50% of sentences will be used.
# Alternatively, you can set the cutoff using a threshold on the
# similarity score; in that case only sentences with a similarity
# score higher than the threshold will be used.
# These cutoffs can also be used together.
optimizer = SentenceEmbeddingOptimizer(
percentile_cutoff=0.5,
threshold_cutoff=0.7,
)
query_engine = index.as_query_engine(
optimizer=optimizer
)
response = query_engine.query("<query_str>")
"""
self.embed_model = embed_model or OpenAIEmbedding()
self._percentile_cutoff = percentile_cutoff
self._threshold_cutoff = threshold_cutoff
if tokenizer_fn is None:
import nltk.data
try:
nltk.data.find("tokenizers/punkt")
except LookupError:
nltk.download("punkt")
tokenizer = nltk.data.load("tokenizers/punkt/english.pickle")
tokenizer_fn = tokenizer.tokenize
self._tokenizer_fn = tokenizer_fn
def optimize(self, query_bundle: QueryBundle, text: str) -> str:
"""Optimize a text chunk given the query by shortening the input text."""
split_text = self._tokenizer_fn(text)
start_embed_token_ct = self.embed_model.total_tokens_used
if query_bundle.embedding is None:
query_bundle.embedding = self.embed_model.get_agg_embedding_from_queries(
query_bundle.embedding_strs
)
text_embeddings = self.embed_model._get_text_embeddings(split_text)
num_top_k = None
threshold = None
if self._percentile_cutoff is not None:
num_top_k = int(len(split_text) * self._percentile_cutoff)
if self._threshold_cutoff is not None:
threshold = self._threshold_cutoff
top_similarities, top_idxs = get_top_k_embeddings(
query_embedding=query_bundle.embedding,
embeddings=text_embeddings,
similarity_fn=self.embed_model.similarity,
similarity_top_k=num_top_k,
embedding_ids=list(range(len(text_embeddings))),
similarity_cutoff=threshold,
)
net_embed_tokens = self.embed_model.total_tokens_used - start_embed_token_ct
logger.info(
f"> [optimize] Total embedding token usage: " f"{net_embed_tokens} tokens"
)
if len(top_idxs) == 0:
raise ValueError("Optimizer returned zero sentences.")
top_sentences = [split_text[i] for i in top_idxs]
logger.debug(f"> Top {len(top_idxs)} sentences with scores:\n")
if logger.isEnabledFor(logging.DEBUG):
for i in range(len(top_idxs)):
logger.debug(f"{i}. {top_sentences[i]} ({top_similarities[i]})")
return " ".join(top_sentences)
| [] |
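The optimizer above is meant to be handed to a query engine so that each retrieved chunk is trimmed to its most query-relevant sentences before synthesis. Below is a minimal usage sketch, assuming this snapshot's import paths and OpenAI credentials in the environment (the default embedding model is `OpenAIEmbedding`); the index and document text are made up for illustration.

```python
from llama_index import Document, ListIndex
from llama_index.optimization.optimizer import SentenceEmbeddingOptimizer

# Toy index; any index whose as_query_engine() accepts optimizer=... works the same way.
index = ListIndex.from_documents([Document("<long article text>")])

# Keep the top 50% of sentences, and only those above 0.7 similarity to the query.
optimizer = SentenceEmbeddingOptimizer(percentile_cutoff=0.5, threshold_cutoff=0.7)

query_engine = index.as_query_engine(optimizer=optimizer)
response = query_engine.query("<query_str>")
print(response)
```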
2024-01-10 | apecloud/llama_index | llama_index~prompts~default_prompts.py | """Set of default prompts."""
from llama_index.prompts.base import Prompt
from llama_index.prompts.prompt_type import PromptType
############################################
# Tree
############################################
DEFAULT_SUMMARY_PROMPT_TMPL = (
"Write a summary of the following. Try to use only the "
"information provided. "
"Try to include as many key details as possible.\n"
"\n"
"\n"
"{context_str}\n"
"\n"
"\n"
'SUMMARY:"""\n'
)
DEFAULT_SUMMARY_PROMPT = Prompt(
DEFAULT_SUMMARY_PROMPT_TMPL, prompt_type=PromptType.SUMMARY
)
# insert prompts
DEFAULT_INSERT_PROMPT_TMPL = (
"Context information is below. It is provided in a numbered list "
"(1 to {num_chunks}),"
"where each item in the list corresponds to a summary.\n"
"---------------------\n"
"{context_list}"
"---------------------\n"
"Given the context information, here is a new piece of "
"information: {new_chunk_text}\n"
"Answer with the number corresponding to the summary that should be updated. "
"The answer should be the number corresponding to the "
"summary that is most relevant to the question.\n"
)
DEFAULT_INSERT_PROMPT = Prompt(
DEFAULT_INSERT_PROMPT_TMPL, prompt_type=PromptType.TREE_INSERT
)
# # single choice
DEFAULT_QUERY_PROMPT_TMPL = (
"Some choices are given below. It is provided in a numbered list "
"(1 to {num_chunks}),"
"where each item in the list corresponds to a summary.\n"
"---------------------\n"
"{context_list}"
"\n---------------------\n"
"Using only the choices above and not prior knowledge, return "
"the choice that is most relevant to the question: '{query_str}'\n"
"Provide choice in the following format: 'ANSWER: <number>' and explain why "
"this summary was selected in relation to the question.\n"
)
DEFAULT_QUERY_PROMPT = Prompt(
DEFAULT_QUERY_PROMPT_TMPL, prompt_type=PromptType.TREE_SELECT
)
# multiple choice
DEFAULT_QUERY_PROMPT_MULTIPLE_TMPL = (
"Some choices are given below. It is provided in a numbered "
"list (1 to {num_chunks}), "
"where each item in the list corresponds to a summary.\n"
"---------------------\n"
"{context_list}"
"\n---------------------\n"
"Using only the choices above and not prior knowledge, return the top choices "
"(no more than {branching_factor}, ranked by most relevant to least) that "
"are most relevant to the question: '{query_str}'\n"
"Provide choices in the following format: 'ANSWER: <numbers>' and explain why "
"these summaries were selected in relation to the question.\n"
)
DEFAULT_QUERY_PROMPT_MULTIPLE = Prompt(
DEFAULT_QUERY_PROMPT_MULTIPLE_TMPL, prompt_type=PromptType.TREE_SELECT_MULTIPLE
)
DEFAULT_REFINE_PROMPT_TMPL = (
"The original question is as follows: {query_str}\n"
"We have provided an existing answer: {existing_answer}\n"
"We have the opportunity to refine the existing answer "
"(only if needed) with some more context below.\n"
"------------\n"
"{context_msg}\n"
"------------\n"
"Given the new context, refine the original answer to better "
"answer the question. "
"If the context isn't useful, return the original answer."
)
DEFAULT_REFINE_PROMPT = Prompt(
DEFAULT_REFINE_PROMPT_TMPL, prompt_type=PromptType.REFINE
)
DEFAULT_TEXT_QA_PROMPT_TMPL = (
"Context information is below. \n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"Given the context information and not prior knowledge, "
"answer the question: {query_str}\n"
)
DEFAULT_TEXT_QA_PROMPT = Prompt(
DEFAULT_TEXT_QA_PROMPT_TMPL, prompt_type=PromptType.QUESTION_ANSWER
)
############################################
# Keyword Table
############################################
DEFAULT_KEYWORD_EXTRACT_TEMPLATE_TMPL = (
"Some text is provided below. Given the text, extract up to {max_keywords} "
"keywords from the text. Avoid stopwords."
"---------------------\n"
"{text}\n"
"---------------------\n"
"Provide keywords in the following comma-separated format: 'KEYWORDS: <keywords>'\n"
)
DEFAULT_KEYWORD_EXTRACT_TEMPLATE = Prompt(
DEFAULT_KEYWORD_EXTRACT_TEMPLATE_TMPL, prompt_type=PromptType.KEYWORD_EXTRACT
)
# NOTE: the keyword extraction for queries can be the same as
# the one used to build the index, but here we tune it to see if performance is better.
DEFAULT_QUERY_KEYWORD_EXTRACT_TEMPLATE_TMPL = (
"A question is provided below. Given the question, extract up to {max_keywords} "
"keywords from the text. Focus on extracting the keywords that we can use "
"to best lookup answers to the question. Avoid stopwords.\n"
"---------------------\n"
"{question}\n"
"---------------------\n"
"Provide keywords in the following comma-separated format: 'KEYWORDS: <keywords>'\n"
)
DEFAULT_QUERY_KEYWORD_EXTRACT_TEMPLATE = Prompt(
DEFAULT_QUERY_KEYWORD_EXTRACT_TEMPLATE_TMPL,
prompt_type=PromptType.QUERY_KEYWORD_EXTRACT,
)
############################################
# Structured Store
############################################
DEFAULT_SCHEMA_EXTRACT_TMPL = (
"We wish to extract relevant fields from an unstructured text chunk into "
"a structured schema. We first provide the unstructured text, and then "
"we provide the schema that we wish to extract. "
"-----------text-----------\n"
"{text}\n"
"-----------schema-----------\n"
"{schema}\n"
"---------------------\n"
"Given the text and schema, extract the relevant fields from the text in "
"the following format: "
"field1: <value>\nfield2: <value>\n...\n\n"
"If a field is not present in the text, don't include it in the output."
"If no fields are present in the text, return a blank string.\n"
"Fields: "
)
DEFAULT_SCHEMA_EXTRACT_PROMPT = Prompt(
DEFAULT_SCHEMA_EXTRACT_TMPL, prompt_type=PromptType.SCHEMA_EXTRACT
)
# NOTE: taken from langchain and adapted
# https://tinyurl.com/b772sd77
DEFAULT_TEXT_TO_SQL_TMPL = (
"Given an input question, first create a syntactically correct {dialect} "
"query to run, then look at the results of the query and return the answer. "
"You can order the results by a relevant column to return the most "
"interesting examples in the database.\n"
"Never query for all the columns from a specific table, only ask for a "
"few relevant columns given the question.\n"
"Pay attention to use only the column names that you can see in the schema "
"description. "
"Be careful to not query for columns that do not exist. "
"Pay attention to which column is in which table. "
"Also, qualify column names with the table name when needed.\n"
"Use the following format:\n"
"Question: Question here\n"
"SQLQuery: SQL Query to run\n"
"SQLResult: Result of the SQLQuery\n"
"Answer: Final answer here\n"
"Only use the tables listed below.\n"
"{schema}\n"
"Question: {query_str}\n"
"SQLQuery: "
)
DEFAULT_TEXT_TO_SQL_PROMPT = Prompt(
DEFAULT_TEXT_TO_SQL_TMPL,
stop_token="\nSQLResult:",
prompt_type=PromptType.TEXT_TO_SQL,
)
# NOTE: by partially filling schema, we can reduce to a QuestionAnswer prompt
# that we can feed to our table
DEFAULT_TABLE_CONTEXT_TMPL = (
"We have provided a table schema below. "
"---------------------\n"
"{schema}\n"
"---------------------\n"
"We have also provided context information below. "
"{context_str}\n"
"---------------------\n"
"Given the context information and the table schema, "
"give a response to the following task: {query_str}"
)
DEFAULT_TABLE_CONTEXT_QUERY = (
"Provide a high-level description of the table, "
"as well as a description of each column in the table. "
"Provide answers in the following format:\n"
"TableDescription: <description>\n"
"Column1Description: <description>\n"
"Column2Description: <description>\n"
"...\n\n"
)
DEFAULT_TABLE_CONTEXT_PROMPT = Prompt(
DEFAULT_TABLE_CONTEXT_TMPL, prompt_type=PromptType.TABLE_CONTEXT
)
# NOTE: by partially filling schema, we can reduce to a RefinePrompt
# that we can feed to our table
DEFAULT_REFINE_TABLE_CONTEXT_TMPL = (
"We have provided a table schema below. "
"---------------------\n"
"{schema}\n"
"---------------------\n"
"We have also provided some context information below. "
"{context_msg}\n"
"---------------------\n"
"Given the context information and the table schema, "
"give a response to the following task: {query_str}\n"
"We have provided an existing answer: {existing_answer}\n"
"Given the new context, refine the original answer to better "
"answer the question. "
"If the context isn't useful, return the original answer."
)
DEFAULT_REFINE_TABLE_CONTEXT_PROMPT = Prompt(
DEFAULT_REFINE_TABLE_CONTEXT_TMPL, prompt_type=PromptType.TABLE_CONTEXT
)
############################################
# Knowledge-Graph Table
############################################
DEFAULT_KG_TRIPLET_EXTRACT_TMPL = (
"Some text is provided below. Given the text, extract up to "
"{max_knowledge_triplets} "
"knowledge triplets in the form of (subject, predicate, object). Avoid stopwords.\n"
"---------------------\n"
"Example:"
"Text: Alice is Bob's mother."
"Triplets:\n(Alice, is mother of, Bob)\n"
"Text: Philz is a coffee shop founded in Berkeley in 1982.\n"
"Triplets:\n"
"(Philz, is, coffee shop)\n"
"(Philz, founded in, Berkeley)\n"
"(Philz, founded in, 1982)\n"
"---------------------\n"
"Text: {text}\n"
"Triplets:\n"
)
DEFAULT_KG_TRIPLET_EXTRACT_PROMPT = Prompt(
DEFAULT_KG_TRIPLET_EXTRACT_TMPL, prompt_type=PromptType.KNOWLEDGE_TRIPLET_EXTRACT
)
############################################
# HYDE
##############################################
HYDE_TMPL = (
"Please write a passage to answer the question\n"
"Try to include as many key details as possible.\n"
"\n"
"\n"
"{context_str}\n"
"\n"
"\n"
'Passage:"""\n'
)
DEFAULT_HYDE_PROMPT = Prompt(HYDE_TMPL, prompt_type=PromptType.SUMMARY)
############################################
# Simple Input
############################################
DEFAULT_SIMPLE_INPUT_TMPL = "{query_str}"
DEFAULT_SIMPLE_INPUT_PROMPT = Prompt(
DEFAULT_SIMPLE_INPUT_TMPL, prompt_type=PromptType.SIMPLE_INPUT
)
############################################
# Pandas
############################################
DEFAULT_PANDAS_TMPL = (
"You are working with a pandas dataframe in Python.\n"
"The name of the dataframe is `df`.\n"
"This is the result of `print(df.head())`:\n"
"{df_str}\n\n"
"Here is the input query: {query_str}.\n"
"Given the df information and the input query, please follow "
"these instructions:\n"
"{instruction_str}"
"Output:\n"
)
DEFAULT_PANDAS_PROMPT = Prompt(DEFAULT_PANDAS_TMPL, prompt_type=PromptType.PANDAS)
############################################
# JSON Path
############################################
DEFAULT_JSON_PATH_TMPL = (
"We have provided a JSON schema below:\n"
"{schema}\n"
"Given a task, respond with a JSON Path query that "
"can retrieve data from a JSON value that matches the schema.\n"
"Task: {query_str}\n"
"JSONPath: "
)
DEFAULT_JSON_PATH_PROMPT = Prompt(
DEFAULT_JSON_PATH_TMPL, prompt_type=PromptType.JSON_PATH
)
| [
"Context information is below. It is provided in a numbered list (1 to {num_chunks}),where each item in the list corresponds to a summary.\n---------------------\n{context_list}---------------------\nGiven the context information, here is a new piece of information: {new_chunk_text}\nAnswer with the number corresponding to the summary that should be updated. The answer should be the number corresponding to the summary that is most relevant to the question.\n",
"\nSQLResult:",
"Some choices are given below. It is provided in a numbered list (1 to {num_chunks}), where each item in the list corresponds to a summary.\n---------------------\n{context_list}\n---------------------\nUsing only the choices above and not prior knowledge, return the top choices (no more than {branching_factor}, ranked by most relevant to least) that are most relevant to the question: '{query_str}'\nProvide choices in the following format: 'ANSWER: <numbers>' and explain why these summaries were selected in relation to the question.\n",
"Some text is provided below. Given the text, extract up to {max_keywords} keywords from the text. Avoid stopwords.---------------------\n{text}\n---------------------\nProvide keywords in the following comma-separated format: 'KEYWORDS: <keywords>'\n",
"Context information is below. \n---------------------\n{context_str}\n---------------------\nGiven the context information and not prior knowledge, answer the question: {query_str}\n",
"A question is provided below. Given the question, extract up to {max_keywords} keywords from the text. Focus on extracting the keywords that we can use to best lookup answers to the question. Avoid stopwords.\n---------------------\n{question}\n---------------------\nProvide keywords in the following comma-separated format: 'KEYWORDS: <keywords>'\n",
"Some choices are given below. It is provided in a numbered list (1 to {num_chunks}),where each item in the list corresponds to a summary.\n---------------------\n{context_list}\n---------------------\nUsing only the choices above and not prior knowledge, return the choice that is most relevant to the question: '{query_str}'\nProvide choice in the following format: 'ANSWER: <number>' and explain why this summary was selected in relation to the question.\n",
"The original question is as follows: {query_str}\nWe have provided an existing answer: {existing_answer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context_msg}\n------------\nGiven the new context, refine the original answer to better answer the question. If the context isn't useful, return the original answer.",
"Write a summary of the following. Try to use only the information provided. Try to include as many key details as possible.\n\n\n{context_str}\n\n\nSUMMARY:\"\"\"\n"
] |
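These defaults are plain Python format strings wrapped in `Prompt` objects, so the placeholders (`{context_str}`, `{query_str}`, `{num_chunks}`, and so on) can be inspected or rendered without touching an LLM. A small sketch using only standard `str.format` on the raw template (the `Prompt` wrapper itself is not exercised here):

```python
from llama_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT_TMPL

# Render the default question-answering template to see the exact text sent to the LLM.
rendered = DEFAULT_TEXT_QA_PROMPT_TMPL.format(
    context_str="Philz is a coffee shop founded in Berkeley in 1982.",
    query_str="Where was Philz founded?",
)
print(rendered)
```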
2024-01-10 | apecloud/llama_index | llama_index~query_engine~sub_question_query_engine.py | import asyncio
import logging
from typing import List, Optional, Sequence, cast
from langchain.input import get_color_mapping, print_text
from llama_index.async_utils import run_async_tasks
from llama_index.callbacks.base import CallbackManager
from llama_index.data_structs.node import Node, NodeWithScore
from llama_index.indices.query.base import BaseQueryEngine
from llama_index.indices.query.response_synthesis import ResponseSynthesizer
from llama_index.indices.query.schema import QueryBundle
from llama_index.indices.service_context import ServiceContext
from llama_index.question_gen.llm_generators import LLMQuestionGenerator
from llama_index.question_gen.types import BaseQuestionGenerator, SubQuestion
from llama_index.response.schema import RESPONSE_TYPE
from llama_index.tools.query_engine import QueryEngineTool
logger = logging.getLogger(__name__)
class SubQuestionQueryEngine(BaseQueryEngine):
"""Sub question query engine.
A query engine that breaks down a complex query (e.g. compare and contrast) into
many sub questions, each routed to a target query engine for execution.
After executing all sub questions, all responses are gathered and sent to
response synthesizer to produce the final response.
Args:
question_gen (BaseQuestionGenerator): A module for generating sub questions
given a complex question and tools.
response_synthesizer (ResponseSynthesizer): A response synthesizer for
generating the final response
query_engine_tools (Sequence[QueryEngineTool]): Tools to answer the
sub questions.
verbose (bool): whether to print intermediate questions and answers.
Defaults to True
use_async (bool): whether to execute the sub questions with asyncio.
Defaults to True
"""
def __init__(
self,
question_gen: BaseQuestionGenerator,
response_synthesizer: ResponseSynthesizer,
query_engine_tools: Sequence[QueryEngineTool],
callback_manager: Optional[CallbackManager] = None,
verbose: bool = True,
use_async: bool = False,
) -> None:
self._question_gen = question_gen
self._response_synthesizer = response_synthesizer
self._metadatas = [x.metadata for x in query_engine_tools]
self._query_engines = {
tool.metadata.name: tool.query_engine for tool in query_engine_tools
}
self._verbose = verbose
self._use_async = use_async
super().__init__(callback_manager)
@classmethod
def from_defaults(
cls,
query_engine_tools: Sequence[QueryEngineTool],
question_gen: Optional[BaseQuestionGenerator] = None,
response_synthesizer: Optional[ResponseSynthesizer] = None,
service_context: Optional[ServiceContext] = None,
verbose: bool = True,
use_async: bool = True,
) -> "SubQuestionQueryEngine":
callback_manager = None
if len(query_engine_tools) > 0:
callback_manager = query_engine_tools[0].query_engine.callback_manager
question_gen = question_gen or LLMQuestionGenerator.from_defaults(
service_context=service_context
)
synth = response_synthesizer or ResponseSynthesizer.from_args(
callback_manager=callback_manager,
service_context=service_context,
)
return cls(
question_gen,
synth,
query_engine_tools,
callback_manager=callback_manager,
verbose=verbose,
use_async=use_async,
)
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
sub_questions = self._question_gen.generate(self._metadatas, query_bundle)
colors = get_color_mapping([str(i) for i in range(len(sub_questions))])
if self._verbose:
print_text(f"Generated {len(sub_questions)} sub questions.\n")
if self._use_async:
tasks = [
self._aquery_subq(sub_q, color=colors[str(ind)])
for ind, sub_q in enumerate(sub_questions)
]
nodes_all = run_async_tasks(tasks)
nodes_all = cast(List[Optional[NodeWithScore]], nodes_all)
else:
nodes_all = [
self._query_subq(sub_q, color=colors[str(ind)])
for ind, sub_q in enumerate(sub_questions)
]
# filter out sub questions that failed
nodes: List[NodeWithScore] = list(filter(None, nodes_all))
return self._response_synthesizer.synthesize(
query_bundle=query_bundle,
nodes=nodes,
)
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
sub_questions = await self._question_gen.agenerate(
self._metadatas, query_bundle
)
colors = get_color_mapping([str(i) for i in range(len(sub_questions))])
if self._verbose:
print_text(f"Generated {len(sub_questions)} sub questions.\n")
tasks = [
self._aquery_subq(sub_q, color=colors[str(ind)])
for ind, sub_q in enumerate(sub_questions)
]
nodes_all = await asyncio.gather(*tasks)
nodes_all = cast(List[Optional[NodeWithScore]], nodes_all)
# filter out sub questions that failed
nodes = list(filter(None, nodes_all))
return await self._response_synthesizer.asynthesize(
query_bundle=query_bundle,
nodes=nodes,
)
async def _aquery_subq(
self, sub_q: SubQuestion, color: Optional[str] = None
) -> Optional[NodeWithScore]:
try:
question = sub_q.sub_question
query_engine = self._query_engines[sub_q.tool_name]
if self._verbose:
print_text(f"[{sub_q.tool_name}] Q: {question}\n", color=color)
response = await query_engine.aquery(question)
response_text = str(response)
node_text = f"Sub question: {question}\nResponse: {response_text}"
if self._verbose:
print_text(f"[{sub_q.tool_name}] A: {response_text}\n", color=color)
return NodeWithScore(Node(text=node_text))
except ValueError:
logger.warn(f"[{sub_q.tool_name}] Failed to run {question}")
return None
def _query_subq(
self, sub_q: SubQuestion, color: Optional[str] = None
) -> Optional[NodeWithScore]:
try:
question = sub_q.sub_question
query_engine = self._query_engines[sub_q.tool_name]
if self._verbose:
print_text(f"[{sub_q.tool_name}] Q: {question}\n", color=color)
response = query_engine.query(question)
response_text = str(response)
node_text = f"Sub question: {question}\nResponse: {response_text}"
if self._verbose:
print_text(f"[{sub_q.tool_name}] A: {response_text}\n", color=color)
return NodeWithScore(Node(text=node_text))
except ValueError:
logger.warn(f"[{sub_q.tool_name}] Failed to run {question}")
return None
| [] |
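A minimal wiring sketch for the engine above, assuming this snapshot's import paths and OpenAI credentials for the default question generator and response synthesizer; the two toy indices and their names and descriptions are made up for illustration.

```python
from llama_index import Document, ListIndex
from llama_index.tools.query_engine import QueryEngineTool
from llama_index.query_engine.sub_question_query_engine import SubQuestionQueryEngine

# Two toy data sources standing in for real indices.
lyft_engine = ListIndex.from_documents([Document("Lyft revenue grew 36% in 2021.")]).as_query_engine()
uber_engine = ListIndex.from_documents([Document("Uber revenue grew 57% in 2021.")]).as_query_engine()

query_engine_tools = [
    QueryEngineTool.from_defaults(
        query_engine=lyft_engine, name="lyft_10k", description="Facts about Lyft's 2021 financials"
    ),
    QueryEngineTool.from_defaults(
        query_engine=uber_engine, name="uber_10k", description="Facts about Uber's 2021 financials"
    ),
]

# from_defaults builds an LLMQuestionGenerator and ResponseSynthesizer for us.
engine = SubQuestionQueryEngine.from_defaults(query_engine_tools=query_engine_tools)
response = engine.query("Compare the 2021 revenue growth of Uber and Lyft.")
print(response)
```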
2024-01-10 | apecloud/llama_index | llama_index~query_engine~sql_join_query_engine.py | """SQL Join query engine."""
from langchain.input import print_text
from typing import Optional, cast, Dict, Callable
from llama_index.indices.query.base import BaseQueryEngine
from llama_index.indices.struct_store.sql_query import NLStructStoreQueryEngine
from llama_index.indices.query.schema import QueryBundle
from llama_index.response.schema import RESPONSE_TYPE, Response
from llama_index.tools.query_engine import QueryEngineTool
from llama_index.indices.service_context import ServiceContext
from llama_index.selectors.llm_selectors import LLMSingleSelector
from llama_index.prompts.base import Prompt
from llama_index.indices.query.query_transform.base import BaseQueryTransform
import logging
from llama_index.langchain_helpers.chain_wrapper import LLMPredictor
from llama_index.llm_predictor.base import BaseLLMPredictor
from llama_index.callbacks.base import CallbackManager
logger = logging.getLogger(__name__)
DEFAULT_SQL_JOIN_SYNTHESIS_PROMPT_TMPL = """
The original question is given below.
This question has been translated into a SQL query. Both the SQL query and the response are given below.
Given the SQL response, the question has also been transformed into a more detailed query,
and executed against another query engine.
The transformed query and query engine response are also given below.
Given SQL query, SQL response, transformed query, and query engine response, please synthesize a response to the original question.
Original question: {query_str}
SQL query: {sql_query_str}
SQL response: {sql_response_str}
Transformed query: {query_engine_query_str}
Query engine response: {query_engine_response_str}
Response:
""" # noqa
DEFAULT_SQL_JOIN_SYNTHESIS_PROMPT = Prompt(DEFAULT_SQL_JOIN_SYNTHESIS_PROMPT_TMPL)
DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT_TMPL = """
"The original question is given below.
This question has been translated into a SQL query. Both the SQL query and the response are given below.
The SQL response either answers the question, or should provide additional context that can be used to make the question more specific.
Your job is to come up with a more specific question that needs to be answered to fully answer the original question, or 'None' if the original question has already been fully answered from the SQL response. Do not create a new question that is irrelevant to the original question; in that case return None instead.
Examples:
Original question: Please give more details about the demographics of the city with the highest population.
SQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1
SQL response: The city with the highest population is New York City.
New question: Can you tell me more about the demographics of New York City?
Original question: Please compare the sports environment of cities in North America.
SQL query: SELECT city_name FROM cities WHERE continent = 'North America' LIMIT 3
SQL response: The cities in North America are New York, San Francisco, and Toronto.
New question: What sports are played in New York, San Francisco, and Toronto?
Original question: What is the city with the highest population?
SQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1
SQL response: The city with the highest population is New York City.
New question: None
Original question: What countries are the top 3 ATP players from?
SQL query: SELECT country FROM players WHERE rank <= 3
SQL response: The top 3 ATP players are from Serbia, Russia, and Spain.
New question: None
Original question: {query_str}
SQL query: {sql_query_str}
SQL response: {sql_response_str}
New question: "
""" # noqa
DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT = Prompt(DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT_TMPL)
def _default_check_stop(query_bundle: QueryBundle) -> bool:
"""Default check stop function."""
return query_bundle.query_str.lower() == "none"
def _format_sql_query(sql_query: str) -> str:
"""Format SQL query."""
return sql_query.replace("\n", " ").replace("\t", " ")
class SQLAugmentQueryTransform(BaseQueryTransform):
"""SQL Augment Query Transform.
This query transform will transform the query into a more specific query
after augmenting with SQL results.
Args:
llm_predictor (LLMPredictor): LLM predictor to use for query transformation.
sql_augment_transform_prompt (Prompt): Prompt to use for query transformation.
check_stop_parser (Optional[Callable[[str], bool]]): Check stop function.
"""
def __init__(
self,
llm_predictor: Optional[BaseLLMPredictor] = None,
sql_augment_transform_prompt: Optional[Prompt] = None,
check_stop_parser: Optional[Callable[[QueryBundle], bool]] = None,
) -> None:
"""Initialize params."""
self._llm_predictor = llm_predictor or LLMPredictor()
self._sql_augment_transform_prompt = (
sql_augment_transform_prompt or DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT
)
self._check_stop_parser = check_stop_parser or _default_check_stop
def _run(self, query_bundle: QueryBundle, extra_info: Dict) -> QueryBundle:
"""Run query transform."""
query_str = query_bundle.query_str
sql_query = extra_info["sql_query"]
sql_query_response = extra_info["sql_query_response"]
new_query_str, formatted_prompt = self._llm_predictor.predict(
self._sql_augment_transform_prompt,
query_str=query_str,
sql_query_str=sql_query,
sql_response_str=sql_query_response,
)
return QueryBundle(
new_query_str, custom_embedding_strs=query_bundle.custom_embedding_strs
)
def check_stop(self, query_bundle: QueryBundle) -> bool:
"""Check if query indicates stop."""
return self._check_stop_parser(query_bundle)
class SQLJoinQueryEngine(BaseQueryEngine):
"""SQL Join Query Engine.
This query engine can "Join" a SQL database results
with another query engine.
It can decide whether it needs to query the SQL database or the other query engine.
If it decides to query the SQL database, it will first query the SQL database and
then decide whether to augment the result with information retrieved from the other query engine.
Args:
sql_query_tool (QueryEngineTool): Query engine tool for SQL database.
other_query_tool (QueryEngineTool): Other query engine tool.
selector (Optional[LLMSingleSelector]): Selector to use.
service_context (Optional[ServiceContext]): Service context to use.
sql_join_synthesis_prompt (Optional[Prompt]): Prompt to use for SQL join
synthesis.
sql_augment_query_transform (Optional[SQLAugmentQueryTransform]): Query
transform to use for SQL augmentation.
use_sql_join_synthesis (bool): Whether to use SQL join synthesis.
callback_manager (Optional[CallbackManager]): Callback manager to use.
verbose (bool): Whether to print intermediate results.
"""
def __init__(
self,
sql_query_tool: QueryEngineTool,
other_query_tool: QueryEngineTool,
selector: Optional[LLMSingleSelector] = None,
service_context: Optional[ServiceContext] = None,
sql_join_synthesis_prompt: Optional[Prompt] = None,
sql_augment_query_transform: Optional[SQLAugmentQueryTransform] = None,
use_sql_join_synthesis: bool = True,
callback_manager: Optional[CallbackManager] = None,
verbose: bool = True,
) -> None:
"""Initialize params."""
super().__init__(callback_manager=callback_manager)
# validate that the query engines are of the right type
if not isinstance(sql_query_tool.query_engine, NLStructStoreQueryEngine):
raise ValueError(
"sql_query_tool.query_engine must be an instance of "
"NLStructStoreQueryEngine"
)
self._sql_query_tool = sql_query_tool
self._other_query_tool = other_query_tool
sql_query_engine = cast(NLStructStoreQueryEngine, sql_query_tool.query_engine)
self._service_context = service_context or sql_query_engine.service_context
self._selector = selector or LLMSingleSelector.from_defaults()
self._sql_join_synthesis_prompt = (
sql_join_synthesis_prompt or DEFAULT_SQL_JOIN_SYNTHESIS_PROMPT
)
self._sql_augment_query_transform = (
sql_augment_query_transform
or SQLAugmentQueryTransform(
llm_predictor=self._service_context.llm_predictor
)
)
self._use_sql_join_synthesis = use_sql_join_synthesis
self._verbose = verbose
def _query_sql_other(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Query SQL database + other query engine in sequence."""
# first query SQL database
sql_response = self._sql_query_tool.query_engine.query(query_bundle)
if not self._use_sql_join_synthesis:
return sql_response
sql_query = (
sql_response.extra_info["sql_query"] if sql_response.extra_info else None
)
if self._verbose:
print_text(f"SQL query: {sql_query}\n", color="yellow")
print_text(f"SQL response: {sql_response}\n", color="yellow")
# given SQL db, transform query into new query
new_query = self._sql_augment_query_transform(
query_bundle.query_str,
extra_info={
"sql_query": _format_sql_query(sql_query),
"sql_query_response": str(sql_response),
},
)
if self._verbose:
print_text(
f"Transformed query given SQL response: {new_query.query_str}\n",
color="blue",
)
logger.info(f"> Transformed query given SQL response: {new_query.query_str}")
if self._sql_augment_query_transform.check_stop(new_query):
return sql_response
other_response = self._other_query_tool.query_engine.query(new_query)
if self._verbose:
print_text(f"query engine response: {other_response}\n", color="pink")
logger.info(f"> query engine response: {other_response}")
response_str, _ = self._service_context.llm_predictor.predict(
self._sql_join_synthesis_prompt,
query_str=query_bundle.query_str,
sql_query_str=sql_query,
sql_response_str=str(sql_response),
query_engine_query_str=new_query.query_str,
query_engine_response_str=str(other_response),
)
if self._verbose:
print_text(f"Final response: {response_str}\n", color="green")
response_extra_info = {
**(sql_response.extra_info or {}),
**(other_response.extra_info or {}),
}
source_nodes = other_response.source_nodes
return Response(
response_str,
extra_info=response_extra_info,
source_nodes=source_nodes,
)
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Query and get response."""
# TODO: see if this can be consolidated with logic in RouterQueryEngine
metadatas = [self._sql_query_tool.metadata, self._other_query_tool.metadata]
result = self._selector.select(metadatas, query_bundle)
# pick sql query
if result.ind == 0:
if self._verbose:
print_text(f"Querying SQL database: {result.reason}\n", color="blue")
logger.info(f"> Querying SQL database: {result.reason}")
return self._query_sql_other(query_bundle)
elif result.ind == 1:
if self._verbose:
print_text(
f"Querying other query engine: {result.reason}\n", color="blue"
)
logger.info(f"> Querying other query engine: {result.reason}")
response = self._other_query_tool.query_engine.query(query_bundle)
if self._verbose:
print_text(f"Query Engine response: {response}\n", color="pink")
return response
else:
raise ValueError(f"Invalid result.ind: {result.ind}")
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
# TODO: make async
return self._query(query_bundle)
| [
"\nThe original question is given below.\nThis question has been translated into a SQL query. Both the SQL query and the response are given below.\nGiven the SQL response, the question has also been transformed into a more detailed query,\nand executed against another query engine.\nThe transformed query and query engine response are also given below.\nGiven SQL query, SQL response, transformed query, and query engine response, please synthesize a response to the original question.\n\nOriginal question: {query_str}\nSQL query: {sql_query_str}\nSQL response: {sql_response_str}\nTransformed query: {query_engine_query_str}\nQuery engine response: {query_engine_response_str}\nResponse: \n",
"\n\"The original question is given below.\nThis question has been translated into a SQL query. Both the SQL query and the response are given below.\nThe SQL response either answers the question, or should provide additional context that can be used to make the question more specific.\nYour job is to come up with a more specific question that needs to be answered to fully answer the original question, or 'None' if the original question has already been fully answered from the SQL response. Do not create a new question that is irrelevant to the original question; in that case return None instead.\n\nExamples:\n\nOriginal question: Please give more details about the demographics of the city with the highest population.\nSQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1\nSQL response: The city with the highest population is New York City.\nNew question: Can you tell me more about the demographics of New York City?\n\nOriginal question: Please compare the sports environment of cities in North America.\nSQL query: SELECT city_name FROM cities WHERE continent = 'North America' LIMIT 3\nSQL response: The cities in North America are New York, San Francisco, and Toronto.\nNew question: What sports are played in New York, San Francisco, and Toronto?\n\nOriginal question: What is the city with the highest population?\nSQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1\nSQL response: The city with the highest population is New York City.\nNew question: None\n\nOriginal question: What countries are the top 3 ATP players from?\nSQL query: SELECT country FROM players WHERE rank <= 3\nSQL response: The top 3 ATP players are from Serbia, Russia, and Spain.\nNew question: None\n\nOriginal question: {query_str}\nSQL query: {sql_query_str}\nSQL response: {sql_response_str}\nNew question: \"\n",
"North America",
"None"
] |
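Two small pieces of this module can be exercised without any database or LLM: the stop check that decides whether the SQL answer already suffices, and the formatting helper used before injecting the SQL text into the synthesis prompt. A quick sketch (the question strings are made up):

```python
from llama_index.indices.query.schema import QueryBundle
from llama_index.query_engine.sql_join_query_engine import (
    _default_check_stop,
    _format_sql_query,
)

# The augment transform asks the LLM for a follow-up question; "None" means stop.
print(_default_check_stop(QueryBundle("None")))                                # True
print(_default_check_stop(QueryBundle("What sports are played in Toronto?")))  # False

# SQL text is flattened to one line before being placed into the synthesis prompt.
print(_format_sql_query("SELECT city\nFROM cities\n\tLIMIT 3"))
```

Building the full `SQLJoinQueryEngine` additionally requires a `QueryEngineTool` wrapping an `NLStructStoreQueryEngine` (the constructor enforces this) plus a second tool for the other engine.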
2024-01-10 | apecloud/llama_index | llama_index~indices~struct_store~json_query.py | import json
import logging
from typing import Any, Callable, Dict, List, Optional, Union
from langchain.input import print_text
from llama_index.indices.query.base import BaseQueryEngine
from llama_index.indices.query.schema import QueryBundle
from llama_index.indices.service_context import ServiceContext
from llama_index.prompts.base import Prompt
from llama_index.prompts.default_prompts import DEFAULT_JSON_PATH_PROMPT
from llama_index.prompts.prompt_type import PromptType
from llama_index.response.schema import Response
from llama_index.token_counter.token_counter import llm_token_counter
logger = logging.getLogger(__name__)
IMPORT_ERROR_MSG = (
"`jsonpath_ng` package not found, please run `pip install jsonpath-ng`"
)
JSONType = Union[Dict[str, "JSONType"], List["JSONType"], str, int, float, bool, None]
DEFAULT_RESPONSE_SYNTHESIS_PROMPT_TMPL = (
"Given an input question about a JSON value, synthesize a response "
"from the query results.\n"
"Query: {query_str}\n"
"JSON Schema: {json_schema}\n"
"JSON Path: {json_path}\n"
"Value at path: {json_path_value}\n"
"Response: "
)
DEFAULT_RESPONSE_SYNTHESIS_PROMPT = Prompt(
DEFAULT_RESPONSE_SYNTHESIS_PROMPT_TMPL,
prompt_type=PromptType.SQL_RESPONSE_SYNTHESIS,
)
def default_output_processor(llm_output: str, json_value: JSONType) -> JSONType:
"""Default output processor that executes the JSON Path query."""
try:
from jsonpath_ng.ext import parse
from jsonpath_ng.jsonpath import DatumInContext
except ImportError as exc:
raise ImportError(IMPORT_ERROR_MSG) from exc
datum: List[DatumInContext] = parse(llm_output).find(json_value)
return [d.value for d in datum]
class JSONQueryEngine(BaseQueryEngine):
"""GPT JSON Query Engine.
Converts natural language to JSON Path queries.
Args:
json_value (JSONType): JSON value
json_schema (JSONType): JSON schema
service_context (ServiceContext): ServiceContext
json_path_prompt (Prompt): The JSON Path prompt to use.
output_processor (Callable): The output processor that executes the
JSON Path query.
output_kwargs (dict): Additional output processor kwargs for the
output_processor function.
verbose (bool): Whether to print verbose output.
"""
def __init__(
self,
json_value: JSONType,
json_schema: JSONType,
service_context: ServiceContext,
json_path_prompt: Optional[Prompt] = None,
output_processor: Optional[Callable] = None,
output_kwargs: Optional[dict] = None,
synthesize_response: bool = True,
response_synthesis_prompt: Optional[Prompt] = None,
verbose: bool = False,
**kwargs: Any,
) -> None:
"""Initialize params."""
self._json_value = json_value
self._json_schema = json_schema
self._service_context = service_context
self._json_path_prompt = json_path_prompt or DEFAULT_JSON_PATH_PROMPT
self._output_processor = output_processor or default_output_processor
self._output_kwargs = output_kwargs or {}
self._verbose = verbose
self._synthesize_response = synthesize_response
self._response_synthesis_prompt = (
response_synthesis_prompt or DEFAULT_RESPONSE_SYNTHESIS_PROMPT
)
super().__init__(self._service_context.callback_manager)
def _get_schema_context(self) -> str:
"""Get JSON schema context."""
return json.dumps(self._json_schema)
@llm_token_counter("query")
def _query(self, query_bundle: QueryBundle) -> Response:
"""Answer a query."""
schema = self._get_schema_context()
(
json_path_response_str,
formatted_prompt,
) = self._service_context.llm_predictor.predict(
self._json_path_prompt,
schema=schema,
query_str=query_bundle.query_str,
)
if self._verbose:
print_text(f"> JSONPath Prompt: {formatted_prompt}\n")
print_text(
f"> JSONPath Instructions:\n" f"```\n{json_path_response_str}\n```\n"
)
json_path_output = self._output_processor(
json_path_response_str,
self._json_value,
**self._output_kwargs,
)
if self._verbose:
print_text(f"> JSONPath Output: {json_path_output}\n")
if self._synthesize_response:
response_str, _ = self._service_context.llm_predictor.predict(
self._response_synthesis_prompt,
query_str=query_bundle.query_str,
json_schema=self._json_schema,
json_path=json_path_response_str,
json_path_value=json_path_output,
)
else:
response_str = json.dumps(json_path_output)
response_extra_info = {
"json_path_response_str": json_path_response_str,
}
return Response(response=response_str, extra_info=response_extra_info)
@llm_token_counter("aquery")
async def _aquery(self, query_bundle: QueryBundle) -> Response:
schema = self._get_schema_context()
(
json_path_response_str,
formatted_prompt,
) = await self._service_context.llm_predictor.apredict(
self._json_path_prompt,
schema=schema,
query_str=query_bundle.query_str,
)
if self._verbose:
print_text(f"> JSONPath Prompt: {formatted_prompt}\n")
print_text(
f"> JSONPath Instructions:\n" f"```\n{json_path_response_str}\n```\n"
)
json_path_output = self._output_processor(
json_path_response_str,
self._json_value,
**self._output_kwargs,
)
if self._verbose:
print_text(f"> JSONPath Output: {json_path_output}\n")
if self._synthesize_response:
response_str, _ = await self._service_context.llm_predictor.apredict(
self._response_synthesis_prompt,
query_str=query_bundle.query_str,
json_schema=self._json_schema,
json_path=json_path_response_str,
json_path_value=json_path_output,
)
else:
response_str = json.dumps(json_path_output)
response_extra_info = {
"json_path_response_str": json_path_response_str,
}
return Response(response=response_str, extra_info=response_extra_info)
| [
"Given an input question about a JSON value, synthesize a response from the query results.\nQuery: {query_str}\nJSON Schema: {json_schema}\nJSON Path: {json_path}\nValue at path: {json_path_value}\nResponse: "
] |
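The JSONPath execution step above can be tried on its own, without a `ServiceContext` or LLM, by calling `default_output_processor` with a hand-written path; this assumes the `jsonpath-ng` package is installed and uses a made-up document.

```python
from llama_index.indices.struct_store.json_query import default_output_processor

json_value = {
    "blogPosts": [
        {"id": 1, "title": "First blog post", "content": "This is my first blog post"},
        {"id": 2, "title": "Second blog post", "content": "This is my second blog post"},
    ]
}

# Stand-in for the string the LLM would produce for "What are the blog post titles?"
llm_output = "$.blogPosts[*].title"
print(default_output_processor(llm_output, json_value))
# ['First blog post', 'Second blog post']
```

The full `JSONQueryEngine(json_value=..., json_schema=..., service_context=...)` adds the LLM step that writes this JSONPath from the schema and the natural-language query.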
2024-01-10 | apecloud/llama_index | llama_index~indices~struct_store~pandas_query.py | """Default query for PandasIndex."""
import logging
from typing import Any, Callable, Optional
import pandas as pd
from langchain.input import print_text
from llama_index.indices.query.base import BaseQueryEngine
from llama_index.indices.query.schema import QueryBundle
from llama_index.indices.struct_store.pandas import PandasIndex
from llama_index.prompts.default_prompts import DEFAULT_PANDAS_PROMPT
from llama_index.prompts.prompts import PandasPrompt
from llama_index.response.schema import Response
logger = logging.getLogger(__name__)
DEFAULT_INSTRUCTION_STR = (
"We wish to convert this query to executable Python code using Pandas.\n"
"The final line of code should be a Python expression that can be called "
"with the `eval()` function. This expression should represent a solution "
"to the query."
)
def default_output_processor(
output: str, df: pd.DataFrame, **output_kwargs: Any
) -> str:
"""Process outputs in a default manner."""
import ast
import sys
import traceback
if sys.version_info < (3, 9):
logger.warn(
"Python version must be >= 3.9 in order to use "
"the default output processor, which executes "
"the Python query. Instead, we will return the "
"raw Python instructions as a string."
)
return output
local_vars = {"df": df}
# NOTE: inspired from langchain's tool
# see langchain.tools.python.tool (PythonAstREPLTool)
try:
tree = ast.parse(output)
module = ast.Module(tree.body[:-1], type_ignores=[])
exec(ast.unparse(module), {}, local_vars) # type: ignore
module_end = ast.Module(tree.body[-1:], type_ignores=[])
module_end_str = ast.unparse(module_end) # type: ignore
try:
return str(eval(module_end_str, {}, local_vars))
except Exception as e:
raise e
except Exception as e:
err_string = (
"There was an error running the output as Python code. "
f"Error message: {e}"
)
traceback.print_exc()
return err_string
class NLPandasQueryEngine(BaseQueryEngine):
"""GPT Pandas query.
Convert natural language to Pandas python code.
Args:
df (pd.DataFrame): Pandas dataframe to use.
instruction_str (Optional[str]): Instruction string to use.
output_processor (Optional[Callable[[str], str]]): Output processor.
A callable that takes in the output string, pandas DataFrame,
and any output kwargs and returns a string.
pandas_prompt (Optional[PandasPrompt]): Pandas prompt to use.
head (int): Number of rows to show in the table context.
"""
def __init__(
self,
index: PandasIndex,
instruction_str: Optional[str] = None,
output_processor: Optional[Callable] = None,
pandas_prompt: Optional[PandasPrompt] = None,
output_kwargs: Optional[dict] = None,
head: int = 5,
verbose: bool = False,
**kwargs: Any,
) -> None:
"""Initialize params."""
self.df = index.df
self._service_context = index.service_context
self._head = head
self._pandas_prompt = pandas_prompt or DEFAULT_PANDAS_PROMPT
self._instruction_str = instruction_str or DEFAULT_INSTRUCTION_STR
self._output_processor = output_processor or default_output_processor
self._output_kwargs = output_kwargs or {}
self._verbose = verbose
super().__init__(self._service_context.callback_manager)
def _get_table_context(self) -> str:
"""Get table context."""
return str(self.df.head(self._head))
def _query(self, query_bundle: QueryBundle) -> Response:
"""Answer a query."""
context = self._get_table_context()
(
pandas_response_str,
formatted_prompt,
) = self._service_context.llm_predictor.predict(
self._pandas_prompt,
df_str=context,
query_str=query_bundle.query_str,
instruction_str=self._instruction_str,
)
if self._verbose:
print_text(f"> Pandas Instructions:\n" f"```\n{pandas_response_str}\n```\n")
pandas_output = self._output_processor(
pandas_response_str,
self.df,
**self._output_kwargs,
)
if self._verbose:
print_text(f"> Pandas Output: {pandas_output}\n")
response_extra_info = {
"pandas_instruction_str": pandas_response_str,
}
return Response(response=pandas_output, extra_info=response_extra_info)
async def _aquery(self, query_bundle: QueryBundle) -> Response:
return self._query(query_bundle)
# legacy
GPTNLPandasQueryEngine = NLPandasQueryEngine
| [] |
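The `default_output_processor` above is the piece that actually runs the generated pandas code: it `exec`s everything but the last statement and `eval`s the final expression against `df`. It can be exercised directly with a hand-written "LLM output" (Python 3.9+ is required, as the code itself warns):

```python
import pandas as pd
from llama_index.indices.struct_store.pandas_query import default_output_processor

df = pd.DataFrame(
    {"city": ["Toronto", "Tokyo", "Berlin"], "population": [2_930_000, 13_960_000, 3_645_000]}
)

# Pretend this came back from the LLM for "Which city has the highest population?"
fake_llm_output = "df.loc[df['population'].idxmax(), 'city']"
print(default_output_processor(fake_llm_output, df))  # -> Tokyo
```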
2024-01-10 | apecloud/llama_index | llama_index~token_counter~mock_chain_wrapper.py | """Mock chain wrapper."""
from typing import Any, Dict, Optional
from langchain.llms.base import BaseLLM
from llama_index.constants import DEFAULT_NUM_OUTPUTS
from llama_index.langchain_helpers.chain_wrapper import LLMPredictor
from llama_index.prompts.base import Prompt
from llama_index.prompts.prompt_type import PromptType
from llama_index.token_counter.utils import (
mock_extract_keywords_response,
mock_extract_kg_triplets_response,
)
from llama_index.utils import globals_helper
# TODO: consolidate with unit tests in tests/mock_utils/mock_predict.py
def _mock_summary_predict(max_tokens: int, prompt_args: Dict) -> str:
"""Mock summary predict."""
# tokens in response shouldn't be larger than tokens in `context_str`
num_text_tokens = len(globals_helper.tokenizer(prompt_args["context_str"]))
token_limit = min(num_text_tokens, max_tokens)
return " ".join(["summary"] * token_limit)
def _mock_insert_predict() -> str:
"""Mock insert predict."""
return "ANSWER: 1"
def _mock_query_select() -> str:
"""Mock query select."""
return "ANSWER: 1"
def _mock_query_select_multiple(num_chunks: int) -> str:
"""Mock query select."""
nums_str = ", ".join([str(i) for i in range(num_chunks)])
return f"ANSWER: {nums_str}"
def _mock_answer(max_tokens: int, prompt_args: Dict) -> str:
"""Mock answer."""
# tokens in response shouldn't be larger than tokens in `text`
num_ctx_tokens = len(globals_helper.tokenizer(prompt_args["context_str"]))
token_limit = min(num_ctx_tokens, max_tokens)
return " ".join(["answer"] * token_limit)
def _mock_refine(max_tokens: int, prompt: Prompt, prompt_args: Dict) -> str:
"""Mock refine."""
# tokens in response shouldn't be larger than tokens in
# `existing_answer` + `context_msg`
# NOTE: if existing_answer is not in prompt_args, we need to get it from the prompt
if "existing_answer" not in prompt_args:
existing_answer = prompt.partial_dict["existing_answer"]
else:
existing_answer = prompt_args["existing_answer"]
num_ctx_tokens = len(globals_helper.tokenizer(prompt_args["context_msg"]))
num_exist_tokens = len(globals_helper.tokenizer(existing_answer))
token_limit = min(num_ctx_tokens + num_exist_tokens, max_tokens)
return " ".join(["answer"] * token_limit)
def _mock_keyword_extract(prompt_args: Dict) -> str:
"""Mock keyword extract."""
return mock_extract_keywords_response(prompt_args["text"])
def _mock_query_keyword_extract(prompt_args: Dict) -> str:
"""Mock query keyword extract."""
return mock_extract_keywords_response(prompt_args["question"])
def _mock_knowledge_graph_triplet_extract(prompt_args: Dict, max_triplets: int) -> str:
"""Mock knowledge graph triplet extract."""
return mock_extract_kg_triplets_response(
prompt_args["text"], max_triplets=max_triplets
)
class MockLLMPredictor(LLMPredictor):
"""Mock LLM Predictor."""
def __init__(
self, max_tokens: int = DEFAULT_NUM_OUTPUTS, llm: Optional[BaseLLM] = None
) -> None:
"""Initialize params."""
super().__init__(llm)
# NOTE: don't call super, we don't want to instantiate LLM
self.max_tokens = max_tokens
self._total_tokens_used = 0
self.flag = True
self._last_token_usage = None
def _predict(self, prompt: Prompt, **prompt_args: Any) -> str:
"""Mock predict."""
prompt_str = prompt.prompt_type
if prompt_str == PromptType.SUMMARY:
return _mock_summary_predict(self.max_tokens, prompt_args)
elif prompt_str == PromptType.TREE_INSERT:
return _mock_insert_predict()
elif prompt_str == PromptType.TREE_SELECT:
return _mock_query_select()
elif prompt_str == PromptType.TREE_SELECT_MULTIPLE:
return _mock_query_select_multiple(prompt_args["num_chunks"])
elif prompt_str == PromptType.REFINE:
return _mock_refine(self.max_tokens, prompt, prompt_args)
elif prompt_str == PromptType.QUESTION_ANSWER:
return _mock_answer(self.max_tokens, prompt_args)
elif prompt_str == PromptType.KEYWORD_EXTRACT:
return _mock_keyword_extract(prompt_args)
elif prompt_str == PromptType.QUERY_KEYWORD_EXTRACT:
return _mock_query_keyword_extract(prompt_args)
elif prompt_str == PromptType.KNOWLEDGE_TRIPLET_EXTRACT:
return _mock_knowledge_graph_triplet_extract(
prompt_args, prompt.partial_dict.get("max_knowledge_triplets", 2)
)
elif prompt_str == PromptType.CUSTOM:
# we don't know specific prompt type, return generic response
return ""
else:
raise ValueError("Invalid prompt type.")
| [] |
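A small sketch of the mock predictor in isolation: `predict` dispatches on the prompt type and fabricates filler text, so it can be used to estimate token usage without paying for completions. Note that, as written above, the constructor still calls `LLMPredictor.__init__`, so OpenAI credentials may need to be configured even though no requests are sent; the context string is made up.

```python
from llama_index.prompts.default_prompts import DEFAULT_SUMMARY_PROMPT
from llama_index.token_counter.mock_chain_wrapper import MockLLMPredictor

mock = MockLLMPredictor(max_tokens=16)

# SUMMARY prompts return the word "summary" repeated, capped by max_tokens and
# by the token length of context_str, mimicking a worst-case-sized response.
answer, formatted_prompt = mock.predict(
    DEFAULT_SUMMARY_PROMPT,
    context_str="Philz is a coffee shop founded in Berkeley in 1982.",
)
print(answer)
```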
2024-01-10 | apecloud/llama_index | llama_index~chat_engine~react.py | from typing import Any, Optional, Sequence
from llama_index.chat_engine.types import BaseChatEngine, ChatHistoryType
from llama_index.chat_engine.utils import is_chat_model, to_langchain_chat_history
from llama_index.indices.query.base import BaseQueryEngine
from llama_index.indices.service_context import ServiceContext
from llama_index.langchain_helpers.agents.agents import (
AgentExecutor,
AgentType,
initialize_agent,
)
from llama_index.llm_predictor.base import LLMPredictor
from llama_index.response.schema import RESPONSE_TYPE, Response
from llama_index.tools.query_engine import QueryEngineTool
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_memory import BaseChatMemory
class ReActChatEngine(BaseChatEngine):
"""ReAct Chat Engine.
Use a ReAct agent loop with query engine tools. Implemented via LangChain agent.
"""
def __init__(
self,
query_engine_tools: Sequence[QueryEngineTool],
service_context: ServiceContext,
memory: BaseChatMemory,
verbose: bool = False,
) -> None:
self._query_engine_tools = query_engine_tools
self._service_context = service_context
self._memory = memory
self._verbose = verbose
self._agent = self._create_agent()
@classmethod
def from_defaults(
cls,
query_engine_tools: Sequence[QueryEngineTool],
service_context: Optional[ServiceContext] = None,
memory: Optional[BaseChatMemory] = None,
chat_history: Optional[ChatHistoryType] = None,
verbose: bool = False,
**kwargs: Any,
) -> "ReActChatEngine":
"""Initialize a ReActChatEngine from default parameters."""
del kwargs # Unused
service_context = service_context or ServiceContext.from_defaults()
if chat_history is not None and memory is not None:
raise ValueError("Cannot specify both memory and chat_history.")
if memory is None:
history = to_langchain_chat_history(chat_history)
memory = ConversationBufferMemory(
memory_key="chat_history",
chat_memory=history,
return_messages=is_chat_model(service_context=service_context),
)
return cls(
query_engine_tools=query_engine_tools,
service_context=service_context,
memory=memory,
verbose=verbose,
)
@classmethod
def from_query_engine(
cls,
query_engine: BaseQueryEngine,
name: Optional[str] = None,
description: Optional[str] = None,
service_context: Optional[ServiceContext] = None,
memory: Optional[BaseChatMemory] = None,
chat_history: Optional[ChatHistoryType] = None,
verbose: bool = False,
**kwargs: Any,
) -> "ReActChatEngine":
query_engine_tool = QueryEngineTool.from_defaults(
query_engine=query_engine, name=name, description=description
)
return cls.from_defaults(
query_engine_tools=[query_engine_tool],
service_context=service_context,
memory=memory,
chat_history=chat_history,
verbose=verbose,
**kwargs,
)
def _create_agent(self) -> AgentExecutor:
tools = [qe_tool.as_langchain_tool() for qe_tool in self._query_engine_tools]
if not isinstance(self._service_context.llm_predictor, LLMPredictor):
raise ValueError("Currently only supports LangChain based LLMPredictor.")
llm = self._service_context.llm_predictor.llm
if is_chat_model(service_context=self._service_context):
agent_type = AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION
else:
agent_type = AgentType.CONVERSATIONAL_REACT_DESCRIPTION
return initialize_agent(
tools=tools,
llm=llm,
agent=agent_type,
memory=self._memory,
verbose=self._verbose,
)
def chat(self, message: str) -> RESPONSE_TYPE:
response = self._agent.run(input=message)
return Response(response=response)
async def achat(self, message: str) -> RESPONSE_TYPE:
response = await self._agent.arun(input=message)
return Response(response=response)
def reset(self) -> None:
self._memory = ConversationBufferMemory(
memory_key="chat_history",
return_messages=is_chat_model(service_context=self._service_context),
)
self._agent = self._create_agent()
| [] |
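A minimal conversation sketch using the `from_query_engine` convenience constructor shown above; it assumes OpenAI credentials (the underlying LangChain agent needs an LLM) and uses a toy single-document index that is made up for illustration.

```python
from llama_index import Document, ListIndex
from llama_index.chat_engine.react import ReActChatEngine

index = ListIndex.from_documents(
    [Document("Philz is a coffee shop founded in Berkeley in 1982.")]
)

chat_engine = ReActChatEngine.from_query_engine(
    query_engine=index.as_query_engine(),
    name="coffee_facts",
    description="Useful for questions about the Philz coffee shop.",
    verbose=True,
)

print(chat_engine.chat("Where was Philz founded?"))
print(chat_engine.chat("And in what year?"))  # conversation history lives in LangChain memory
chat_engine.reset()                           # wipes the memory and rebuilds the agent
```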
2024-01-10 | apecloud/llama_index | llama_index~evaluation~dataset_generation.py | """Dataset generation from documents"""
from __future__ import annotations
import re
from typing import List, Optional
from langchain.chat_models import ChatOpenAI
from llama_index import (
Document,
ListIndex,
LLMPredictor,
QuestionAnswerPrompt,
ServiceContext,
)
from llama_index.data_structs.node import Node, NodeWithScore
from llama_index.indices.postprocessor.node import KeywordNodePostprocessor
DEFAULT_QUESTION_GENERATION_PROMPT = """Context information is below.\n"
"\n---------------------\n{context_str}\n---------------------\n"
"Given the context information and not prior knowledge.\n"
"generate only questions based on the below query.\n"
"{query_str}\n"
"""
def _get_default_service_context() -> ServiceContext:
"""Get default service context."""
llm_predictor = LLMPredictor(
llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")
)
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor, chunk_size_limit=3000
)
return service_context
class DatasetGenerator:
"""Generate dataset (question/ question-answer pairs) \
based on the given documents.
NOTE: this is a beta feature, subject to change!
Args:
nodes (List[Node]): List of nodes. (Optional)
service_context (ServiceContext): Service Context.
num_questions_per_chunk: number of question to be \
generated per chunk. Each document is chunked of size 512 words.
text_question_template: Question generation template.
"""
def __init__(
self,
nodes: List[Node],
service_context: Optional[ServiceContext] = None,
num_questions_per_chunk: int = 10,
text_question_template: Optional[QuestionAnswerPrompt] = None,
question_gen_query: Optional[str] = None,
required_keywords: Optional[List[str]] = None,
exclude_keywords: Optional[List[str]] = None,
) -> None:
"""Init params."""
if service_context is None:
service_context = _get_default_service_context()
self.service_context = service_context
self.text_question_template = text_question_template or QuestionAnswerPrompt(
DEFAULT_QUESTION_GENERATION_PROMPT
)
self.question_gen_query = (
question_gen_query
or f"You are a Teacher/ Professor. Your task is to setup \
{num_questions_per_chunk} questions for an upcoming \
quiz/examination. The questions should be diverse in nature \
across the document. Restrict the questions to the \
context information provided."
)
self.nodes = nodes
@classmethod
def from_documents(
cls,
documents: List[Document],
service_context: Optional[ServiceContext] = None,
num_questions_per_chunk: int = 10,
text_question_template: Optional[QuestionAnswerPrompt] = None,
question_gen_query: Optional[str] = None,
required_keywords: Optional[List[str]] = None,
exclude_keywords: Optional[List[str]] = None,
) -> "DatasetGenerator":
"""Generate dataset from documents."""
if service_context is None:
service_context = _get_default_service_context()
nodes = service_context.node_parser.get_nodes_from_documents(documents)
# use node postprocessor to filter nodes
required_keywords = required_keywords or []
exclude_keywords = exclude_keywords or []
node_postprocessor = KeywordNodePostprocessor(
service_context=service_context,
required_keywords=required_keywords,
exclude_keywords=exclude_keywords,
)
node_with_scores = [NodeWithScore(node) for node in nodes]
node_with_scores = node_postprocessor.postprocess_nodes(node_with_scores)
nodes = [node_with_score.node for node_with_score in node_with_scores]
return cls(
nodes=nodes,
service_context=service_context,
num_questions_per_chunk=num_questions_per_chunk,
text_question_template=text_question_template,
question_gen_query=question_gen_query,
)
def _node_question_generator(
self, nodes: List[Node], num: Optional[int] = None
) -> List[str]:
"""Node question generator."""
questions: List[str] = []
for node in nodes:
if num is not None and len(questions) >= num:
break
index = ListIndex.from_documents([Document(node.get_text())])
query_engine = index.as_query_engine(
service_context=self.service_context,
text_qa_template=self.text_question_template,
use_async=True,
)
response = query_engine.query(
self.question_gen_query,
)
result = str(response).strip().split("\n")
cleaned_questions = [
re.sub(r"^\d+[\).\s]", "", question).strip() for question in result
]
questions.extend(cleaned_questions)
questions = [question for question in questions if question != ""]
if num is not None:
questions = questions[:num]
return questions
def generate_questions_from_nodes(self, num: Optional[int] = None) -> List[str]:
"""Generates questions for each document."""
return self._node_question_generator(self.nodes, num)
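# Hedged usage sketch (added for illustration, not part of the source file): the data
# directory and question counts are assumptions; generation calls the OpenAI API.
if __name__ == "__main__":
    from llama_index import SimpleDirectoryReader

    documents = SimpleDirectoryReader("./data").load_data()
    generator = DatasetGenerator.from_documents(documents, num_questions_per_chunk=3)
    for question in generator.generate_questions_from_nodes(num=5):
        print(question)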
| [
"Context information is below.\n\"\n\"\n---------------------\n{context_str}\n---------------------\n\"\n\"Given the context information and not prior knowledge.\n\"\n\"generate only questions based on the below query.\n\"\n\"{query_str}\n\"\n"
] |
2024-01-10 | KORINZ/openai-nhk-quiz | grade_response.py | import openai
from openai_key import api_key
openai.api_key = api_key
news_content = "神奈川県鎌倉市で「バリアフリービーチ」というイベントがありました。鎌倉市の医者などが、車いすを使っている人などにも海の中に入って楽しんでもらいたいと考えました。車いすの人と家族などの44のグループが参加しました。たくさんのボランティアが手伝いました。89歳の男性は、孫に手伝ってもらって、特別な車いすで海に入って楽しんでいました。男性は「海の水が気持ちよかったです」と話していました。1歳の女の子も家族と参加しました。女の子の病院の先生と看護師も一緒です。女の子は、水や砂に触ったり、お母さんと一緒に車いすで海に入ったりして、初めての海を楽しみました。お母さんは「娘は少しびっくりしていたようですが、夏のいい思い出になりました」と話していました。"
student_answer = "看護師(かんごし)、病院(びういん)、太平洋(たいへいよう)"
response = openai.ChatCompletion.create(model="gpt-3.5-turbo",
messages=[
{"role": "system",
"content": f"You are a Japanese language expert. Your task is to verify the furigana readings of given kanji words extracted from a news article and give a grade. For each kanji word, check if the provided furigana is correct based on the context of the article. If the kanji word is not from the news article, mark as incorrect. Grade the student based on the number of correct readings they provide out of a total of three. The article is as follows: {news_content} The student's answers are: {student_answer}"
},],
temperature=0,
)
grade = response['choices'][0]['message']['content'] # type: ignore
print(grade)
| [
"You are a Japanese language expert. Your task is to verify the furigana readings of given kanji words extracted from a news article and give a grade. For each kanji word, check if the provided furigana is correct based on the context of the article. If the kanji word is not from the news article, mark as incorrect. Grade the student based on the number of correct readings they provide out of a total of three. The article is as follows: 神奈川県鎌倉市で「バリアフリービーチ」というイベントがありました。鎌倉市の医者などが、車いすを使っている人などにも海の中に入って楽しんでもらいたいと考えました。車いすの人と家族などの44のグループが参加しました。たくさんのボランティアが手伝いました。89歳の男性は、孫に手伝ってもらって、特別な車いすで海に入って楽しんでいました。男性は「海の水が気持ちよかったです」と話していました。1歳の女の子も家族と参加しました。女の子の病院の先生と看護師も一緒です。女の子は、水や砂に触ったり、お母さんと一緒に車いすで海に入ったりして、初めての海を楽しみました。お母さんは「娘は少しびっくりしていたようですが、夏のいい思い出になりました」と話していました。 The student's answers are: 看護師(かんごし)、病院(びういん)、太平洋(たいへいよう)"
] |
2024-01-10 | cbmchat/llama_index | program~predefined~df.py | from typing import Any, List, Optional, Type, cast
import pandas as pd
from llama_index.bridge.pydantic import BaseModel, Field
from llama_index.program.base_program import BasePydanticProgram
from llama_index.program.llm_prompt_program import BaseLLMFunctionProgram
from llama_index.program.openai_program import OpenAIPydanticProgram
class DataFrameRow(BaseModel):
"""Row in a DataFrame."""
row_values: List[Any] = Field(
...,
description="List of row values, where each value corresponds to a row key.",
)
class DataFrameColumn(BaseModel):
"""Column in a DataFrame."""
column_name: str = Field(..., description="Column name.")
column_desc: Optional[str] = Field(..., description="Column description.")
class DataFrame(BaseModel):
"""Data-frame class.
Consists of a `rows` field which is a list of dictionaries,
as well as a `columns` field which is a list of column names.
"""
description: Optional[str] = None
columns: List[DataFrameColumn] = Field(..., description="List of column names.")
rows: List[DataFrameRow] = Field(
...,
description="""List of DataFrameRow objects. Each DataFrameRow contains \
        values in order of the data frame column.""",
)
def to_df(self) -> pd.DataFrame:
"""To dataframe."""
return pd.DataFrame(
[row.row_values for row in self.rows],
columns=[col.column_name for col in self.columns],
)
class DataFrameRowsOnly(BaseModel):
"""Data-frame with rows. Assume column names are already known beforehand."""
rows: List[DataFrameRow] = Field(..., description="""List of row objects.""")
def to_df(self, existing_df: Optional[pd.DataFrame] = None) -> pd.DataFrame:
"""To dataframe."""
if existing_df is None:
return pd.DataFrame([row.row_values for row in self.rows])
else:
new_df = pd.DataFrame([row.row_values for row in self.rows])
new_df.columns = existing_df.columns
# assume row values are in order of column names
return existing_df.append(new_df, ignore_index=True)
class DataFrameValuesPerColumn(BaseModel):
"""Data-frame as a list of column objects.
Each column object contains a list of values. Note that they can be
of variable length, and so may not be able to be converted to a dataframe.
"""
columns: List[DataFrameRow] = Field(..., description="""List of column objects.""")
DEFAULT_FULL_DF_PARSER_TMPL = """
Please extract the following query into a structured data.
Query: {input_str}.
Please extract both the set of column names and row names.
"""
DEFAULT_ROWS_DF_PARSER_TMPL = """
Please extract the following query into structured data.
Query: {input_str}.
The column schema is the following: {column_schema}.
"""
class DFFullProgram(BasePydanticProgram[DataFrame]):
"""Data-frame program.
Extracts text into a schema + datapoints.
"""
def __init__(
self,
pydantic_program_cls: Type[BaseLLMFunctionProgram],
df_parser_template_str: str = DEFAULT_FULL_DF_PARSER_TMPL,
input_key: str = "input_str",
**program_kwargs: Any,
) -> None:
"""Init params."""
pydantic_program = pydantic_program_cls.from_defaults(
DataFrame, df_parser_template_str, **program_kwargs
)
self._validate_program(pydantic_program)
self._pydantic_program = pydantic_program
self._input_key = input_key
@classmethod
def from_defaults(
cls,
pydantic_program_cls: Optional[Type[BaseLLMFunctionProgram]] = None,
df_parser_template_str: str = DEFAULT_FULL_DF_PARSER_TMPL,
input_key: str = "input_str",
) -> "DFFullProgram":
"""Full DF output parser."""
pydantic_program_cls = pydantic_program_cls or OpenAIPydanticProgram
return cls(
pydantic_program_cls,
df_parser_template_str=df_parser_template_str,
input_key=input_key,
)
def _validate_program(self, pydantic_program: BasePydanticProgram) -> None:
if pydantic_program.output_cls != DataFrame:
raise ValueError("Output class of pydantic program must be `DataFrame`.")
@property
def output_cls(self) -> Type[DataFrame]:
"""Output class."""
return DataFrame
def __call__(self, *args: Any, **kwds: Any) -> DataFrame:
"""Call."""
if self._input_key not in kwds:
raise ValueError(f"Input key {self._input_key} not found in kwds.")
result = self._pydantic_program(**{self._input_key: kwds[self._input_key]})
result = cast(DataFrame, result)
return result
class DFRowsProgram(BasePydanticProgram[DataFrameRowsOnly]):
"""DF Rows output parser.
Given DF schema, extract text into a set of rows.
"""
def __init__(
self,
pydantic_program_cls: Type[BaseLLMFunctionProgram],
df_parser_template_str: str = DEFAULT_ROWS_DF_PARSER_TMPL,
column_schema: Optional[str] = None,
input_key: str = "input_str",
**program_kwargs: Any,
) -> None:
"""Init params."""
# partial format df parser template string with column schema
prompt_template_str = df_parser_template_str.replace(
"{column_schema}", column_schema or ""
)
pydantic_program = pydantic_program_cls.from_defaults(
DataFrameRowsOnly, prompt_template_str, **program_kwargs
)
self._validate_program(pydantic_program)
self._pydantic_program = pydantic_program
self._input_key = input_key
def _validate_program(self, pydantic_program: BasePydanticProgram) -> None:
if pydantic_program.output_cls != DataFrameRowsOnly:
raise ValueError(
"Output class of pydantic program must be `DataFramRowsOnly`."
)
@classmethod
def from_defaults(
cls,
pydantic_program_cls: Optional[Type[BaseLLMFunctionProgram]] = None,
df_parser_template_str: str = DEFAULT_ROWS_DF_PARSER_TMPL,
df: Optional[pd.DataFrame] = None,
column_schema: Optional[str] = None,
input_key: str = "input_str",
**kwargs: Any,
) -> "DFRowsProgram":
"""Rows DF output parser."""
pydantic_program_cls = pydantic_program_cls or OpenAIPydanticProgram
# either one of df or column_schema needs to be specified
if df is None and column_schema is None:
raise ValueError(
"Either `df` or `column_schema` must be specified for "
"DFRowsOutputParser."
)
# first, inject the column schema into the template string
if column_schema is None:
assert df is not None
# by default, show column schema and some example values
column_schema = ", ".join(df.columns)
return cls(
pydantic_program_cls,
df_parser_template_str=df_parser_template_str,
column_schema=column_schema,
input_key=input_key,
**kwargs,
)
@property
def output_cls(self) -> Type[DataFrameRowsOnly]:
"""Output class."""
return DataFrameRowsOnly
def __call__(self, *args: Any, **kwds: Any) -> DataFrameRowsOnly:
"""Call."""
if self._input_key not in kwds:
raise ValueError(f"Input key {self._input_key} not found in kwds.")
result = self._pydantic_program(**{self._input_key: kwds[self._input_key]})
result = cast(DataFrameRowsOnly, result)
return result
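# Hedged usage sketch (added for illustration, not part of the source file): the column
# names and input text are made up; the default program uses the OpenAI function API,
# and DataFrameRowsOnly.to_df relies on DataFrame.append, so this assumes pandas < 2.0.
if __name__ == "__main__":
    existing_df = pd.DataFrame(columns=["name", "age", "city"])
    rows_program = DFRowsProgram.from_defaults(df=existing_df)
    rows_only = rows_program(
        input_str="Ana is 31 and lives in Lisbon; Bo is 24 and lives in Oslo."
    )
    print(rows_only.to_df(existing_df=existing_df))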
| [
"{column_schema}"
] |
2024-01-10 | cbmchat/llama_index | llms~portkey_utils.py | """
Utility Tools for the Portkey Class
This file module contains a collection of utility functions designed to enhance
the functionality and usability of the Portkey class
"""
from typing import List, TYPE_CHECKING
from enum import Enum
from llama_index.llms.base import LLMMetadata
from llama_index.llms.openai import OpenAI
from llama_index.llms.anthropic import Anthropic
from llama_index.llms.openai_utils import (
GPT3_5_MODELS,
GPT4_MODELS,
GPT3_MODELS,
TURBO_MODELS,
AZURE_TURBO_MODELS,
)
from llama_index.llms.anthropic_utils import CLAUDE_MODELS
if TYPE_CHECKING:
from rubeus import (
LLMBase,
RubeusResponse,
)
IMPORT_ERROR_MESSAGE = (
"Rubeus is not installed.Please install it with `pip install rubeus`."
)
DISCONTINUED_MODELS = {
"code-davinci-002": 8001,
"code-davinci-001": 8001,
"code-cushman-002": 2048,
"code-cushman-001": 2048,
}
DEFAULT_MODEL = "gpt-3.5-turbo"
AVAILABLE_INTEGRATIONS = (OpenAI, Anthropic)
CLUADE_MODEL_FULLVERSION_MAP = {
"claude-instant-1": "claude-instant-1.2",
"claude-2": "claude-2.0",
}
ALL_AVAILABLE_MODELS = {
**GPT4_MODELS,
**TURBO_MODELS,
**GPT3_5_MODELS,
**GPT3_MODELS,
**AZURE_TURBO_MODELS,
**CLAUDE_MODELS,
}
CHAT_MODELS = {
**GPT4_MODELS,
**TURBO_MODELS,
**AZURE_TURBO_MODELS,
}
def is_chat_model(model: str) -> bool:
"""Check if a given model is a chat-based language model.
This function takes a model name or identifier as input and determines whether
the model is designed for chat-based language generation, conversation, or
interaction.
Args:
model (str): The name or identifier of the model to be checked.
Returns:
bool: True if the provided model is a chat-based language model,
False otherwise.
"""
return model in CHAT_MODELS
def modelname_to_contextsize(modelname: str) -> int:
"""Calculate the maximum number of tokens possible to generate for a model.
Args:
modelname: The modelname we want to know the context size for.
Returns:
The maximum context size
Example:
.. code-block:: python
max_tokens = modelname_to_contextsize("text-davinci-003")
"""
# handling finetuned models
if "ft-" in modelname: # legacy fine-tuning
modelname = modelname.split(":")[0]
elif modelname.startswith("ft:"):
modelname = modelname.split(":")[1]
if modelname in DISCONTINUED_MODELS:
raise ValueError(
f"Model {modelname} has been discontinued. " "Please choose another model."
)
context_size = ALL_AVAILABLE_MODELS.get(modelname, None)
if context_size is None:
raise ValueError(
f"Unknown model: {modelname}. Please provide a valid model name."
"Known models are: " + ", ".join(ALL_AVAILABLE_MODELS.keys())
)
return context_size
def generate_llm_metadata(llm: "LLMBase") -> LLMMetadata:
"""
Generate metadata for a Language Model (LLM) instance.
This function takes an instance of a Language Model (LLM) and generates
metadata based on the provided instance. The metadata includes information
such as the context window, number of output tokens, chat model status,
and model name.
Parameters:
llm (LLM): An instance of a Language Model (LLM) from which metadata
will be generated.
Returns:
LLMMetadata: A data structure containing metadata attributes such as
context window, number of output tokens, chat model status, and
model name.
Raises:
ValueError: If the provided 'llm' is not an instance of
llama_index.llms.base.LLM.
"""
try:
from rubeus import LLMBase
except ImportError as exc:
raise ImportError(IMPORT_ERROR_MESSAGE) from exc
if not isinstance(llm, LLMBase):
raise ValueError("llm must be an instance of rubeus.LLMBase")
return LLMMetadata(
        context_window=modelname_to_contextsize(llm.model),
is_chat_model=is_chat_model(llm.model),
model_name=llm.model,
)
def get_llm(response: "RubeusResponse", llms: List["LLMBase"]) -> "LLMBase":
# TODO: Update this logic over here.
try:
from rubeus import LLMBase
except ImportError as exc:
raise ImportError(IMPORT_ERROR_MESSAGE) from exc
fallback_llm = LLMBase.construct()
for llm in llms:
model = llm.model
if model == response.model:
fallback_llm = llm
break
if fallback_llm is None:
raise ValueError("Failed to get the fallback LLM")
return fallback_llm
class RubeusApiPaths(str, Enum):
CHAT_COMPLETION = "/v1/chatComplete"
COMPLETION = "/v1/complete"
| [] |
2024-01-10 | cbmchat/llama_index | question_gen~guidance_generator.py | from typing import TYPE_CHECKING, List, Optional, Sequence, cast
from llama_index.indices.query.schema import QueryBundle
from llama_index.program.guidance_program import GuidancePydanticProgram
from llama_index.prompts.guidance_utils import convert_to_handlebars
from llama_index.question_gen.prompts import (
DEFAULT_SUB_QUESTION_PROMPT_TMPL,
build_tools_text,
)
from llama_index.question_gen.types import (
BaseQuestionGenerator,
SubQuestion,
SubQuestionList,
)
from llama_index.tools.types import ToolMetadata
if TYPE_CHECKING:
from guidance.llms import LLM as GuidanceLLM
DEFAULT_GUIDANCE_SUB_QUESTION_PROMPT_TMPL = convert_to_handlebars(
DEFAULT_SUB_QUESTION_PROMPT_TMPL
)
class GuidanceQuestionGenerator(BaseQuestionGenerator):
def __init__(
self,
program: GuidancePydanticProgram,
verbose: bool = False,
) -> None:
self._program = program
self._verbose = verbose
@classmethod
def from_defaults(
cls,
prompt_template_str: str = DEFAULT_GUIDANCE_SUB_QUESTION_PROMPT_TMPL,
guidance_llm: Optional["GuidanceLLM"] = None,
verbose: bool = False,
) -> "GuidanceQuestionGenerator":
program = GuidancePydanticProgram(
output_cls=SubQuestionList,
guidance_llm=guidance_llm,
prompt_template_str=prompt_template_str,
verbose=verbose,
)
return cls(program, verbose)
def generate(
self, tools: Sequence[ToolMetadata], query: QueryBundle
) -> List[SubQuestion]:
tools_str = build_tools_text(tools)
query_str = query.query_str
question_list = self._program(
tools_str=tools_str,
query_str=query_str,
)
question_list = cast(SubQuestionList, question_list)
return question_list.items
async def agenerate(
self, tools: Sequence[ToolMetadata], query: QueryBundle
) -> List[SubQuestion]:
# TODO: currently guidance does not support async calls
return self.generate(tools=tools, query=query)
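# Hedged usage sketch (added for illustration, not part of the source file): the
# guidance OpenAI wrapper, tool metadata and query below are assumptions.
if __name__ == "__main__":
    from guidance.llms import OpenAI as GuidanceOpenAI

    question_gen = GuidanceQuestionGenerator.from_defaults(
        guidance_llm=GuidanceOpenAI("text-davinci-003"), verbose=False
    )
    tools = [
        ToolMetadata(name="uber_10k", description="Uber 2021 10-K filing"),
        ToolMetadata(name="lyft_10k", description="Lyft 2021 10-K filing"),
    ]
    sub_questions = question_gen.generate(
        tools=tools, query=QueryBundle("Compare revenue growth of Uber and Lyft")
    )
    for sub_q in sub_questions:
        print(sub_q.tool_name, "->", sub_q.sub_question)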
| [] |
2024-01-10 | cbmchat/llama_index | prompts~guidance_utils.py | from typing import TYPE_CHECKING, Optional, Type, TypeVar
from llama_index.output_parsers.base import OutputParserException
from llama_index.output_parsers.utils import parse_json_markdown
if TYPE_CHECKING:
from guidance import Program
from llama_index.bridge.pydantic import BaseModel
def convert_to_handlebars(text: str) -> str:
"""Convert a python format string to handlebars-style template.
In python format string, single braces {} are used for variable substitution,
and double braces {{}} are used for escaping actual braces (e.g. for JSON dict)
In handlebars template, double braces {{}} are used for variable substitution,
and single braces are actual braces (e.g. for JSON dict)
This is currently only used to convert a python format string based prompt template
to a guidance program template.
"""
# Replace double braces with a temporary placeholder
var_left = "TEMP_BRACE_LEFT"
var_right = "TEMP_BRACE_RIGHT"
text = text.replace("{{", var_left)
text = text.replace("}}", var_right)
# Replace single braces with double braces
text = text.replace("{", "{{")
text = text.replace("}", "}}")
# Replace the temporary placeholder with single braces
text = text.replace(var_left, "{")
text = text.replace(var_right, "}")
return text
def wrap_json_markdown(text: str) -> str:
"""Wrap text in json markdown formatting block."""
return "```json\n" + text + "\n```"
def pydantic_to_guidance_output_template(cls: Type[BaseModel]) -> str:
"""Convert a pydantic model to guidance output template."""
return json_schema_to_guidance_output_template(cls.schema(), root=cls.schema())
def pydantic_to_guidance_output_template_markdown(cls: Type[BaseModel]) -> str:
"""Convert a pydantic model to guidance output template wrapped in json markdown."""
output = json_schema_to_guidance_output_template(cls.schema(), root=cls.schema())
return wrap_json_markdown(output)
def json_schema_to_guidance_output_template(
schema: dict,
key: Optional[str] = None,
indent: int = 0,
root: Optional[dict] = None,
use_pattern_control: bool = False,
) -> str:
"""Convert a json schema to guidance output template.
Implementation based on https://github.com/microsoft/guidance/\
blob/main/notebooks/applications/jsonformer.ipynb
Modified to support nested pydantic models.
"""
out = ""
if "type" not in schema and "$ref" in schema:
if root is None:
raise ValueError("Must specify root schema for nested object")
ref = schema["$ref"]
model = ref.split("/")[-1]
return json_schema_to_guidance_output_template(
root["definitions"][model], key, indent, root
)
if schema["type"] == "object":
out += " " * indent + "{\n"
for k, v in schema["properties"].items():
out += (
" " * (indent + 1)
+ f'"{k}"'
+ ": "
+ json_schema_to_guidance_output_template(v, k, indent + 1, root)
+ ",\n"
)
out += " " * indent + "}"
return out
elif schema["type"] == "array":
if key is None:
raise ValueError("Key should not be None")
if "max_items" in schema:
extra_args = f" max_iterations={schema['max_items']}"
else:
extra_args = ""
return (
"[{{#geneach '"
+ key
+ "' stop=']'"
+ extra_args
+ "}}{{#unless @first}}, {{/unless}}"
+ json_schema_to_guidance_output_template(schema["items"], "this", 0, root)
+ "{{/geneach}}]"
)
elif schema["type"] == "string":
if key is None:
raise ValueError("key should not be None")
return "\"{{gen '" + key + "' stop='\"'}}\""
elif schema["type"] in ["integer", "number"]:
if key is None:
raise ValueError("key should not be None")
if use_pattern_control:
return "{{gen '" + key + "' pattern='[0-9\\.]' stop=','}}"
else:
return "\"{{gen '" + key + "' stop='\"'}}\""
elif schema["type"] == "boolean":
if key is None:
raise ValueError("key should not be None")
return "{{#select '" + key + "'}}True{{or}}False{{/select}}"
else:
schema_type = schema["type"]
raise ValueError(f"Unknown schema type {schema_type}")
Model = TypeVar("Model", bound=BaseModel)
def parse_pydantic_from_guidance_program(
program: "Program", cls: Type[Model], verbose: bool = False
) -> Model:
"""Parse output from guidance program.
This is a temporary solution for parsing a pydantic object out of an executed
guidance program.
NOTE: right now we assume the output is the last markdown formatted json block
NOTE: a better way is to extract via Program.variables, but guidance does not
support extracting nested objects right now.
So we call back to manually parsing the final text after program execution
"""
try:
output = program.text.split("```json")[-1]
output = "```json" + output
if verbose:
print("Raw output:")
print(output)
json_dict = parse_json_markdown(output)
sub_questions = cls.parse_obj(json_dict)
except Exception as e:
raise OutputParserException(
"Failed to parse pydantic object from guidance program"
) from e
return sub_questions
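# Hedged usage sketch (added for illustration, not part of the source file): the sample
# template and the _Song model are made up to show the two conversions above.
if __name__ == "__main__":
    print(convert_to_handlebars('Answer {query_str} as JSON: {{"answer": "..."}}'))
    # -> Answer {{query_str}} as JSON: {"answer": "..."}

    class _Song(BaseModel):
        title: str
        length_seconds: int

    print(pydantic_to_guidance_output_template_markdown(_Song))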
| [] |
2024-01-10 | cbmchat/llama_index | evaluation~dataset_generation.py | """Dataset generation from documents"""
from __future__ import annotations
import re
from typing import List, Optional
from llama_index import (
Document,
SummaryIndex,
ServiceContext,
)
from llama_index.llms.openai import OpenAI
from llama_index.schema import BaseNode, NodeWithScore, MetadataMode
from llama_index.indices.postprocessor.node import KeywordNodePostprocessor
from llama_index.prompts.prompts import QuestionAnswerPrompt
# DEFAULT_QUESTION_GENERATION_PROMPT = """Context information is below.\n"
# "\n---------------------\n{context_str}\n---------------------\n"
# "Given the context information and not prior knowledge.\n"
# "generate only questions based on the below query.\n"
# "{query_str}\n"
# """
DEFAULT_QUESTION_GENERATION_PROMPT = """上下文信息如下。\n"
"\n---------------------\n{context_str}\n-------------------- --\n"
"给出上下文信息而不是先验知识。\n"
"仅根据以下查询生成问题。\n"
"{query_str}\n"
"""
def _get_default_service_context() -> ServiceContext:
"""Get default service context."""
llm = OpenAI(temperature=0, model="gpt-3.5-turbo")
service_context = ServiceContext.from_defaults(llm=llm, chunk_size_limit=3000)
return service_context
class DatasetGenerator:
"""Generate dataset (question/ question-answer pairs) \
based on the given documents.
NOTE: this is a beta feature, subject to change!
Args:
nodes (List[Node]): List of nodes. (Optional)
service_context (ServiceContext): Service Context.
num_questions_per_chunk: number of question to be \
generated per chunk. Each document is chunked of size 512 words.
text_question_template: Question generation template.
"""
def __init__(
self,
nodes: List[BaseNode],
service_context: Optional[ServiceContext] = None,
num_questions_per_chunk: int = 10,
text_question_template: Optional[QuestionAnswerPrompt] = None,
question_gen_query: Optional[str] = None,
required_keywords: Optional[List[str]] = None,
exclude_keywords: Optional[List[str]] = None,
) -> None:
"""Init params."""
if service_context is None:
service_context = _get_default_service_context()
self.service_context = service_context
self.text_question_template = text_question_template or QuestionAnswerPrompt(
DEFAULT_QUESTION_GENERATION_PROMPT
)
self.question_gen_query = (
question_gen_query
or f"You are a Teacher/ Professor. Your task is to setup \
{num_questions_per_chunk} questions for an upcoming \
quiz/examination. The questions should be diverse in nature \
across the document. Restrict the questions to the \
context information provided."
)
self.nodes = nodes
@classmethod
def from_documents(
cls,
documents: List[Document],
service_context: Optional[ServiceContext] = None,
num_questions_per_chunk: int = 10,
text_question_template: Optional[QuestionAnswerPrompt] = None,
question_gen_query: Optional[str] = None,
required_keywords: Optional[List[str]] = None,
exclude_keywords: Optional[List[str]] = None,
) -> "DatasetGenerator":
"""Generate dataset from documents."""
if service_context is None:
service_context = _get_default_service_context()
nodes = service_context.node_parser.get_nodes_from_documents(documents)
# use node postprocessor to filter nodes
required_keywords = required_keywords or []
exclude_keywords = exclude_keywords or []
node_postprocessor = KeywordNodePostprocessor(
service_context=service_context,
required_keywords=required_keywords,
exclude_keywords=exclude_keywords,
)
node_with_scores = [NodeWithScore(node=node) for node in nodes]
node_with_scores = node_postprocessor.postprocess_nodes(node_with_scores)
nodes = [node_with_score.node for node_with_score in node_with_scores]
return cls(
nodes=nodes,
service_context=service_context,
num_questions_per_chunk=num_questions_per_chunk,
text_question_template=text_question_template,
question_gen_query=question_gen_query,
)
def _node_question_generator(
self, nodes: List[BaseNode], num: Optional[int] = None
) -> List[str]:
"""Node question generator."""
questions: List[str] = []
for node in nodes:
if num is not None and len(questions) >= num:
break
index = SummaryIndex.from_documents(
[
Document(
text=node.get_content(metadata_mode=MetadataMode.NONE),
metadata=node.metadata,
)
]
)
query_engine = index.as_query_engine(
service_context=self.service_context,
text_qa_template=self.text_question_template,
use_async=True,
)
response = query_engine.query(
self.question_gen_query,
)
result = str(response).strip().split("\n")
cleaned_questions = [
re.sub(r"^\d+[\).\s]", "", question).strip() for question in result
]
questions.extend(cleaned_questions)
questions = [question for question in questions if question != ""]
if num is not None:
questions = questions[:num]
return questions
def generate_questions_from_nodes(self, num: Optional[int] = None) -> List[str]:
"""Generates questions for each document."""
return self._node_question_generator(self.nodes, num)
| [
"上下文信息如下。\n\"\n\"\n---------------------\n{context_str}\n-------------------- --\n\"\n\"给出上下文信息而不是先验知识。\n\"\n\"仅根据以下查询生成问题。\n\"\n\"{query_str}\n\"\n"
] |
2024-01-10 | cbmchat/llama_index | chat_engine~types.py | import asyncio
import logging
import queue
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from enum import Enum
from threading import Event
from typing import AsyncGenerator, Generator, List, Optional, Union
from llama_index.llms.base import ChatMessage, ChatResponseAsyncGen, ChatResponseGen
from llama_index.memory import BaseMemory
from llama_index.response.schema import Response, StreamingResponse
from llama_index.schema import NodeWithScore
from llama_index.tools import ToolOutput
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
def is_function(message: ChatMessage) -> bool:
"""Utility for ChatMessage responses from OpenAI models"""
return "function_call" in message.additional_kwargs
class ChatResponseMode(str, Enum):
"""Flag toggling waiting/streaming in `Agent._chat`"""
WAIT = "wait"
STREAM = "stream"
@dataclass
class AgentChatResponse:
"""Agent chat response."""
response: str = ""
sources: List[ToolOutput] = field(default_factory=list)
source_nodes: List[NodeWithScore] = field(default_factory=list)
def __post_init__(self) -> None:
if self.sources and not self.source_nodes:
for tool_output in self.sources:
if isinstance(tool_output.raw_output, (Response, StreamingResponse)):
self.source_nodes.extend(tool_output.raw_output.source_nodes)
def __str__(self) -> str:
return self.response
@dataclass
class StreamingAgentChatResponse:
"""Streaming chat response to user and writing to chat history."""
response: str = ""
sources: List[ToolOutput] = field(default_factory=list)
chat_stream: Optional[ChatResponseGen] = None
achat_stream: Optional[ChatResponseAsyncGen] = None
source_nodes: List[NodeWithScore] = field(default_factory=list)
_queue: queue.Queue = field(default_factory=queue.Queue)
_aqueue: asyncio.Queue = field(default_factory=asyncio.Queue)
# flag when chat message is a function call
_is_function: Optional[bool] = None
# flag when processing done
_is_done = False
# signal when a new item is added to the queue
_new_item_event: asyncio.Event = field(default_factory=asyncio.Event)
# NOTE: async code uses two events rather than one since it yields
# control when waiting for queue item
# signal when the OpenAI functions stop executing
_is_function_false_event: asyncio.Event = field(default_factory=asyncio.Event)
# signal when an OpenAI function is being executed
_is_function_not_none_thread_event: Event = field(default_factory=Event)
def __post_init__(self) -> None:
if self.sources and not self.source_nodes:
for tool_output in self.sources:
if isinstance(tool_output.raw_output, (Response, StreamingResponse)):
self.source_nodes.extend(tool_output.raw_output.source_nodes)
def __str__(self) -> str:
if self._is_done and not self._queue.empty() and not self._is_function:
for delta in self._queue.queue:
self.response += delta
return self.response
def put_in_queue(self, delta: Optional[str]) -> None:
self._queue.put_nowait(delta)
self._is_function_not_none_thread_event.set()
def aput_in_queue(self, delta: Optional[str]) -> None:
self._aqueue.put_nowait(delta)
self._new_item_event.set()
def write_response_to_history(self, memory: BaseMemory) -> None:
if self.chat_stream is None:
raise ValueError(
"chat_stream is None. Cannot write to history without chat_stream."
)
# try/except to prevent hanging on error
try:
final_text = ""
for chat in self.chat_stream:
self._is_function = is_function(chat.message)
self.put_in_queue(chat.delta)
final_text += chat.delta or ""
if self._is_function is not None: # if loop has gone through iteration
# NOTE: this is to handle the special case where we consume some of the
# chat stream, but not all of it (e.g. in react agent)
chat.message.content = final_text # final message
memory.put(chat.message)
except Exception as e:
logger.warning(f"Encountered exception writing response to history: {e}")
self._is_done = True
async def awrite_response_to_history(
self,
memory: BaseMemory,
) -> None:
if self.achat_stream is None:
raise ValueError(
"achat_stream is None. Cannot asynchronously write to "
"history without achat_stream."
)
# try/except to prevent hanging on error
try:
final_text = ""
async for chat in self.achat_stream:
self._is_function = is_function(chat.message)
self.aput_in_queue(chat.delta)
final_text += chat.delta or ""
if self._is_function is False:
self._is_function_false_event.set()
if self._is_function is not None: # if loop has gone through iteration
# NOTE: this is to handle the special case where we consume some of the
# chat stream, but not all of it (e.g. in react agent)
chat.message.content = final_text # final message
memory.put(chat.message)
except Exception as e:
logger.warning(f"Encountered exception writing response to history: {e}")
self._is_done = True
# These act as is_done events for any consumers waiting
self._is_function_false_event.set()
self._new_item_event.set()
@property
def response_gen(self) -> Generator[str, None, None]:
while not self._is_done or not self._queue.empty():
try:
delta = self._queue.get(block=False)
self.response += delta
yield delta
except queue.Empty:
# Queue is empty, but we're not done yet
continue
async def async_response_gen(self) -> AsyncGenerator[str, None]:
while not self._is_done or not self._aqueue.empty():
if not self._aqueue.empty():
delta = self._aqueue.get_nowait()
self.response += delta
yield delta
else:
await self._new_item_event.wait() # Wait until a new item is added
self._new_item_event.clear() # Clear the event for the next wait
def print_response_stream(self) -> None:
for token in self.response_gen:
print(token, end="", flush=True)
AGENT_CHAT_RESPONSE_TYPE = Union[AgentChatResponse, StreamingAgentChatResponse]
class BaseChatEngine(ABC):
"""Base Chat Engine."""
@abstractmethod
def reset(self) -> None:
"""Reset conversation state."""
pass
@abstractmethod
def chat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> AGENT_CHAT_RESPONSE_TYPE:
"""Main chat interface."""
pass
@abstractmethod
def stream_chat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> StreamingAgentChatResponse:
"""Stream chat interface."""
pass
@abstractmethod
async def achat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> AGENT_CHAT_RESPONSE_TYPE:
"""Async version of main chat interface."""
pass
@abstractmethod
async def astream_chat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> StreamingAgentChatResponse:
"""Async version of main chat interface."""
pass
def chat_repl(self) -> None:
"""Enter interactive chat REPL."""
print("===== Entering Chat REPL =====")
print('Type "exit" to exit.\n')
self.reset()
message = input("Human: ")
while message != "exit":
response = self.chat(message)
print(f"Assistant: {response}\n")
message = input("Human: ")
@property
@abstractmethod
def chat_history(self) -> List[ChatMessage]:
pass
class ChatMode(str, Enum):
"""Chat Engine Modes."""
SIMPLE = "simple"
"""Corresponds to `SimpleChatEngine`.
Chat with LLM, without making use of a knowledge base.
"""
CONDENSE_QUESTION = "condense_question"
"""Corresponds to `CondenseQuestionChatEngine`.
First generate a standalone question from conversation context and last message,
then query the query engine for a response.
"""
CONTEXT = "context"
"""Corresponds to `ContextChatEngine`.
First retrieve text from the index using the user's message, then use the context
in the system prompt to generate a response.
"""
REACT = "react"
"""Corresponds to `ReActAgent`.
Use a ReAct agent loop with query engine tools.
"""
OPENAI = "openai"
"""Corresponds to `OpenAIAgent`.
Use an OpenAI function calling agent loop.
NOTE: only works with OpenAI models that support function calling API.
"""
BEST = "best"
"""Select the best chat engine based on the current LLM.
Corresponds to `OpenAIAgent` if using an OpenAI model that supports
function calling API, otherwise, corresponds to `ReActAgent`.
"""
| [] |
2024-01-10 | cbmchat/llama_index | llama_index~agent~context_retriever_agent.py | """Context retriever agent."""
from typing import List, Optional, Type, Union
from llama_index.agent.openai_agent import (
DEFAULT_MAX_FUNCTION_CALLS,
DEFAULT_MODEL_NAME,
BaseOpenAIAgent,
)
from llama_index.callbacks import CallbackManager
from llama_index.chat_engine.types import (
AgentChatResponse,
)
from llama_index.indices.base_retriever import BaseRetriever
from llama_index.llms.base import LLM, ChatMessage
from llama_index.llms.openai import OpenAI
from llama_index.llms.openai_utils import is_function_calling_model
from llama_index.memory import BaseMemory, ChatMemoryBuffer
from llama_index.prompts.prompts import QuestionAnswerPrompt
from llama_index.schema import NodeWithScore
from llama_index.tools import BaseTool
from llama_index.utils import print_text
# inspired by DEFAULT_QA_PROMPT_TMPL from llama_index/prompts/default_prompts.py
# DEFAULT_QA_PROMPT_TMPL = (
# "Context information is below.\n"
# "---------------------\n"
# "{context_str}\n"
# "---------------------\n"
# "Given the context information and not prior knowledge, "
# "either pick the corresponding tool or answer the function: {query_str}\n"
# )
DEFAULT_QA_PROMPT_TMPL = (
"上下文信息如下。\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"鉴于上下文信息而不是先验知识,"
"选择相应的工具或回答函数:{query_str}\n"
)
DEFAULT_QA_PROMPT = QuestionAnswerPrompt(DEFAULT_QA_PROMPT_TMPL)
class ContextRetrieverOpenAIAgent(BaseOpenAIAgent):
"""ContextRetriever OpenAI Agent.
This agent performs retrieval from BaseRetriever before
calling the LLM. Allows it to augment user message with context.
NOTE: this is a beta feature, function interfaces might change.
Args:
tools (List[BaseTool]): A list of tools.
retriever (BaseRetriever): A retriever.
qa_prompt (Optional[QuestionAnswerPrompt]): A QA prompt.
context_separator (str): A context separator.
llm (Optional[OpenAI]): An OpenAI LLM.
chat_history (Optional[List[ChatMessage]]): A chat history.
prefix_messages: List[ChatMessage]: A list of prefix messages.
verbose (bool): Whether to print debug statements.
max_function_calls (int): Maximum number of function calls.
callback_manager (Optional[CallbackManager]): A callback manager.
"""
def __init__(
self,
tools: List[BaseTool],
retriever: BaseRetriever,
qa_prompt: QuestionAnswerPrompt,
context_separator: str,
llm: OpenAI,
memory: BaseMemory,
prefix_messages: List[ChatMessage],
verbose: bool = False,
max_function_calls: int = DEFAULT_MAX_FUNCTION_CALLS,
callback_manager: Optional[CallbackManager] = None,
) -> None:
super().__init__(
llm=llm,
memory=memory,
prefix_messages=prefix_messages,
verbose=verbose,
max_function_calls=max_function_calls,
callback_manager=callback_manager,
)
self._tools = tools
self._qa_prompt = qa_prompt
self._retriever = retriever
self._context_separator = context_separator
@classmethod
def from_tools_and_retriever(
cls,
tools: List[BaseTool],
retriever: BaseRetriever,
qa_prompt: Optional[QuestionAnswerPrompt] = None,
context_separator: str = "\n",
llm: Optional[LLM] = None,
chat_history: Optional[List[ChatMessage]] = None,
memory: Optional[BaseMemory] = None,
memory_cls: Type[BaseMemory] = ChatMemoryBuffer,
verbose: bool = False,
max_function_calls: int = DEFAULT_MAX_FUNCTION_CALLS,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
prefix_messages: Optional[List[ChatMessage]] = None,
) -> "ContextRetrieverOpenAIAgent":
"""Create a ContextRetrieverOpenAIAgent from a retriever.
Args:
retriever (BaseRetriever): A retriever.
qa_prompt (Optional[QuestionAnswerPrompt]): A QA prompt.
context_separator (str): A context separator.
llm (Optional[OpenAI]): An OpenAI LLM.
            chat_history (Optional[List[ChatMessage]]): A chat history.
verbose (bool): Whether to print debug statements.
max_function_calls (int): Maximum number of function calls.
callback_manager (Optional[CallbackManager]): A callback manager.
"""
qa_prompt = qa_prompt or DEFAULT_QA_PROMPT
chat_history = chat_history or []
llm = llm or OpenAI(model=DEFAULT_MODEL_NAME)
if not isinstance(llm, OpenAI):
raise ValueError("llm must be a OpenAI instance")
if callback_manager is not None:
llm.callback_manager = callback_manager
memory = memory or memory_cls.from_defaults(chat_history=chat_history, llm=llm)
if not is_function_calling_model(llm.model):
raise ValueError(
f"Model name {llm.model} does not support function calling API."
)
if system_prompt is not None:
if prefix_messages is not None:
raise ValueError(
"Cannot specify both system_prompt and prefix_messages"
)
prefix_messages = [ChatMessage(content=system_prompt, role="system")]
prefix_messages = prefix_messages or []
return cls(
tools=tools,
retriever=retriever,
qa_prompt=qa_prompt,
context_separator=context_separator,
llm=llm,
memory=memory,
prefix_messages=prefix_messages,
verbose=verbose,
max_function_calls=max_function_calls,
callback_manager=callback_manager,
)
def _get_tools(self, message: str) -> List[BaseTool]:
"""Get tools."""
return self._tools
def _build_formatted_message(self, message: str) -> str:
# augment user message
retrieved_nodes_w_scores: List[NodeWithScore] = self._retriever.retrieve(
message
)
retrieved_nodes = [node.node for node in retrieved_nodes_w_scores]
retrieved_texts = [node.get_content() for node in retrieved_nodes]
# format message
context_str = self._context_separator.join(retrieved_texts)
return self._qa_prompt.format(context_str=context_str, query_str=message)
def chat(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
function_call: Union[str, dict] = "auto",
) -> AgentChatResponse:
"""Chat."""
formatted_message = self._build_formatted_message(message)
if self._verbose:
print_text(formatted_message + "\n", color="yellow")
return super().chat(
formatted_message, chat_history=chat_history, function_call=function_call
)
async def achat(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
function_call: Union[str, dict] = "auto",
) -> AgentChatResponse:
"""Chat."""
formatted_message = self._build_formatted_message(message)
if self._verbose:
print_text(formatted_message + "\n", color="yellow")
return await super().achat(
formatted_message, chat_history=chat_history, function_call=function_call
)
def get_tools(self, message: str) -> List[BaseTool]:
"""Get tools."""
return self._get_tools(message)
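# Hedged usage sketch (added for illustration, not part of the source file): the index,
# tool and retriever below are assumptions; requires an OpenAI function-calling model.
if __name__ == "__main__":
    from llama_index import SimpleDirectoryReader, VectorStoreIndex
    from llama_index.tools import QueryEngineTool, ToolMetadata

    index = VectorStoreIndex.from_documents(SimpleDirectoryReader("./data").load_data())
    docs_tool = QueryEngineTool(
        query_engine=index.as_query_engine(),
        metadata=ToolMetadata(name="docs", description="Answers questions about the documents."),
    )
    agent = ContextRetrieverOpenAIAgent.from_tools_and_retriever(
        tools=[docs_tool],
        retriever=index.as_retriever(similarity_top_k=2),
        verbose=True,
    )
    print(agent.chat("What topics are covered?"))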
| [
"上下文信息如下。\n---------------------\n{context_str}\n---------------------\n鉴于上下文信息而不是先验知识,选择相应的工具或回答函数:{query_str}\n"
] |
2024-01-10 | cbmchat/llama_index | response_synthesizers~refine.py | import logging
from typing import Any, Generator, Optional, Sequence, cast, Type, Callable
from llama_index.bridge.pydantic import BaseModel, Field
from llama_index.indices.service_context import ServiceContext
from llama_index.prompts.base import BasePromptTemplate, PromptTemplate
from llama_index.llm_predictor.base import BaseLLMPredictor
from llama_index.indices.utils import truncate_text
from llama_index.prompts.default_prompt_selectors import (
DEFAULT_REFINE_PROMPT_SEL,
DEFAULT_TEXT_QA_PROMPT_SEL,
)
from llama_index.response.utils import get_response_text
from llama_index.response_synthesizers.base import BaseSynthesizer
from llama_index.types import RESPONSE_TEXT_TYPE
from llama_index.program.base_program import BasePydanticProgram
from llama_index.program.openai_program import OpenAIPydanticProgram
from llama_index.program.llm_program import LLMTextCompletionProgram
from llama_index.output_parsers.pydantic import PydanticOutputParser
logger = logging.getLogger(__name__)
class StructuredRefineResponse(BaseModel):
"""
Used to answer a given query based on the provided context.
Also indicates if the query was satisfied with the provided answer.
"""
answer: str = Field(
description="The answer for the given query, based on the context and not "
"prior knowledge."
)
query_satisfied: bool = Field(
description="True if there was enough context given to provide an answer "
"that satisfies the query."
)
class DefaultRefineProgram(BasePydanticProgram):
"""
Runs the query on the LLM as normal and always returns the answer with
query_satisfied=True. In effect, doesn't do any answer filtering.
"""
def __init__(self, prompt: BasePromptTemplate, llm_predictor: BaseLLMPredictor):
self._prompt = prompt
self._llm_predictor = llm_predictor
@property
def output_cls(self) -> Type[BaseModel]:
return StructuredRefineResponse
def __call__(self, *args: Any, **kwds: Any) -> StructuredRefineResponse:
answer = self._llm_predictor.predict(
self._prompt,
**kwds,
)
return StructuredRefineResponse(answer=answer, query_satisfied=True)
async def acall(self, *args: Any, **kwds: Any) -> StructuredRefineResponse:
answer = await self._llm_predictor.apredict(
self._prompt,
**kwds,
)
return StructuredRefineResponse(answer=answer, query_satisfied=True)
class Refine(BaseSynthesizer):
"""Refine a response to a query across text chunks."""
def __init__(
self,
service_context: Optional[ServiceContext] = None,
text_qa_template: Optional[BasePromptTemplate] = None,
refine_template: Optional[BasePromptTemplate] = None,
streaming: bool = False,
verbose: bool = False,
structured_answer_filtering: bool = False,
program_factory: Optional[
Callable[[BasePromptTemplate], BasePydanticProgram]
] = None,
) -> None:
super().__init__(service_context=service_context, streaming=streaming)
self._text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT_SEL
self._refine_template = refine_template or DEFAULT_REFINE_PROMPT_SEL
self._verbose = verbose
self._structured_answer_filtering = structured_answer_filtering
if self._streaming and self._structured_answer_filtering:
raise ValueError(
"Streaming not supported with structured answer filtering."
)
if not self._structured_answer_filtering and program_factory is not None:
raise ValueError(
"Program factory not supported without structured answer filtering."
)
self._program_factory = program_factory or self._default_program_factory
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
"""Give response over chunks."""
prev_response_obj = cast(
Optional[RESPONSE_TEXT_TYPE], response_kwargs.get("prev_response", None)
)
response: Optional[RESPONSE_TEXT_TYPE] = None
for text_chunk in text_chunks:
if prev_response_obj is None:
# if this is the first chunk, and text chunk already
# is an answer, then return it
response = self._give_response_single(
query_str,
text_chunk,
)
else:
response = self._refine_response_single(
prev_response_obj, query_str, text_chunk
)
prev_response_obj = response
if isinstance(response, str):
response = response or "Empty Response"
else:
response = cast(Generator, response)
return response
def _default_program_factory(self, prompt: PromptTemplate) -> BasePydanticProgram:
if self._structured_answer_filtering:
try:
return OpenAIPydanticProgram.from_defaults(
StructuredRefineResponse,
prompt=prompt,
llm=self._service_context.llm,
verbose=self._verbose,
)
except ValueError:
output_parser = PydanticOutputParser(StructuredRefineResponse)
return LLMTextCompletionProgram.from_defaults(
output_parser,
prompt=prompt,
llm=self._service_context.llm,
verbose=self._verbose,
)
else:
return DefaultRefineProgram(
prompt=prompt,
llm_predictor=self._service_context.llm_predictor,
)
def _give_response_single(
self,
query_str: str,
text_chunk: str,
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
"""Give response given a query and a corresponding text chunk."""
text_qa_template = self._text_qa_template.partial_format(query_str=query_str)
text_chunks = self._service_context.prompt_helper.repack(
text_qa_template, [text_chunk]
)
response: Optional[RESPONSE_TEXT_TYPE] = None
program = self._program_factory(text_qa_template)
# TODO: consolidate with loop in get_response_default
for cur_text_chunk in text_chunks:
query_satisfied = False
if response is None and not self._streaming:
structured_response = cast(
StructuredRefineResponse, program(context_str=cur_text_chunk)
)
query_satisfied = structured_response.query_satisfied
if query_satisfied:
response = structured_response.answer
elif response is None and self._streaming:
response = self._service_context.llm_predictor.stream(
text_qa_template,
context_str=cur_text_chunk,
)
query_satisfied = True
else:
response = self._refine_response_single(
cast(RESPONSE_TEXT_TYPE, response),
query_str,
cur_text_chunk,
)
if response is None:
response = "Empty Response"
if isinstance(response, str):
response = response or "Empty Response"
else:
response = cast(Generator, response)
return response
def _refine_response_single(
self,
response: RESPONSE_TEXT_TYPE,
query_str: str,
text_chunk: str,
**response_kwargs: Any,
) -> Optional[RESPONSE_TEXT_TYPE]:
"""Refine response."""
# TODO: consolidate with logic in response/schema.py
if isinstance(response, Generator):
response = get_response_text(response)
fmt_text_chunk = truncate_text(text_chunk, 50)
logger.debug(f"> Refine context: {fmt_text_chunk}")
if self._verbose:
print(f"> Refine context: {fmt_text_chunk}")
# NOTE: partial format refine template with query_str and existing_answer here
refine_template = self._refine_template.partial_format(
query_str=query_str, existing_answer=response
)
text_chunks = self._service_context.prompt_helper.repack(
refine_template, text_chunks=[text_chunk]
)
program = self._program_factory(refine_template)
for cur_text_chunk in text_chunks:
query_satisfied = False
if not self._streaming:
structured_response = cast(
StructuredRefineResponse, program(context_msg=cur_text_chunk)
)
query_satisfied = structured_response.query_satisfied
if query_satisfied:
response = structured_response.answer
else:
response = self._service_context.llm_predictor.stream(
refine_template,
context_msg=cur_text_chunk,
)
query_satisfied = True
if query_satisfied:
refine_template = self._refine_template.partial_format(
query_str=query_str, existing_answer=response
)
return response
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
prev_response_obj = cast(
Optional[RESPONSE_TEXT_TYPE], response_kwargs.get("prev_response", None)
)
response: Optional[RESPONSE_TEXT_TYPE] = None
for text_chunk in text_chunks:
if prev_response_obj is None:
# if this is the first chunk, and text chunk already
# is an answer, then return it
response = await self._agive_response_single(
query_str,
text_chunk,
)
else:
response = await self._arefine_response_single(
prev_response_obj, query_str, text_chunk
)
prev_response_obj = response
if response is None:
response = "Empty Response"
if isinstance(response, str):
response = response or "Empty Response"
else:
response = cast(Generator, response)
return response
async def _arefine_response_single(
self,
response: RESPONSE_TEXT_TYPE,
query_str: str,
text_chunk: str,
**response_kwargs: Any,
) -> Optional[RESPONSE_TEXT_TYPE]:
"""Refine response."""
# TODO: consolidate with logic in response/schema.py
if isinstance(response, Generator):
response = get_response_text(response)
fmt_text_chunk = truncate_text(text_chunk, 50)
logger.debug(f"> Refine context: {fmt_text_chunk}")
# NOTE: partial format refine template with query_str and existing_answer here
refine_template = self._refine_template.partial_format(
query_str=query_str, existing_answer=response
)
text_chunks = self._service_context.prompt_helper.repack(
refine_template, text_chunks=[text_chunk]
)
program = self._program_factory(refine_template)
for cur_text_chunk in text_chunks:
query_satisfied = False
if not self._streaming:
structured_response = await program.acall(context_msg=cur_text_chunk)
structured_response = cast(
StructuredRefineResponse, structured_response
)
query_satisfied = structured_response.query_satisfied
if query_satisfied:
response = structured_response.answer
else:
raise ValueError("Streaming not supported for async")
if query_satisfied:
refine_template = self._refine_template.partial_format(
query_str=query_str, existing_answer=response
)
return response
async def _agive_response_single(
self,
query_str: str,
text_chunk: str,
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
"""Give response given a query and a corresponding text chunk."""
text_qa_template = self._text_qa_template.partial_format(query_str=query_str)
text_chunks = self._service_context.prompt_helper.repack(
text_qa_template, [text_chunk]
)
response: Optional[RESPONSE_TEXT_TYPE] = None
program = self._program_factory(text_qa_template)
# TODO: consolidate with loop in get_response_default
for cur_text_chunk in text_chunks:
if response is None and not self._streaming:
structured_response = await program.acall(context_str=cur_text_chunk)
structured_response = cast(
StructuredRefineResponse, structured_response
)
query_satisfied = structured_response.query_satisfied
if query_satisfied:
response = structured_response.answer
elif response is None and self._streaming:
raise ValueError("Streaming not supported for async")
else:
response = await self._arefine_response_single(
cast(RESPONSE_TEXT_TYPE, response),
query_str,
cur_text_chunk,
)
if response is None:
response = "Empty Response"
if isinstance(response, str):
response = response or "Empty Response"
else:
response = cast(Generator, response)
return response
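# Hedged usage sketch (added for illustration, not part of the source file): the service
# context, query and text chunks are made up; synthesis calls the configured LLM.
if __name__ == "__main__":
    synthesizer = Refine(
        service_context=ServiceContext.from_defaults(),
        structured_answer_filtering=False,
    )
    answer = synthesizer.get_response(
        query_str="What color is the sky at noon?",
        text_chunks=["The sky appears blue at noon.", "Sunsets are often red."],
    )
    print(answer)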
| [] |
2024-01-10 | cbmchat/llama_index | finetuning~embeddings~sentence_transformer.py | """Sentence Transformer Finetuning Engine."""
from llama_index.embeddings.base import BaseEmbedding
from typing import Dict, Any, List, Optional
from llama_index.bridge.pydantic import BaseModel
from llama_index.schema import TextNode, MetadataMode
from llama_index.llms.openai import OpenAI
from llama_index.llms.base import LLM
from llama_index.embeddings.utils import resolve_embed_model
from llama_index.finetuning.types import BaseEmbeddingFinetuneEngine
from tqdm import tqdm
import uuid
import re
import json
class EmbeddingQAFinetuneDataset(BaseModel):
"""Embedding QA Finetuning Dataset."""
queries: Dict[str, str] # dict id -> query
corpus: Dict[str, str] # dict id -> string
relevant_docs: Dict[str, List[str]] # query id -> list of doc ids
def save_json(self, path: str) -> None:
"""Save json."""
with open(path, "w") as f:
json.dump(self.dict(), f, indent=4)
@classmethod
def from_json(cls, path: str) -> "EmbeddingQAFinetuneDataset":
"""Load json."""
with open(path, "r") as f:
data = json.load(f)
return cls(**data)
DEFAULT_QA_GENERATE_PROMPT_TMPL = """\
Context information is below.
---------------------
{context_str}
---------------------
Given the context information and not prior knowledge.
generate only questions based on the below query.
You are a Teacher/ Professor. Your task is to setup \
{num_questions_per_chunk} questions for an upcoming \
quiz/examination. The questions should be diverse in nature \
across the document. Restrict the questions to the \
context information provided."
"""
# generate queries as a convenience function
def generate_qa_embedding_pairs(
nodes: List[TextNode],
llm: Optional[LLM] = None,
qa_generate_prompt_tmpl: str = DEFAULT_QA_GENERATE_PROMPT_TMPL,
num_questions_per_chunk: int = 2,
) -> EmbeddingQAFinetuneDataset:
"""Generate examples given a set of nodes."""
node_dict = {
node.node_id: node.get_content(metadata_mode=MetadataMode.NONE)
for node in nodes
}
llm = llm or OpenAI(model="gpt-3.5-turbo")
queries = {}
relevant_docs = {}
for node_id, text in tqdm(node_dict.items()):
query = qa_generate_prompt_tmpl.format(
context_str=text, num_questions_per_chunk=num_questions_per_chunk
)
response = llm.complete(query)
result = str(response).strip().split("\n")
questions = [
re.sub(r"^\d+[\).\s]", "", question).strip() for question in result
]
questions = [question for question in questions if len(question) > 0]
for question in questions:
question_id = str(uuid.uuid4())
queries[question_id] = question
relevant_docs[question_id] = [node_id]
# construct dataset
dataset = EmbeddingQAFinetuneDataset(
queries=queries, corpus=node_dict, relevant_docs=relevant_docs
)
return dataset
class SentenceTransformersFinetuneEngine(BaseEmbeddingFinetuneEngine):
"""Sentence Transformers Finetune Engine."""
def __init__(
self,
dataset: EmbeddingQAFinetuneDataset,
model_id: str = "BAAI/bge-small-en",
model_output_path: str = "exp_finetune",
batch_size: int = 10,
val_dataset: Optional[EmbeddingQAFinetuneDataset] = None,
loss: Optional[Any] = None,
epochs: int = 2,
show_progress_bar: bool = True,
evaluation_steps: int = 50,
) -> None:
"""Init params."""
from sentence_transformers import InputExample, SentenceTransformer, losses
from torch.utils.data import DataLoader
self.dataset = dataset
self.model_id = model_id
self.model_output_path = model_output_path
self.model = SentenceTransformer(model_id)
# TODO: support more than 1 doc per query
examples: Any = []
for query_id, query in dataset.queries.items():
node_id = dataset.relevant_docs[query_id][0]
text = dataset.corpus[node_id]
example = InputExample(texts=[query, text])
examples.append(example)
self.examples = examples
self.loader: DataLoader = DataLoader(examples, batch_size=batch_size)
# define evaluator
from sentence_transformers.evaluation import InformationRetrievalEvaluator
evaluator: Optional[InformationRetrievalEvaluator] = None
if val_dataset is not None:
evaluator = InformationRetrievalEvaluator(
val_dataset.queries, val_dataset.corpus, val_dataset.relevant_docs
)
self.evaluator = evaluator
# define loss
self.loss = loss or losses.MultipleNegativesRankingLoss(self.model)
self.epochs = epochs
self.show_progress_bar = show_progress_bar
self.evaluation_steps = evaluation_steps
self.warmup_steps = int(len(self.loader) * epochs * 0.1)
def finetune(self, **train_kwargs: Any) -> None:
"""Finetune model."""
self.model.fit(
train_objectives=[(self.loader, self.loss)],
epochs=self.epochs,
warmup_steps=self.warmup_steps,
output_path=self.model_output_path,
show_progress_bar=self.show_progress_bar,
evaluator=self.evaluator,
evaluation_steps=self.evaluation_steps,
)
def get_finetuned_model(self, **model_kwargs: Any) -> BaseEmbedding:
"""Gets finetuned model."""
embed_model_str = "local:" + self.model_output_path
embed_model = resolve_embed_model(embed_model_str)
return embed_model
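# Minimal end-to-end sketch, guarded so it never runs on import. The dataset
# path, base model id, and output directory below are illustrative assumptions.
if __name__ == "__main__":
    train_dataset = EmbeddingQAFinetuneDataset.from_json("train_dataset.json")
    finetune_engine = SentenceTransformersFinetuneEngine(
        train_dataset,
        model_id="BAAI/bge-small-en",
        model_output_path="exp_finetune",
        epochs=2,
    )
    finetune_engine.finetune()
    embed_model = finetune_engine.get_finetuned_model()
    print(embed_model)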
| [
"Context information is below.\n\n---------------------\n{context_str}\n---------------------\n\nGiven the context information and not prior knowledge.\ngenerate only questions based on the below query.\n\nYou are a Teacher/ Professor. Your task is to setup {num_questions_per_chunk} questions for an upcoming quiz/examination. The questions should be diverse in nature across the document. Restrict the questions to the context information provided.\"\n"
] |
2024-01-10 | cbmchat/llama_index | embeddings~loading.py | from typing import Dict, Type
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.google import GoogleUnivSentEncoderEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.embeddings.langchain import LangchainEmbedding
from llama_index.embeddings.utils import resolve_embed_model
from llama_index.token_counter.mock_embed_model import MockEmbedding
RECOGNIZED_EMBEDDINGS: Dict[str, Type[BaseEmbedding]] = {
GoogleUnivSentEncoderEmbedding.class_name(): GoogleUnivSentEncoderEmbedding,
OpenAIEmbedding.class_name(): OpenAIEmbedding,
LangchainEmbedding.class_name(): LangchainEmbedding,
MockEmbedding.class_name(): MockEmbedding,
}
def load_embed_model(data: dict) -> BaseEmbedding:
"""Load Embedding by name."""
name = data.get("class_name", None)
if name is None:
raise ValueError("Embedding loading requires a class_name")
if name not in RECOGNIZED_EMBEDDINGS:
raise ValueError(f"Invalid Embedding name: {name}")
# special handling for LangchainEmbedding
    # it can be any local model technically
if name == LangchainEmbedding.class_name():
local_name = data.get("model_name", None)
if local_name is not None:
return resolve_embed_model("local:" + local_name)
else:
raise ValueError("LangchainEmbedding requires a model_name")
return RECOGNIZED_EMBEDDINGS[name].from_dict(data)
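# Round-trip sketch, guarded so it never runs on import: serialize a known
# embedding model and load it back by class name. This assumes the standard
# `to_dict()` serialization from the component base class is available.
if __name__ == "__main__":
    original = OpenAIEmbedding()
    restored = load_embed_model(original.to_dict())
    assert isinstance(restored, OpenAIEmbedding)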
| [] |
2024-01-10 | cbmchat/llama_index | program~llm_program.py | from typing import Any, Dict, Optional, Type, Union, cast
from llama_index.bridge.pydantic import BaseModel
from llama_index.llms.base import LLM
from llama_index.llms.openai import OpenAI
from llama_index.output_parsers.pydantic import PydanticOutputParser
from llama_index.program.base_program import BasePydanticProgram
from llama_index.prompts.base import PromptTemplate
class LLMTextCompletionProgram(BasePydanticProgram[BaseModel]):
"""
LLM Text Completion Program.
Uses generic LLM text completion + an output parser to generate a structured output.
"""
def __init__(
self,
output_parser: PydanticOutputParser,
prompt: PromptTemplate,
llm: LLM,
function_call: Union[str, Dict[str, Any]],
verbose: bool = False,
) -> None:
self._output_parser = output_parser
self._llm = llm
self._prompt = prompt
self._verbose = verbose
self._function_call = function_call
@classmethod
def from_defaults(
cls,
output_parser: PydanticOutputParser,
prompt_template_str: Optional[str] = None,
prompt: Optional[PromptTemplate] = None,
llm: Optional[LLM] = None,
verbose: bool = False,
function_call: Optional[Union[str, Dict[str, Any]]] = None,
**kwargs: Any,
) -> "LLMTextCompletionProgram":
llm = llm or OpenAI(temperature=0, model="gpt-3.5-turbo-0613")
if prompt is None and prompt_template_str is None:
raise ValueError("Must provide either prompt or prompt_template_str.")
if prompt is not None and prompt_template_str is not None:
raise ValueError("Must provide either prompt or prompt_template_str.")
if prompt_template_str is not None:
prompt = PromptTemplate(prompt_template_str)
function_call = function_call or {
"name": output_parser.output_cls.schema()["title"]
}
return cls(
output_parser,
prompt=cast(PromptTemplate, prompt),
llm=llm,
function_call=function_call,
verbose=verbose,
)
@property
def output_cls(self) -> Type[BaseModel]:
return self._output_parser.output_cls
def __call__(
self,
*args: Any,
**kwargs: Any,
) -> BaseModel:
prompt_with_parse_instrs_tmpl = self._output_parser.format(
self._prompt.format(**kwargs)
)
prompt_with_parse_instrs = PromptTemplate(prompt_with_parse_instrs_tmpl)
formatted_prompt = prompt_with_parse_instrs.format()
response = self._llm.complete(formatted_prompt)
raw_output = response.text
model_output = self._output_parser.parse(raw_output)
return model_output
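# Usage sketch, guarded so it never runs on import. The `Album` model and the
# prompt string are invented for illustration; any pydantic model works.
if __name__ == "__main__":
    from llama_index.bridge.pydantic import Field
    class Album(BaseModel):
        """An album with a name and a release year."""
        name: str = Field(description="Album name")
        year: int = Field(description="Release year")
    program = LLMTextCompletionProgram.from_defaults(
        output_parser=PydanticOutputParser(output_cls=Album),
        prompt_template_str="Generate an album inspired by {topic}.",
        verbose=True,
    )
    album = program(topic="the sea")
    print(album.name, album.year)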
| [] |
2024-01-10 | cbmchat/llama_index | tools~retriever_tool.py | """Retriever tool."""
from typing import Any, Optional, cast
from llama_index.indices.base_retriever import BaseRetriever
from llama_index.tools.types import AsyncBaseTool, ToolMetadata, ToolOutput
from llama_index.langchain_helpers.agents.tools import LlamaIndexTool
DEFAULT_NAME = "retriever_tool"
DEFAULT_DESCRIPTION = """Useful for running a natural language query
against a knowledge base and retrieving a set of relevant documents.
"""
class RetrieverTool(AsyncBaseTool):
"""Retriever tool.
A tool making use of a retriever.
Args:
retriever (BaseRetriever): A retriever.
metadata (ToolMetadata): The associated metadata of the query engine.
"""
def __init__(
self,
retriever: BaseRetriever,
metadata: ToolMetadata,
) -> None:
self._retriever = retriever
self._metadata = metadata
@classmethod
def from_defaults(
cls,
retriever: BaseRetriever,
name: Optional[str] = None,
description: Optional[str] = None,
) -> "RetrieverTool":
name = name or DEFAULT_NAME
description = description or DEFAULT_DESCRIPTION
metadata = ToolMetadata(name=name, description=description)
return cls(retriever=retriever, metadata=metadata)
@property
def retriever(self) -> BaseRetriever:
return self._retriever
@property
def metadata(self) -> ToolMetadata:
return self._metadata
def call(self, input: Any) -> ToolOutput:
query_str = cast(str, input)
docs = self._retriever.retrieve(query_str)
return ToolOutput(
content=str(docs),
tool_name=self.metadata.name,
raw_input={"input": input},
raw_output=docs,
)
async def acall(self, input: Any) -> ToolOutput:
query_str = cast(str, input)
docs = await self._retriever.aretrieve(query_str)
return ToolOutput(
content=str(docs),
tool_name=self.metadata.name,
raw_input={"input": input},
raw_output=docs,
)
def as_langchain_tool(self) -> LlamaIndexTool:
raise NotImplementedError("`as_langchain_tool` not implemented here.")
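# Usage sketch, guarded so it never runs on import. Assumes documents exist
# under ./data and that the top-level SimpleDirectoryReader/VectorStoreIndex
# helpers are available; the tool name and description are illustrative only.
if __name__ == "__main__":
    from llama_index import SimpleDirectoryReader, VectorStoreIndex
    docs = SimpleDirectoryReader("./data").load_data()
    retriever = VectorStoreIndex.from_documents(docs).as_retriever()
    tool = RetrieverTool.from_defaults(
        retriever=retriever,
        name="docs_retriever",
        description="Retrieves chunks from the local ./data knowledge base.",
    )
    print(tool.call("What does the knowledge base cover?").content)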
| [] |
2024-01-10 | cbmchat/llama_index | llms~loading.py | from typing import Dict, Type
from llama_index.llms.base import LLM
from llama_index.llms.custom import CustomLLM
from llama_index.llms.huggingface import HuggingFaceLLM
from llama_index.llms.langchain import LangChainLLM
from llama_index.llms.llama_cpp import LlamaCPP
from llama_index.llms.mock import MockLLM
from llama_index.llms.openai import OpenAI
from llama_index.llms.palm import PaLM
from llama_index.llms.predibase import PredibaseLLM
from llama_index.llms.replicate import Replicate
from llama_index.llms.xinference import Xinference
RECOGNIZED_LLMS: Dict[str, Type[LLM]] = {
MockLLM.class_name(): MockLLM,
Replicate.class_name(): Replicate,
HuggingFaceLLM.class_name(): HuggingFaceLLM,
OpenAI.class_name(): OpenAI,
Xinference.class_name(): Xinference,
LlamaCPP.class_name(): LlamaCPP,
LangChainLLM.class_name(): LangChainLLM,
PaLM.class_name(): PaLM,
PredibaseLLM.class_name(): PredibaseLLM,
CustomLLM.class_name(): CustomLLM,
}
def load_llm(data: dict) -> LLM:
"""Load LLM by name."""
llm_name = data.get("class_name", None)
if llm_name is None:
raise ValueError("LLM loading requires a class_name")
if llm_name not in RECOGNIZED_LLMS:
raise ValueError(f"Invalid LLM name: {llm_name}")
return RECOGNIZED_LLMS[llm_name].from_dict(data)
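# Round-trip sketch, guarded so it never runs on import: serialize an LLM and
# load it back by class name. This assumes the standard `to_dict()`
# serialization from the component base class is available on the LLM.
if __name__ == "__main__":
    original = OpenAI(model="gpt-3.5-turbo")
    restored = load_llm(original.to_dict())
    assert isinstance(restored, OpenAI)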
| [] |
2024-01-10 | cbmchat/llama_index | program~guidance_program.py | from typing import TYPE_CHECKING, Any, Optional, Type, cast
from llama_index.bridge.pydantic import BaseModel
from llama_index.program.llm_prompt_program import BaseLLMFunctionProgram
from llama_index.prompts.base import PromptTemplate
from llama_index.prompts.guidance_utils import (
parse_pydantic_from_guidance_program,
pydantic_to_guidance_output_template_markdown,
)
if TYPE_CHECKING:
from guidance.llms import LLM as GuidanceLLM
class GuidancePydanticProgram(BaseLLMFunctionProgram["GuidanceLLM"]):
"""
A guidance-based function that returns a pydantic model.
Note: this interface is not yet stable.
"""
def __init__(
self,
output_cls: Type[BaseModel],
prompt_template_str: str,
guidance_llm: Optional["GuidanceLLM"] = None,
verbose: bool = False,
):
try:
from guidance import Program
from guidance.llms import OpenAI
except ImportError as e:
raise ImportError(
"guidance package not found." "please run `pip install guidance`"
) from e
llm = guidance_llm or OpenAI("text-davinci-003")
output_str = pydantic_to_guidance_output_template_markdown(output_cls)
full_str = prompt_template_str + "\n" + output_str
self._full_str = full_str
self._guidance_program = Program(full_str, llm=llm, silent=not verbose)
self._output_cls = output_cls
self._verbose = verbose
@classmethod
def from_defaults(
cls,
output_cls: Type[BaseModel],
prompt_template_str: Optional[str] = None,
prompt: Optional[PromptTemplate] = None,
llm: Optional["GuidanceLLM"] = None,
**kwargs: Any,
) -> "BaseLLMFunctionProgram":
"""From defaults."""
if prompt is None and prompt_template_str is None:
raise ValueError("Must provide either prompt or prompt_template_str.")
if prompt is not None and prompt_template_str is not None:
raise ValueError("Must provide either prompt or prompt_template_str.")
if prompt is not None:
prompt_template_str = prompt.template
prompt_template_str = cast(str, prompt_template_str)
return cls(output_cls, prompt_template_str, guidance_llm=llm, **kwargs)
@property
def output_cls(self) -> Type[BaseModel]:
return self._output_cls
def __call__(
self,
*args: Any,
**kwargs: Any,
) -> BaseModel:
executed_program = self._guidance_program(**kwargs)
pydantic_obj = parse_pydantic_from_guidance_program(
program=executed_program, cls=self._output_cls
)
return pydantic_obj
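# Usage sketch, guarded so it never runs on import. The `Song` model, the
# handlebars-style prompt, and the guidance LLM choice are illustrative
# assumptions only.
if __name__ == "__main__":
    from guidance.llms import OpenAI as GuidanceOpenAI
    from llama_index.bridge.pydantic import Field
    class Song(BaseModel):
        title: str = Field(description="Song title")
        length_seconds: int = Field(description="Length in seconds")
    program = GuidancePydanticProgram(
        output_cls=Song,
        prompt_template_str="Write a song about {{topic}}.",
        guidance_llm=GuidanceOpenAI("text-davinci-003"),
    )
    song = program(topic="the moon")
    print(song.title)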
| [] |
2024-01-10 | cbmchat/llama_index | program~__init__.py | from llama_index.program.base_program import BasePydanticProgram
from llama_index.program.guidance_program import GuidancePydanticProgram
from llama_index.program.openai_program import OpenAIPydanticProgram
from llama_index.program.predefined.df import (
DataFrame,
DataFrameRowsOnly,
DFFullProgram,
DFRowsProgram,
)
from llama_index.program.llm_program import LLMTextCompletionProgram
__all__ = [
"BasePydanticProgram",
"GuidancePydanticProgram",
"OpenAIPydanticProgram",
"LLMTextCompletionProgram",
"DataFrame",
"DataFrameRowsOnly",
"DFRowsProgram",
"DFFullProgram",
]
| [] |
2024-01-10 | cbmchat/llama_index | langchain_helpers~sql_wrapper.py | """SQL wrapper around SQLDatabase in langchain."""
from typing import Any, Dict, List, Tuple, Optional
from llama_index.bridge.langchain import SQLDatabase as LangchainSQLDatabase
from sqlalchemy import MetaData, create_engine, insert, text
from sqlalchemy.engine import Engine
class SQLDatabase(LangchainSQLDatabase):
"""SQL Database.
Wrapper around SQLDatabase object from langchain. Offers
some helper utilities for insertion and querying.
    See `langchain documentation <https://tinyurl.com/4we5ku8j>`_ for more details.
Args:
*args: Arguments to pass to langchain SQLDatabase.
**kwargs: Keyword arguments to pass to langchain SQLDatabase.
"""
@property
def engine(self) -> Engine:
"""Return SQL Alchemy engine."""
return self._engine
@property
def metadata_obj(self) -> MetaData:
"""Return SQL Alchemy metadata."""
return self._metadata
@classmethod
def from_uri(
cls, database_uri: str, engine_args: Optional[dict] = None, **kwargs: Any
) -> "SQLDatabase":
"""Construct a SQLAlchemy engine from URI."""
_engine_args = engine_args or {}
return cls(create_engine(database_uri, **_engine_args), **kwargs)
def get_table_columns(self, table_name: str) -> List[Any]:
"""Get table columns."""
return self._inspector.get_columns(table_name)
def get_single_table_info(self, table_name: str) -> str:
"""Get table info for a single table."""
# same logic as table_info, but with specific table names
template = (
"Table '{table_name}' has columns: {columns}, "
"and foreign keys: {foreign_keys}."
)
columns = []
for column in self._inspector.get_columns(table_name):
if column.get("comment"):
columns.append(
(
f"{column['name']} ({str(column['type'])}): "
f"'{column.get('comment')}'"
)
)
else:
columns.append(f"{column['name']} ({str(column['type'])})")
column_str = ", ".join(columns)
foreign_keys = []
for foreign_key in self._inspector.get_foreign_keys(table_name):
foreign_keys.append(
f"{foreign_key['constrained_columns']} -> "
f"{foreign_key['referred_table']}.{foreign_key['referred_columns']}"
)
foreign_key_str = ", ".join(foreign_keys)
table_str = template.format(
table_name=table_name, columns=column_str, foreign_keys=foreign_key_str
)
return table_str
def insert_into_table(self, table_name: str, data: dict) -> None:
"""Insert data into a table."""
table = self._metadata.tables[table_name]
stmt = insert(table).values(**data)
with self._engine.connect() as connection:
connection.execute(stmt)
connection.commit()
def run_sql(self, command: str) -> Tuple[str, Dict]:
"""Execute a SQL statement and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
"""
with self._engine.connect() as connection:
cursor = connection.execute(text(command))
if cursor.returns_rows:
result = cursor.fetchall()
return str(result), {"result": result}
return "", {}
| [
"Table '{table_name}' has columns: {columns}, and foreign keys: {foreign_keys}."
] |
2024-01-10 | cbmchat/llama_index | query_engine~sub_question_query_engine.py | import asyncio
import logging
from typing import List, Optional, Sequence, cast
from llama_index.bridge.pydantic import BaseModel
from llama_index.async_utils import run_async_tasks
from llama_index.bridge.langchain import get_color_mapping, print_text
from llama_index.callbacks.base import CallbackManager
from llama_index.callbacks.schema import CBEventType, EventPayload
from llama_index.indices.query.base import BaseQueryEngine
from llama_index.indices.query.schema import QueryBundle
from llama_index.indices.service_context import ServiceContext
from llama_index.question_gen.llm_generators import LLMQuestionGenerator
from llama_index.question_gen.openai_generator import OpenAIQuestionGenerator
from llama_index.question_gen.types import BaseQuestionGenerator, SubQuestion
from llama_index.response.schema import RESPONSE_TYPE
from llama_index.response_synthesizers import BaseSynthesizer, get_response_synthesizer
from llama_index.schema import NodeWithScore, TextNode
from llama_index.tools.query_engine import QueryEngineTool
logger = logging.getLogger(__name__)
class SubQuestionAnswerPair(BaseModel):
"""
    Pair of the sub question and optionally its answer (if it has been answered yet).
"""
sub_q: SubQuestion
answer: Optional[str] = None
sources: Optional[List[NodeWithScore]] = None
class SubQuestionQueryEngine(BaseQueryEngine):
"""Sub question query engine.
    A query engine that breaks down a complex query (e.g. compare and contrast) into
    many sub questions, each routed to a target query engine for execution.
After executing all sub questions, all responses are gathered and sent to
response synthesizer to produce the final response.
Args:
question_gen (BaseQuestionGenerator): A module for generating sub questions
given a complex question and tools.
response_synthesizer (BaseSynthesizer): A response synthesizer for
generating the final response
query_engine_tools (Sequence[QueryEngineTool]): Tools to answer the
sub questions.
verbose (bool): whether to print intermediate questions and answers.
Defaults to True
use_async (bool): whether to execute the sub questions with asyncio.
Defaults to True
"""
def __init__(
self,
question_gen: BaseQuestionGenerator,
response_synthesizer: BaseSynthesizer,
query_engine_tools: Sequence[QueryEngineTool],
callback_manager: Optional[CallbackManager] = None,
verbose: bool = True,
use_async: bool = False,
) -> None:
self._question_gen = question_gen
self._response_synthesizer = response_synthesizer
self._metadatas = [x.metadata for x in query_engine_tools]
self._query_engines = {
tool.metadata.name: tool.query_engine for tool in query_engine_tools
}
self._verbose = verbose
self._use_async = use_async
super().__init__(callback_manager)
@classmethod
def from_defaults(
cls,
query_engine_tools: Sequence[QueryEngineTool],
question_gen: Optional[BaseQuestionGenerator] = None,
response_synthesizer: Optional[BaseSynthesizer] = None,
service_context: Optional[ServiceContext] = None,
verbose: bool = True,
use_async: bool = True,
) -> "SubQuestionQueryEngine":
callback_manager = None
if service_context is not None:
callback_manager = service_context.callback_manager
elif len(query_engine_tools) > 0:
callback_manager = query_engine_tools[0].query_engine.callback_manager
if question_gen is None:
if service_context is None:
# use default openai model that supports function calling API
question_gen = OpenAIQuestionGenerator.from_defaults()
else:
# try to use OpenAI function calling based question generator.
# if incompatible, use general LLM question generator
try:
question_gen = OpenAIQuestionGenerator.from_defaults(
llm=service_context.llm
)
except ValueError:
question_gen = LLMQuestionGenerator.from_defaults(
service_context=service_context
)
synth = response_synthesizer or get_response_synthesizer(
callback_manager=callback_manager,
service_context=service_context,
use_async=use_async,
)
return cls(
question_gen,
synth,
query_engine_tools,
callback_manager=callback_manager,
verbose=verbose,
use_async=use_async,
)
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
sub_questions = self._question_gen.generate(self._metadatas, query_bundle)
colors = get_color_mapping([str(i) for i in range(len(sub_questions))])
if self._verbose:
print_text(f"Generated {len(sub_questions)} sub questions.\n")
if self._use_async:
tasks = [
self._aquery_subq(sub_q, color=colors[str(ind)])
for ind, sub_q in enumerate(sub_questions)
]
qa_pairs_all = run_async_tasks(tasks)
qa_pairs_all = cast(List[Optional[SubQuestionAnswerPair]], qa_pairs_all)
else:
qa_pairs_all = [
self._query_subq(sub_q, color=colors[str(ind)])
for ind, sub_q in enumerate(sub_questions)
]
# filter out sub questions that failed
qa_pairs: List[SubQuestionAnswerPair] = list(filter(None, qa_pairs_all))
nodes = [self._construct_node(pair) for pair in qa_pairs]
response = self._response_synthesizer.synthesize(
query=query_bundle,
nodes=nodes,
)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
sub_questions = await self._question_gen.agenerate(
self._metadatas, query_bundle
)
colors = get_color_mapping([str(i) for i in range(len(sub_questions))])
if self._verbose:
print_text(f"Generated {len(sub_questions)} sub questions.\n")
tasks = [
self._aquery_subq(sub_q, color=colors[str(ind)])
for ind, sub_q in enumerate(sub_questions)
]
qa_pairs_all = await asyncio.gather(*tasks)
qa_pairs_all = cast(List[Optional[SubQuestionAnswerPair]], qa_pairs_all)
# filter out sub questions that failed
qa_pairs: List[SubQuestionAnswerPair] = list(filter(None, qa_pairs_all))
nodes = [self._construct_node(pair) for pair in qa_pairs]
response = await self._response_synthesizer.asynthesize(
query=query_bundle,
nodes=nodes,
)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
def _construct_node(self, qa_pair: SubQuestionAnswerPair) -> NodeWithScore:
node_text = (
f"Sub question: {qa_pair.sub_q.sub_question}\nResponse: {qa_pair.answer}"
)
return NodeWithScore(node=TextNode(text=node_text))
async def _aquery_subq(
self, sub_q: SubQuestion, color: Optional[str] = None
) -> Optional[SubQuestionAnswerPair]:
try:
with self.callback_manager.event(
CBEventType.SUB_QUESTION,
payload={EventPayload.SUB_QUESTION: SubQuestionAnswerPair(sub_q=sub_q)},
) as event:
question = sub_q.sub_question
query_engine = self._query_engines[sub_q.tool_name]
if self._verbose:
print_text(f"[{sub_q.tool_name}] Q: {question}\n", color=color)
response = await query_engine.aquery(question)
response_text = str(response)
if self._verbose:
print_text(f"[{sub_q.tool_name}] A: {response_text}\n", color=color)
qa_pair = SubQuestionAnswerPair(
sub_q=sub_q, answer=response_text, sources=response.source_nodes
)
event.on_end(payload={EventPayload.SUB_QUESTION: qa_pair})
return qa_pair
except ValueError:
logger.warn(f"[{sub_q.tool_name}] Failed to run {question}")
return None
def _query_subq(
self, sub_q: SubQuestion, color: Optional[str] = None
) -> Optional[SubQuestionAnswerPair]:
try:
with self.callback_manager.event(
CBEventType.SUB_QUESTION,
payload={EventPayload.SUB_QUESTION: SubQuestionAnswerPair(sub_q=sub_q)},
) as event:
question = sub_q.sub_question
query_engine = self._query_engines[sub_q.tool_name]
if self._verbose:
print_text(f"[{sub_q.tool_name}] Q: {question}\n", color=color)
response = query_engine.query(question)
response_text = str(response)
if self._verbose:
print_text(f"[{sub_q.tool_name}] A: {response_text}\n", color=color)
qa_pair = SubQuestionAnswerPair(
sub_q=sub_q, answer=response_text, sources=response.source_nodes
)
event.on_end(payload={EventPayload.SUB_QUESTION: qa_pair})
return qa_pair
except ValueError:
logger.warn(f"[{sub_q.tool_name}] Failed to run {question}")
return None
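# Usage sketch, guarded so it never runs on import. Assumes two document
# folders exist and that the top-level index helpers are available; the tool
# names and descriptions are illustrative only.
if __name__ == "__main__":
    from llama_index import SimpleDirectoryReader, VectorStoreIndex
    from llama_index.tools import ToolMetadata
    march_engine = VectorStoreIndex.from_documents(
        SimpleDirectoryReader("./march").load_data()
    ).as_query_engine()
    june_engine = VectorStoreIndex.from_documents(
        SimpleDirectoryReader("./june").load_data()
    ).as_query_engine()
    query_engine = SubQuestionQueryEngine.from_defaults(
        query_engine_tools=[
            QueryEngineTool(
                query_engine=march_engine,
                metadata=ToolMetadata(name="march", description="March filings"),
            ),
            QueryEngineTool(
                query_engine=june_engine,
                metadata=ToolMetadata(name="june", description="June filings"),
            ),
        ]
    )
    print(query_engine.query("Compare revenue growth between March and June."))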
| [] |
2024-01-10 | cbmchat/llama_index | llama_index~prompts~default_prompts.py | """Set of default prompts."""
from llama_index.prompts.base import PromptTemplate
from llama_index.prompts.prompt_type import PromptType
############################################
# Tree
############################################
# DEFAULT_SUMMARY_PROMPT_TMPL = (
# "Write a summary of the following. Try to use only the "
# "information provided. "
# "Try to include as many key details as possible.\n"
# "\n"
# "\n"
# "{context_str}\n"
# "\n"
# "\n"
# 'SUMMARY:"""\n'
# )
DEFAULT_SUMMARY_PROMPT_TMPL = (
"写一个关于以下内容的摘要。尽量只使用所提供的信息。"
"尽量包含尽可能多的关键细节。\n"
"\n"
"\n"
"{context_str}\n"
"\n"
"\n"
'摘要:"""\n'
)
DEFAULT_SUMMARY_PROMPT = PromptTemplate(
DEFAULT_SUMMARY_PROMPT_TMPL, prompt_type=PromptType.SUMMARY
)
# insert prompts
# DEFAULT_INSERT_PROMPT_TMPL = (
# "Context information is below. It is provided in a numbered list "
# "(1 to {num_chunks}),"
# "where each item in the list corresponds to a summary.\n"
# "---------------------\n"
# "{context_list}"
# "---------------------\n"
# "Given the context information, here is a new piece of "
# "information: {new_chunk_text}\n"
# "Answer with the number corresponding to the summary that should be updated. "
# "The answer should be the number corresponding to the "
# "summary that is most relevant to the question.\n"
# )
DEFAULT_INSERT_PROMPT_TMPL = (
"下面提供了上下文信息,以编号列表形式提供(从1到{num_chunks}),"
"其中列表中的每一项对应一个摘要。\n"
"---------------------\n"
"{context_list}"
"---------------------\n"
"根据上下文信息,这是一个新的信息片段:{new_chunk_text}\n"
"答案为应更新的摘要的编号。答案应为与问题最相关的摘要对应的编号。\n"
)
DEFAULT_INSERT_PROMPT = PromptTemplate(
DEFAULT_INSERT_PROMPT_TMPL, prompt_type=PromptType.TREE_INSERT
)
# # single choice
# DEFAULT_QUERY_PROMPT_TMPL = (
# "Some choices are given below. It is provided in a numbered list "
# "(1 to {num_chunks}),"
# "where each item in the list corresponds to a summary.\n"
# "---------------------\n"
# "{context_list}"
# "\n---------------------\n"
# "Using only the choices above and not prior knowledge, return "
# "the choice that is most relevant to the question: '{query_str}'\n"
# "Provide choice in the following format: 'ANSWER: <number>' and explain why "
# "this summary was selected in relation to the question.\n"
# )
DEFAULT_QUERY_PROMPT_TMPL = (
"以下是一些选择项,它们以编号列表的形式呈现(从1到{num_chunks}),"
"其中列表中的每个项目对应一个摘要。\n"
"---------------------\n"
"{context_list}"
"\n---------------------\n"
"仅使用上述选择,不使用先前的知识,找出与问题 '{query_str}' 最相关的选择。\n"
"请以以下格式提供答案:'ANSWER: <编号>',并解释为什么选择这个摘要与问题相关。\n"
)
DEFAULT_QUERY_PROMPT = PromptTemplate(
DEFAULT_QUERY_PROMPT_TMPL, prompt_type=PromptType.TREE_SELECT
)
# multiple choice
# DEFAULT_QUERY_PROMPT_MULTIPLE_TMPL = (
# "Some choices are given below. It is provided in a numbered "
# "list (1 to {num_chunks}), "
# "where each item in the list corresponds to a summary.\n"
# "---------------------\n"
# "{context_list}"
# "\n---------------------\n"
# "Using only the choices above and not prior knowledge, return the top choices "
# "(no more than {branching_factor}, ranked by most relevant to least) that "
# "are most relevant to the question: '{query_str}'\n"
# "Provide choices in the following format: 'ANSWER: <numbers>' and explain why "
# "these summaries were selected in relation to the question.\n"
# )
DEFAULT_QUERY_PROMPT_MULTIPLE_TMPL = (
"下面列出了一些选择项,它们以编号列表的形式呈现(从1到{num_chunks}),"
"列表中的每个项目对应一个摘要。\n"
"---------------------\n"
"{context_list}"
"\n---------------------\n"
"仅使用上述选择,不使用先前的知识,返回与问题 '{query_str}' 最相关的前若干选择项 "
"(不超过{branching_factor}个),按从最相关到最不相关的顺序排列。\n"
"请以以下格式提供选择:'ANSWER: <编号>',并解释为什么选择这些摘要与问题相关。\n"
)
DEFAULT_QUERY_PROMPT_MULTIPLE = PromptTemplate(
DEFAULT_QUERY_PROMPT_MULTIPLE_TMPL, prompt_type=PromptType.TREE_SELECT_MULTIPLE
)
# DEFAULT_REFINE_PROMPT_TMPL = (
# "The original query is as follows: {query_str}\n"
# "We have provided an existing answer: {existing_answer}\n"
# "We have the opportunity to refine the existing answer "
# "(only if needed) with some more context below.\n"
# "------------\n"
# "{context_msg}\n"
# "------------\n"
# "Given the new context, refine the original answer to better "
# "answer the query. "
# "If the context isn't useful, return the original answer.\n"
# "Refined Answer: "
# )
DEFAULT_REFINE_PROMPT_TMPL = (
"原始查询如下:{query_str}\n"
"我们已经提供了一个现有答案:{existing_answer}\n"
"我们有机会通过以下一些更多的上下文来完善现有答案(仅在需要时)。 \n"
"------------\n"
"{context_msg}\n"
"------------\n"
"在新的上下文基础上,完善原始答案以更好地回答查询。"
"如果上下文对于完善答案没有帮助,那么返回原始答案。\n"
"完善后的答案:"
)
DEFAULT_REFINE_PROMPT = PromptTemplate(
DEFAULT_REFINE_PROMPT_TMPL, prompt_type=PromptType.REFINE
)
# DEFAULT_TEXT_QA_PROMPT_TMPL = (
# "Context information is below.\n"
# "---------------------\n"
# "{context_str}\n"
# "---------------------\n"
# "Given the context information and not prior knowledge, "
# "answer the query.\n"
# "Query: {query_str}\n"
# "Answer: "
# )
DEFAULT_TEXT_QA_PROMPT_TMPL = (
"下面是上下文信息。\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"根据上下文信息而不是先前的知识,回答查询。\n"
"查询:{query_str}\n"
"答案:"
)
DEFAULT_TEXT_QA_PROMPT = PromptTemplate(
DEFAULT_TEXT_QA_PROMPT_TMPL, prompt_type=PromptType.QUESTION_ANSWER
)
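# Illustrative, comment-only example (never executed): a PromptTemplate is
# filled in with keyword arguments. The context and query strings below are
# invented for demonstration.
#
#   prompt_str = DEFAULT_TEXT_QA_PROMPT.format(
#       context_str="巴黎是法国的首都。",
#       query_str="法国的首都是哪里?",
#   )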
# DEFAULT_TREE_SUMMARIZE_TMPL = (
# "Context information from multiple sources is below.\n"
# "---------------------\n"
# "{context_str}\n"
# "---------------------\n"
# "Given the information from multiple sources and not prior knowledge, "
# "answer the query.\n"
# "Query: {query_str}\n"
# "Answer: "
# )
DEFAULT_TREE_SUMMARIZE_TMPL = (
"下面是来自多个来源的上下文信息。\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"根据来自多个来源的信息而不是先前的知识,回答查询。\n"
"查询:{query_str}\n"
"答案:"
)
DEFAULT_TREE_SUMMARIZE_PROMPT = PromptTemplate(
DEFAULT_TREE_SUMMARIZE_TMPL, prompt_type=PromptType.SUMMARY
)
############################################
# Keyword Table
############################################
# DEFAULT_KEYWORD_EXTRACT_TEMPLATE_TMPL = (
# "Some text is provided below. Given the text, extract up to {max_keywords} "
# "keywords from the text. Avoid stopwords."
# "---------------------\n"
# "{text}\n"
# "---------------------\n"
# "Provide keywords in the following comma-separated format: 'KEYWORDS: <keywords>'\n"
# )
DEFAULT_KEYWORD_EXTRACT_TEMPLATE_TMPL = (
"下面提供了一些文本。根据文本,从中提取最多 {max_keywords} 个关键词。避免使用停用词。"
"---------------------\n"
"{text}\n"
"---------------------\n"
"请以以下逗号分隔的格式提供关键词:'KEYWORDS: <关键词>'\n"
)
DEFAULT_KEYWORD_EXTRACT_TEMPLATE = PromptTemplate(
DEFAULT_KEYWORD_EXTRACT_TEMPLATE_TMPL, prompt_type=PromptType.KEYWORD_EXTRACT
)
# NOTE: the keyword extraction for queries can be the same as
# the one used to build the index, but here we tune it to see if performance is better.
# DEFAULT_QUERY_KEYWORD_EXTRACT_TEMPLATE_TMPL = (
# "A question is provided below. Given the question, extract up to {max_keywords} "
# "keywords from the text. Focus on extracting the keywords that we can use "
# "to best lookup answers to the question. Avoid stopwords.\n"
# "---------------------\n"
# "{question}\n"
# "---------------------\n"
# "Provide keywords in the following comma-separated format: 'KEYWORDS: <keywords>'\n"
# )
DEFAULT_QUERY_KEYWORD_EXTRACT_TEMPLATE_TMPL = (
"下面提供了一个问题。根据问题,从中提取最多 {max_keywords} 个关键词。专注于提取我们可以用来最佳查找答案的关键词。避免使用停用词。\n"
"---------------------\n"
"{question}\n"
"---------------------\n"
"请以以下逗号分隔的格式提供关键词:'KEYWORDS: <关键词>'\n"
)
DEFAULT_QUERY_KEYWORD_EXTRACT_TEMPLATE = PromptTemplate(
DEFAULT_QUERY_KEYWORD_EXTRACT_TEMPLATE_TMPL,
prompt_type=PromptType.QUERY_KEYWORD_EXTRACT,
)
############################################
# Structured Store
############################################
# DEFAULT_SCHEMA_EXTRACT_TMPL = (
# "We wish to extract relevant fields from an unstructured text chunk into "
# "a structured schema. We first provide the unstructured text, and then "
# "we provide the schema that we wish to extract. "
# "-----------text-----------\n"
# "{text}\n"
# "-----------schema-----------\n"
# "{schema}\n"
# "---------------------\n"
# "Given the text and schema, extract the relevant fields from the text in "
# "the following format: "
# "field1: <value>\nfield2: <value>\n...\n\n"
# "If a field is not present in the text, don't include it in the output."
# "If no fields are present in the text, return a blank string.\n"
# "Fields: "
# )
DEFAULT_SCHEMA_EXTRACT_TMPL = (
"我们希望从非结构化的文本块中提取相关字段,生成一个结构化模式。"
"我们首先提供非结构化文本,然后提供我们希望提取的模式。"
"-----------文本-----------\n"
"{text}\n"
"-----------模式-----------\n"
"{schema}\n"
"---------------------\n"
"根据给定的文本和模式,在以下格式中从文本中提取相关字段:"
"字段1: <值>\n字段2: <值>\n...\n\n"
"如果文本中没有某个字段,请不要在输出中包含它。"
"如果文本中没有任何字段,请返回一个空字符串。\n"
"字段:"
)
DEFAULT_SCHEMA_EXTRACT_PROMPT = PromptTemplate(
DEFAULT_SCHEMA_EXTRACT_TMPL, prompt_type=PromptType.SCHEMA_EXTRACT
)
# NOTE: taken from langchain and adapted
# https://tinyurl.com/b772sd77
# DEFAULT_TEXT_TO_SQL_TMPL = (
# "Given an input question, first create a syntactically correct {dialect} "
# "query to run, then look at the results of the query and return the answer. "
# "You can order the results by a relevant column to return the most "
# "interesting examples in the database.\n"
# "Never query for all the columns from a specific table, only ask for a "
# "few relevant columns given the question.\n"
# "Pay attention to use only the column names that you can see in the schema "
# "description. "
# "Be careful to not query for columns that do not exist. "
# "Pay attention to which column is in which table. "
# "Also, qualify column names with the table name when needed.\n"
# "Use the following format:\n"
# "Question: Question here\n"
# "SQLQuery: SQL Query to run\n"
# "SQLResult: Result of the SQLQuery\n"
# "Answer: Final answer here\n"
# "Only use the tables listed below.\n"
# "{schema}\n"
# "Question: {query_str}\n"
# "SQLQuery: "
# )
DEFAULT_TEXT_TO_SQL_TMPL = (
"给定一个输入问题,首先创建一个符合语法的{dialect}查询以运行,然后查看查询结果并返回答案。"
"您可以通过相关列对结果进行排序,以返回数据库中最有趣的示例。"
"永远不要查询特定表中的所有列,只询问与问题相关的少数列。"
"注意仅使用在模式描述中可见的列名。"
"小心不要查询不存在的列。"
"注意哪个列位于哪个表中。"
"在需要时,也要用表名限定列名。\n"
"使用以下格式:\n"
"问题:在这里提出问题\n"
"SQL查询:要运行的SQL查询\n"
"SQL结果:SQL查询结果\n"
"答案:在这里给出最终答案\n"
"仅使用下面列出的表。\n"
"{schema}\n"
"问题:{query_str}\n"
"SQL查询:"
)
DEFAULT_TEXT_TO_SQL_PROMPT = PromptTemplate(
DEFAULT_TEXT_TO_SQL_TMPL,
prompt_type=PromptType.TEXT_TO_SQL,
)
DEFAULT_TEXT_TO_SQL_PGVECTOR_TMPL = """\
Given an input question, first create a syntactically correct {dialect} \
query to run, then look at the results of the query and return the answer. \
You can order the results by a relevant column to return the most \
interesting examples in the database.
Pay attention to use only the column names that you can see in the schema \
description. Be careful to not query for columns that do not exist. \
Pay attention to which column is in which table. Also, qualify column names \
with the table name when needed.
IMPORTANT NOTE: you can use specialized pgvector syntax (`<->`) to do nearest \
neighbors/semantic search to a given vector from an embeddings column in the table. \
The embeddings value for a given row typically represents the semantic meaning of that row. \
The vector represents an embedding representation \
of the question, given below. Do NOT fill in the vector values directly, but rather specify a \
`[query_vector]` placeholder. For instance, some select statement examples below \
(the name of the embeddings column is `embedding`):
SELECT * FROM items ORDER BY embedding <-> '[query_vector]' LIMIT 5;
SELECT * FROM items WHERE id != 1 ORDER BY embedding <-> (SELECT embedding FROM items WHERE id = 1) LIMIT 5;
SELECT * FROM items WHERE embedding <-> '[query_vector]' < 5;
You are required to use the following format, \
each taking one line:
Question: Question here
SQLQuery: SQL Query to run
SQLResult: Result of the SQLQuery
Answer: Final answer here
Only use tables listed below.
{schema}
Question: {query_str}
SQLQuery: \
"""
DEFAULT_TEXT_TO_SQL_PGVECTOR_PROMPT = PromptTemplate(
DEFAULT_TEXT_TO_SQL_PGVECTOR_TMPL,
prompt_type=PromptType.TEXT_TO_SQL,
)
# NOTE: by partially filling schema, we can reduce to a QuestionAnswer prompt
# that we can feed to our table
# DEFAULT_TABLE_CONTEXT_TMPL = (
# "We have provided a table schema below. "
# "---------------------\n"
# "{schema}\n"
# "---------------------\n"
# "We have also provided context information below. "
# "{context_str}\n"
# "---------------------\n"
# "Given the context information and the table schema, "
# "give a response to the following task: {query_str}"
# )
DEFAULT_TABLE_CONTEXT_TMPL = (
"我们在下面提供了一个表结构。"
"---------------------\n"
"{schema}\n"
"---------------------\n"
"我们还在下面提供了一些上下文信息。"
"{context_str}\n"
"---------------------\n"
"根据上下文信息和表结构,"
"针对以下任务给出一个回答:{query_str}"
)
# DEFAULT_TABLE_CONTEXT_QUERY = (
# "Provide a high-level description of the table, "
# "as well as a description of each column in the table. "
# "Provide answers in the following format:\n"
# "TableDescription: <description>\n"
# "Column1Description: <description>\n"
# "Column2Description: <description>\n"
# "...\n\n"
# )
DEFAULT_TABLE_CONTEXT_QUERY = (
"提供一个关于表的高级描述,以及表中每个列的描述。"
"请按以下格式提供答案:\n"
"表描述: <描述>\n"
"列1描述: <描述>\n"
"列2描述: <描述>\n"
"...\n\n"
)
DEFAULT_TABLE_CONTEXT_PROMPT = PromptTemplate(
DEFAULT_TABLE_CONTEXT_TMPL, prompt_type=PromptType.TABLE_CONTEXT
)
# NOTE: by partially filling schema, we can reduce to a RefinePrompt
# that we can feed to our table
# DEFAULT_REFINE_TABLE_CONTEXT_TMPL = (
# "We have provided a table schema below. "
# "---------------------\n"
# "{schema}\n"
# "---------------------\n"
# "We have also provided some context information below. "
# "{context_msg}\n"
# "---------------------\n"
# "Given the context information and the table schema, "
# "give a response to the following task: {query_str}\n"
# "We have provided an existing answer: {existing_answer}\n"
# "Given the new context, refine the original answer to better "
# "answer the question. "
# "If the context isn't useful, return the original answer."
# )
DEFAULT_REFINE_TABLE_CONTEXT_TMPL = (
"我们在下面提供了一个表结构。"
"---------------------\n"
"{schema}\n"
"---------------------\n"
"我们还在下面提供了一些上下文信息。"
"{context_msg}\n"
"---------------------\n"
"根据上下文信息和表结构,"
"针对以下任务给出一个回答:{query_str}\n"
"我们已经提供了一个现有答案:{existing_answer}\n"
"根据新的上下文,优化原始答案以更好地回答问题。"
"如果上下文无用,请保持原始答案。"
)
DEFAULT_REFINE_TABLE_CONTEXT_PROMPT = PromptTemplate(
DEFAULT_REFINE_TABLE_CONTEXT_TMPL, prompt_type=PromptType.TABLE_CONTEXT
)
############################################
# Knowledge-Graph Table
############################################
# DEFAULT_KG_TRIPLET_EXTRACT_TMPL = (
# "Some text is provided below. Given the text, extract up to "
# "{max_knowledge_triplets} "
# "knowledge triplets in the form of (subject, predicate, object). Avoid stopwords.\n"
# "---------------------\n"
# "Example:"
# "Text: Alice is Bob's mother."
# "Triplets:\n(Alice, is mother of, Bob)\n"
# "Text: Philz is a coffee shop founded in Berkeley in 1982.\n"
# "Triplets:\n"
# "(Philz, is, coffee shop)\n"
# "(Philz, founded in, Berkeley)\n"
# "(Philz, founded in, 1982)\n"
# "---------------------\n"
# "Text: {text}\n"
# "Triplets:\n"
# )
DEFAULT_KG_TRIPLET_EXTRACT_TMPL = (
"下面提供了一些文本。根据文本,提取最多 {max_knowledge_triplets} 个知识三元组,"
"形式为(主语,谓语,宾语)。避免使用停用词。\n"
"---------------------\n"
"示例:"
"文本:Alice是Bob的母亲。"
"三元组:\n(Alice,是...的母亲,Bob)\n"
"文本:Philz是于1982年在伯克利创立的咖啡店。\n"
"三元组:\n"
"(Philz,是,咖啡店)\n"
"(Philz,创立于,伯克利)\n"
"(Philz,创立于,1982年)\n"
"---------------------\n"
"文本:{text}\n"
"三元组:\n"
)
DEFAULT_KG_TRIPLET_EXTRACT_PROMPT = PromptTemplate(
DEFAULT_KG_TRIPLET_EXTRACT_TMPL, prompt_type=PromptType.KNOWLEDGE_TRIPLET_EXTRACT
)
############################################
# HYDE
##############################################
# HYDE_TMPL = (
# "Please write a passage to answer the question\n"
# "Try to include as many key details as possible.\n"
# "\n"
# "\n"
# "{context_str}\n"
# "\n"
# "\n"
# 'Passage:"""\n'
# )
HYDE_TMPL = (
"请撰写一个段落来回答问题\n"
"尽量包含尽可能多的关键细节。\n"
"\n"
"\n"
"{context_str}\n"
"\n"
"\n"
'段落:"""\n'
)
DEFAULT_HYDE_PROMPT = PromptTemplate(HYDE_TMPL, prompt_type=PromptType.SUMMARY)
############################################
# Simple Input
############################################
DEFAULT_SIMPLE_INPUT_TMPL = "{query_str}"
DEFAULT_SIMPLE_INPUT_PROMPT = PromptTemplate(
DEFAULT_SIMPLE_INPUT_TMPL, prompt_type=PromptType.SIMPLE_INPUT
)
############################################
# Pandas
############################################
# DEFAULT_PANDAS_TMPL = (
# "You are working with a pandas dataframe in Python.\n"
# "The name of the dataframe is `df`.\n"
# "This is the result of `print(df.head())`:\n"
# "{df_str}\n\n"
# "Here is the input query: {query_str}.\n"
# "Given the df information and the input query, please follow "
# "these instructions:\n"
# "{instruction_str}"
# "Output:\n"
# )
DEFAULT_PANDAS_TMPL = (
"您正在使用 Python 中的 pandas 数据帧。\n"
"数据帧的名称是 `df`。\n"
"这是 `print(df.head())` 的结果:\n"
"{df_str}\n\n"
"这是输入的查询:{query_str}。\n"
"根据 df 信息和输入的查询,请遵循以下说明:\n"
"{instruction_str}"
"输出:\n"
)
DEFAULT_PANDAS_PROMPT = PromptTemplate(DEFAULT_PANDAS_TMPL, prompt_type=PromptType.PANDAS)
############################################
# JSON Path
############################################
# DEFAULT_JSON_PATH_TMPL = (
# "We have provided a JSON schema below:\n"
# "{schema}\n"
# "Given a task, respond with a JSON Path query that "
# "can retrieve data from a JSON value that matches the schema.\n"
# "Task: {query_str}\n"
# "JSONPath: "
# )
DEFAULT_JSON_PATH_TMPL = (
"我们在下面提供了一个 JSON 模式:\n"
"{schema}\n"
"根据任务,使用一个 JSON Path 查询来检索与模式匹配的 JSON 值中的数据。\n"
"任务:{query_str}\n"
"JSONPath:"
)
DEFAULT_JSON_PATH_PROMPT = PromptTemplate(
DEFAULT_JSON_PATH_TMPL, prompt_type=PromptType.JSON_PATH
)
############################################
# Choice Select
############################################
# DEFAULT_CHOICE_SELECT_PROMPT_TMPL = (
# "A list of documents is shown below. Each document has a number next to it along "
# "with a summary of the document. A question is also provided. \n"
# "Respond with the numbers of the documents "
# "you should consult to answer the question, in order of relevance, as well \n"
# "as the relevance score. The relevance score is a number from 1-10 based on "
# "how relevant you think the document is to the question.\n"
# "Do not include any documents that are not relevant to the question. \n"
# "Example format: \n"
# "Document 1:\n<summary of document 1>\n\n"
# "Document 2:\n<summary of document 2>\n\n"
# "...\n\n"
# "Document 10:\n<summary of document 10>\n\n"
# "Question: <question>\n"
# "Answer:\n"
# "Doc: 9, Relevance: 7\n"
# "Doc: 3, Relevance: 4\n"
# "Doc: 7, Relevance: 3\n\n"
# "Let's try this now: \n\n"
# "{context_str}\n"
# "Question: {query_str}\n"
# "Answer:\n"
# )
DEFAULT_CHOICE_SELECT_PROMPT_TMPL = (
"下面显示了一份文档列表。每个文档旁边都有一个数字,以及文档的摘要。还提供了一个问题。\n"
"请按照相关性顺序回答,列出您认为用于回答问题的文档的编号以及相关性评分(1-10)。\n"
"请勿包括与问题无关的文档。\n"
"示例格式:\n"
"文档 1:\n<文档 1 的摘要>\n\n"
"文档 2:\n<文档 2 的摘要>\n\n"
"...\n\n"
"文档 10:\n<文档 10 的摘要>\n\n"
"问题: <问题>\n"
"答案:\n"
"文档:9,相关性:7\n"
"文档:3,相关性:4\n"
"文档:7,相关性:3\n\n"
"现在让我们试一试:\n\n"
"{context_str}\n"
"问题: {query_str}\n"
"答案:\n"
)
DEFAULT_CHOICE_SELECT_PROMPT = PromptTemplate(
DEFAULT_CHOICE_SELECT_PROMPT_TMPL, prompt_type=PromptType.CHOICE_SELECT
) | [
"原始查询如下:{query_str}\n我们已经提供了一个现有答案:{existing_answer}\n我们有机会通过以下一些更多的上下文来完善现有答案(仅在需要时)。 \n------------\n{context_msg}\n------------\n在新的上下文基础上,完善原始答案以更好地回答查询。如果上下文对于完善答案没有帮助,那么返回原始答案。\n完善后的答案:",
"下面列出了一些选择项,它们以编号列表的形式呈现(从1到{num_chunks}),列表中的每个项目对应一个摘要。\n---------------------\n{context_list}\n---------------------\n仅使用上述选择,不使用先前的知识,返回与问题 '{query_str}' 最相关的前若干选择项 (不超过{branching_factor}个),按从最相关到最不相关的顺序排列。\n请以以下格式提供选择:'ANSWER: <编号>',并解释为什么选择这些摘要与问题相关。\n",
"下面提供了上下文信息,以编号列表形式提供(从1到{num_chunks}),其中列表中的每一项对应一个摘要。\n---------------------\n{context_list}---------------------\n根据上下文信息,这是一个新的信息片段:{new_chunk_text}\n答案为应更新的摘要的编号。答案应为与问题最相关的摘要对应的编号。\n",
"{query_str}",
"下面提供了一些文本。根据文本,从中提取最多 {max_keywords} 个关键词。避免使用停用词。---------------------\n{text}\n---------------------\n请以以下逗号分隔的格式提供关键词:'KEYWORDS: <关键词>'\n",
"下面显示了一份文档列表。每个文档旁边都有一个数字,以及文档的摘要。还提供了一个问题。\n请按照相关性顺序回答,列出您认为用于回答问题的文档的编号以及相关性评分(1-10)。\n请勿包括与问题无关的文档。\n示例格式:\n文档 1:\n<文档 1 的摘要>\n\n文档 2:\n<文档 2 的摘要>\n\n...\n\n文档 10:\n<文档 10 的摘要>\n\n问题: <问题>\n答案:\n文档:9,相关性:7\n文档:3,相关性:4\n文档:7,相关性:3\n\n现在让我们试一试:\n\n{context_str}\n问题: {query_str}\n答案:\n",
"[query_vector]",
"写一个关于以下内容的摘要。尽量只使用所提供的信息。尽量包含尽可能多的关键细节。\n\n\n{context_str}\n\n\n摘要:\"\"\"\n",
"Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer. You can order the results by a relevant column to return the most interesting examples in the database.\n\nPay attention to use only the column names that you can see in the schema description. Be careful to not query for columns that do not exist. Pay attention to which column is in which table. Also, qualify column names with the table name when needed.\n\nIMPORTANT NOTE: you can use specialized pgvector syntax (`<->`) to do nearest neighbors/semantic search to a given vector from an embeddings column in the table. The embeddings value for a given row typically represents the semantic meaning of that row. The vector represents an embedding representation of the question, given below. Do NOT fill in the vector values directly, but rather specify a `[query_vector]` placeholder. For instance, some select statement examples below (the name of the embeddings column is `embedding`):\nSELECT * FROM items ORDER BY embedding <-> '[query_vector]' LIMIT 5;\nSELECT * FROM items WHERE id != 1 ORDER BY embedding <-> (SELECT embedding FROM items WHERE id = 1) LIMIT 5;\nSELECT * FROM items WHERE embedding <-> '[query_vector]' < 5;\n\nYou are required to use the following format, each taking one line:\n\nQuestion: Question here\nSQLQuery: SQL Query to run\nSQLResult: Result of the SQLQuery\nAnswer: Final answer here\n\nOnly use tables listed below.\n{schema}\n\n\nQuestion: {query_str}\nSQLQuery: ",
"下面是上下文信息。\n---------------------\n{context_str}\n---------------------\n根据上下文信息而不是先前的知识,回答查询。\n查询:{query_str}\n答案:",
"下面提供了一个问题。根据问题,从中提取最多 {max_keywords} 个关键词。专注于提取我们可以用来最佳查找答案的关键词。避免使用停用词。\n---------------------\n{question}\n---------------------\n请以以下逗号分隔的格式提供关键词:'KEYWORDS: <关键词>'\n",
"以下是一些选择项,它们以编号列表的形式呈现(从1到{num_chunks}),其中列表中的每个项目对应一个摘要。\n---------------------\n{context_list}\n---------------------\n仅使用上述选择,不使用先前的知识,找出与问题 '{query_str}' 最相关的选择。\n请以以下格式提供答案:'ANSWER: <编号>',并解释为什么选择这个摘要与问题相关。\n"
] |
2024-01-10 | cbmchat/llama_index | query_engine~pandas_query_engine.py | """Default query for PandasIndex.
WARNING: This tool provides the Agent access to the `eval` function.
Arbitrary code execution is possible on the machine running this tool.
This tool is not recommended to be used in a production setting, and would
require heavy sandboxing or virtual machines
"""
import logging
from typing import Any, Callable, Optional
import numpy as np
import pandas as pd
from llama_index.bridge.langchain import print_text
from llama_index.indices.query.base import BaseQueryEngine
from llama_index.indices.query.schema import QueryBundle
from llama_index.indices.service_context import ServiceContext
from llama_index.indices.struct_store.pandas import PandasIndex
from llama_index.prompts import BasePromptTemplate
from llama_index.prompts.default_prompts import DEFAULT_PANDAS_PROMPT
from llama_index.response.schema import Response
logger = logging.getLogger(__name__)
DEFAULT_INSTRUCTION_STR = (
"We wish to convert this query to executable Python code using Pandas.\n"
"The final line of code should be a Python expression that can be called "
"with the `eval()` function. This expression should represent a solution "
"to the query."
)
def default_output_processor(
output: str, df: pd.DataFrame, **output_kwargs: Any
) -> str:
"""Process outputs in a default manner."""
import ast
import sys
import traceback
if sys.version_info < (3, 9):
logger.warn(
"Python version must be >= 3.9 in order to use "
"the default output processor, which executes "
"the Python query. Instead, we will return the "
"raw Python instructions as a string."
)
return output
local_vars = {"df": df}
# NOTE: inspired from langchain's tool
# see langchain.tools.python.tool (PythonAstREPLTool)
try:
tree = ast.parse(output)
module = ast.Module(tree.body[:-1], type_ignores=[])
exec(ast.unparse(module), {}, local_vars) # type: ignore
module_end = ast.Module(tree.body[-1:], type_ignores=[])
module_end_str = ast.unparse(module_end) # type: ignore
print(module_end_str)
try:
return str(eval(module_end_str, {"np": np}, local_vars))
except Exception as e:
raise e
except Exception as e:
err_string = (
"There was an error running the output as Python code. "
f"Error message: {e}"
)
traceback.print_exc()
return err_string
class PandasQueryEngine(BaseQueryEngine):
"""GPT Pandas query.
Convert natural language to Pandas python code.
WARNING: This tool provides the Agent access to the `eval` function.
Arbitrary code execution is possible on the machine running this tool.
This tool is not recommended to be used in a production setting, and would
require heavy sandboxing or virtual machines
Args:
df (pd.DataFrame): Pandas dataframe to use.
instruction_str (Optional[str]): Instruction string to use.
output_processor (Optional[Callable[[str], str]]): Output processor.
A callable that takes in the output string, pandas DataFrame,
and any output kwargs and returns a string.
pandas_prompt (Optional[BasePromptTemplate]): Pandas prompt to use.
head (int): Number of rows to show in the table context.
"""
def __init__(
self,
df: pd.DataFrame,
instruction_str: Optional[str] = None,
output_processor: Optional[Callable] = None,
pandas_prompt: Optional[BasePromptTemplate] = None,
output_kwargs: Optional[dict] = None,
head: int = 5,
verbose: bool = False,
service_context: Optional[ServiceContext] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
self._df = df
self._head = head
self._pandas_prompt = pandas_prompt or DEFAULT_PANDAS_PROMPT
self._instruction_str = instruction_str or DEFAULT_INSTRUCTION_STR
self._output_processor = output_processor or default_output_processor
self._output_kwargs = output_kwargs or {}
self._verbose = verbose
self._service_context = service_context or ServiceContext.from_defaults()
super().__init__(self._service_context.callback_manager)
@classmethod
def from_index(cls, index: PandasIndex, **kwargs: Any) -> "PandasQueryEngine":
logger.warning(
"PandasIndex is deprecated. "
"Directly construct PandasQueryEngine with df instead."
)
return cls(df=index.df, service_context=index.service_context, **kwargs)
def _get_table_context(self) -> str:
"""Get table context."""
return str(self._df.head(self._head))
def _query(self, query_bundle: QueryBundle) -> Response:
"""Answer a query."""
context = self._get_table_context()
pandas_response_str = self._service_context.llm_predictor.predict(
self._pandas_prompt,
df_str=context,
query_str=query_bundle.query_str,
instruction_str=self._instruction_str,
)
if self._verbose:
print_text(f"> Pandas Instructions:\n" f"```\n{pandas_response_str}\n```\n")
pandas_output = self._output_processor(
pandas_response_str,
self._df,
**self._output_kwargs,
)
if self._verbose:
print_text(f"> Pandas Output: {pandas_output}\n")
response_metadata = {
"pandas_instruction_str": pandas_response_str,
}
return Response(response=pandas_output, metadata=response_metadata)
async def _aquery(self, query_bundle: QueryBundle) -> Response:
return self._query(query_bundle)
# legacy
NLPandasQueryEngine = PandasQueryEngine
GPTNLPandasQueryEngine = PandasQueryEngine
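# Usage sketch, guarded so it never runs on import; the toy dataframe and the
# natural-language query are invented for illustration.
if __name__ == "__main__":
    df = pd.DataFrame(
        {
            "city": ["Toronto", "Tokyo", "Berlin"],
            "population": [2_930_000, 13_960_000, 3_645_000],
        }
    )
    query_engine = PandasQueryEngine(df=df, verbose=True)
    print(query_engine.query("Which city has the highest population?"))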
| [] |
2024-01-10 | cbmchat/llama_index | agent~retriever_openai_agent.py | """Retriever OpenAI agent."""
from typing import List, Optional, Type
from llama_index.agent.openai_agent import (
DEFAULT_MAX_FUNCTION_CALLS,
DEFAULT_MODEL_NAME,
BaseOpenAIAgent,
)
from llama_index.callbacks.base import CallbackManager
from llama_index.llms.base import ChatMessage
from llama_index.llms.openai import OpenAI
from llama_index.llms.openai_utils import is_function_calling_model
from llama_index.memory import BaseMemory, ChatMemoryBuffer
from llama_index.objects.base import ObjectRetriever
from llama_index.tools.types import BaseTool
class FnRetrieverOpenAIAgent(BaseOpenAIAgent):
"""Function Retriever OpenAI Agent.
Uses our object retriever module to retrieve openai agent.
"""
def __init__(
self,
retriever: ObjectRetriever[BaseTool],
llm: OpenAI,
memory: BaseMemory,
prefix_messages: List[ChatMessage],
verbose: bool = False,
max_function_calls: int = DEFAULT_MAX_FUNCTION_CALLS,
callback_manager: Optional[CallbackManager] = None,
) -> None:
super().__init__(
llm=llm,
memory=memory,
prefix_messages=prefix_messages,
verbose=verbose,
max_function_calls=max_function_calls,
callback_manager=callback_manager,
)
self._retriever = retriever
@classmethod
def from_retriever(
cls,
retriever: ObjectRetriever[BaseTool],
llm: Optional[OpenAI] = None,
chat_history: Optional[List[ChatMessage]] = None,
memory: Optional[BaseMemory] = None,
memory_cls: Type[BaseMemory] = ChatMemoryBuffer,
verbose: bool = False,
max_function_calls: int = DEFAULT_MAX_FUNCTION_CALLS,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
prefix_messages: Optional[List[ChatMessage]] = None,
) -> "FnRetrieverOpenAIAgent":
chat_history = chat_history or []
llm = llm or OpenAI(model=DEFAULT_MODEL_NAME)
if not isinstance(llm, OpenAI):
raise ValueError("llm must be a OpenAI instance")
if callback_manager is not None:
llm.callback_manager = callback_manager
memory = memory or memory_cls.from_defaults(chat_history=chat_history, llm=llm)
if not is_function_calling_model(llm.model):
raise ValueError(
f"Model name {llm.model} does not support function calling API. "
)
if system_prompt is not None:
if prefix_messages is not None:
raise ValueError(
"Cannot specify both system_prompt and prefix_messages"
)
prefix_messages = [ChatMessage(content=system_prompt, role="system")]
prefix_messages = prefix_messages or []
return cls(
retriever=retriever,
llm=llm,
memory=memory,
prefix_messages=prefix_messages,
verbose=verbose,
max_function_calls=max_function_calls,
callback_manager=callback_manager,
)
def _get_tools(self, message: str) -> List[BaseTool]:
tools = self._retriever.retrieve(message)
return tools
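# Usage sketch, guarded so it never runs on import. Builds a tool retriever
# with an ObjectIndex over a single illustrative function tool; the tool, the
# index choice, and the chat message are assumptions for demonstration.
if __name__ == "__main__":
    from llama_index import VectorStoreIndex
    from llama_index.objects import ObjectIndex, SimpleToolNodeMapping
    from llama_index.tools import FunctionTool
    def multiply(a: int, b: int) -> int:
        """Multiply two integers."""
        return a * b
    tools = [FunctionTool.from_defaults(fn=multiply)]
    obj_index = ObjectIndex.from_objects(
        tools, SimpleToolNodeMapping.from_objects(tools), VectorStoreIndex
    )
    agent = FnRetrieverOpenAIAgent.from_retriever(obj_index.as_retriever())
    print(agent.chat("What is 3 times 7?"))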
| [] |
2024-01-10 | cbmchat/llama_index | selectors~pydantic_selectors.py | from typing import Any, Optional, Sequence
from llama_index.indices.query.schema import QueryBundle
from llama_index.llms.openai import OpenAI
from llama_index.program.base_program import BasePydanticProgram
from llama_index.program.openai_program import OpenAIPydanticProgram
from llama_index.selectors.llm_selectors import _build_choices_text
from llama_index.selectors.prompts import (
DEFAULT_MULTI_PYD_SELECT_PROMPT_TMPL,
DEFAULT_SINGLE_PYD_SELECT_PROMPT_TMPL,
)
from llama_index.selectors.types import (
BaseSelector,
MultiSelection,
SelectorResult,
SingleSelection,
)
from llama_index.tools.types import ToolMetadata
def _pydantic_output_to_selector_result(output: Any) -> SelectorResult:
"""
Convert pydantic output to selector result.
Takes into account zero-indexing on answer indexes.
"""
if isinstance(output, SingleSelection):
output.index -= 1
return SelectorResult(selections=[output])
elif isinstance(output, MultiSelection):
for idx in range(len(output.selections)):
output.selections[idx].index -= 1
return SelectorResult(selections=output.selections)
else:
raise ValueError(f"Unsupported output type: {type(output)}")
class PydanticSingleSelector(BaseSelector):
def __init__(self, selector_program: BasePydanticProgram) -> None:
self._selector_program = selector_program
@classmethod
def from_defaults(
cls,
program: Optional[BasePydanticProgram] = None,
llm: Optional[OpenAI] = None,
prompt_template_str: str = DEFAULT_SINGLE_PYD_SELECT_PROMPT_TMPL,
verbose: bool = False,
) -> "PydanticSingleSelector":
if program is None:
program = OpenAIPydanticProgram.from_defaults(
output_cls=SingleSelection,
prompt_template_str=prompt_template_str,
llm=llm,
verbose=verbose,
)
return cls(selector_program=program)
def _select(
self, choices: Sequence[ToolMetadata], query: QueryBundle
) -> SelectorResult:
# prepare input
choices_text = _build_choices_text(choices)
# predict
prediction = self._selector_program(
num_choices=len(choices),
context_list=choices_text,
query_str=query.query_str,
)
# parse output
return _pydantic_output_to_selector_result(prediction)
async def _aselect(
self, choices: Sequence[ToolMetadata], query: QueryBundle
) -> SelectorResult:
raise NotImplementedError(
"Async selection not supported for Pydantic Selectors."
)
class PydanticMultiSelector(BaseSelector):
def __init__(
self, selector_program: BasePydanticProgram, max_outputs: Optional[int] = None
) -> None:
self._selector_program = selector_program
self._max_outputs = max_outputs
@classmethod
def from_defaults(
cls,
program: Optional[BasePydanticProgram] = None,
llm: Optional[OpenAI] = None,
prompt_template_str: str = DEFAULT_MULTI_PYD_SELECT_PROMPT_TMPL,
max_outputs: Optional[int] = None,
verbose: bool = False,
) -> "PydanticMultiSelector":
if program is None:
program = OpenAIPydanticProgram.from_defaults(
output_cls=MultiSelection,
prompt_template_str=prompt_template_str,
llm=llm,
verbose=verbose,
)
return cls(selector_program=program, max_outputs=max_outputs)
def _select(
self, choices: Sequence[ToolMetadata], query: QueryBundle
) -> SelectorResult:
# prepare input
context_list = _build_choices_text(choices)
max_outputs = self._max_outputs or len(choices)
# predict
prediction = self._selector_program(
num_choices=len(choices),
max_outputs=max_outputs,
context_list=context_list,
query_str=query.query_str,
)
# parse output
return _pydantic_output_to_selector_result(prediction)
async def _aselect(
self, choices: Sequence[ToolMetadata], query: QueryBundle
) -> SelectorResult:
return self._select(choices, query)
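# Usage sketch, guarded so it never runs on import: selecting among two
# illustrative tool descriptions for a query. Assumes the synchronous
# `select()` helper on the selector base class accepts ToolMetadata choices
# and a raw query string.
if __name__ == "__main__":
    choices = [
        ToolMetadata(
            name="code", description="Useful for questions about source code"
        ),
        ToolMetadata(
            name="docs", description="Useful for questions about documentation"
        ),
    ]
    selector = PydanticSingleSelector.from_defaults()
    result = selector.select(
        choices, query="Where is the retry logic implemented?"
    )
    print(result.selections)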
| [] |
2024-01-10 | cbmchat/llama_index | agent~context_retriever_agent.py | """Context retriever agent."""
from typing import List, Optional, Type, Union
from llama_index.agent.openai_agent import (
DEFAULT_MAX_FUNCTION_CALLS,
DEFAULT_MODEL_NAME,
BaseOpenAIAgent,
)
from llama_index.bridge.langchain import print_text
from llama_index.callbacks import CallbackManager
from llama_index.chat_engine.types import (
AgentChatResponse,
)
from llama_index.indices.base_retriever import BaseRetriever
from llama_index.llms.base import LLM, ChatMessage
from llama_index.llms.openai import OpenAI
from llama_index.llms.openai_utils import is_function_calling_model
from llama_index.memory import BaseMemory, ChatMemoryBuffer
from llama_index.prompts.prompts import QuestionAnswerPrompt
from llama_index.schema import NodeWithScore
from llama_index.tools import BaseTool
# inspired by DEFAULT_QA_PROMPT_TMPL from llama_index/prompts/default_prompts.py
# DEFAULT_QA_PROMPT_TMPL = (
# "Context information is below.\n"
# "---------------------\n"
# "{context_str}\n"
# "---------------------\n"
# "Given the context information and not prior knowledge, "
# "either pick the corresponding tool or answer the function: {query_str}\n"
# )
DEFAULT_QA_PROMPT_TMPL = (
"上下文信息如下。\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"鉴于上下文信息而不是先验知识,"
"选择相应的工具或回答函数:{query_str}\n"
)
DEFAULT_QA_PROMPT = QuestionAnswerPrompt(DEFAULT_QA_PROMPT_TMPL)
class ContextRetrieverOpenAIAgent(BaseOpenAIAgent):
"""ContextRetriever OpenAI Agent.
This agent performs retrieval from a BaseRetriever before
calling the LLM, allowing it to augment the user message with context.
NOTE: this is a beta feature, function interfaces might change.
Args:
tools (List[BaseTool]): A list of tools.
retriever (BaseRetriever): A retriever.
qa_prompt (Optional[QuestionAnswerPrompt]): A QA prompt.
context_separator (str): A context separator.
llm (Optional[OpenAI]): An OpenAI LLM.
chat_history (Optional[List[ChatMessage]]): A chat history.
prefix_messages: List[ChatMessage]: A list of prefix messages.
verbose (bool): Whether to print debug statements.
max_function_calls (int): Maximum number of function calls.
callback_manager (Optional[CallbackManager]): A callback manager.
"""
def __init__(
self,
tools: List[BaseTool],
retriever: BaseRetriever,
qa_prompt: QuestionAnswerPrompt,
context_separator: str,
llm: OpenAI,
memory: BaseMemory,
prefix_messages: List[ChatMessage],
verbose: bool = False,
max_function_calls: int = DEFAULT_MAX_FUNCTION_CALLS,
callback_manager: Optional[CallbackManager] = None,
) -> None:
super().__init__(
llm=llm,
memory=memory,
prefix_messages=prefix_messages,
verbose=verbose,
max_function_calls=max_function_calls,
callback_manager=callback_manager,
)
self._tools = tools
self._qa_prompt = qa_prompt
self._retriever = retriever
self._context_separator = context_separator
@classmethod
def from_tools_and_retriever(
cls,
tools: List[BaseTool],
retriever: BaseRetriever,
qa_prompt: Optional[QuestionAnswerPrompt] = None,
context_separator: str = "\n",
llm: Optional[LLM] = None,
chat_history: Optional[List[ChatMessage]] = None,
memory: Optional[BaseMemory] = None,
memory_cls: Type[BaseMemory] = ChatMemoryBuffer,
verbose: bool = False,
max_function_calls: int = DEFAULT_MAX_FUNCTION_CALLS,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
prefix_messages: Optional[List[ChatMessage]] = None,
) -> "ContextRetrieverOpenAIAgent":
"""Create a ContextRetrieverOpenAIAgent from a retriever.
Args:
retriever (BaseRetriever): A retriever.
qa_prompt (Optional[QuestionAnswerPrompt]): A QA prompt.
context_separator (str): A context separator.
llm (Optional[OpenAI]): An OpenAI LLM.
chat_history (Optional[List[ChatMessage]]): A chat history.
verbose (bool): Whether to print debug statements.
max_function_calls (int): Maximum number of function calls.
callback_manager (Optional[CallbackManager]): A callback manager.
"""
qa_prompt = qa_prompt or DEFAULT_QA_PROMPT
chat_history = chat_history or []
llm = llm or OpenAI(model=DEFAULT_MODEL_NAME)
if not isinstance(llm, OpenAI):
raise ValueError("llm must be an OpenAI instance")
if callback_manager is not None:
llm.callback_manager = callback_manager
memory = memory or memory_cls.from_defaults(chat_history=chat_history, llm=llm)
if not is_function_calling_model(llm.model):
raise ValueError(
f"Model name {llm.model} does not support function calling API."
)
if system_prompt is not None:
if prefix_messages is not None:
raise ValueError(
"Cannot specify both system_prompt and prefix_messages"
)
prefix_messages = [ChatMessage(content=system_prompt, role="system")]
prefix_messages = prefix_messages or []
return cls(
tools=tools,
retriever=retriever,
qa_prompt=qa_prompt,
context_separator=context_separator,
llm=llm,
memory=memory,
prefix_messages=prefix_messages,
verbose=verbose,
max_function_calls=max_function_calls,
callback_manager=callback_manager,
)
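# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# One way this constructor could be wired up. The index, tool name, and question
# are placeholders; `VectorStoreIndex`, `QueryEngineTool.from_defaults`, and
# `as_retriever()` are assumed from the llama_index API of this snapshot.
#
# from llama_index import VectorStoreIndex
# from llama_index.tools import QueryEngineTool
#
# index = VectorStoreIndex.from_documents(documents)  # `documents` loaded elsewhere
# docs_tool = QueryEngineTool.from_defaults(
#     query_engine=index.as_query_engine(),
#     name="docs_tool",
#     description="Answers questions over the loaded documents.",
# )
# agent = ContextRetrieverOpenAIAgent.from_tools_and_retriever(
#     tools=[docs_tool],
#     retriever=index.as_retriever(similarity_top_k=2),
#     verbose=True,
# )
# print(agent.chat("合同的付款条件是什么?"))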
def _get_tools(self, message: str) -> List[BaseTool]:
"""Get tools."""
return self._tools
def _build_formatted_message(self, message: str) -> str:
# augment user message
retrieved_nodes_w_scores: List[NodeWithScore] = self._retriever.retrieve(
message
)
retrieved_nodes = [node.node for node in retrieved_nodes_w_scores]
retrieved_texts = [node.get_content() for node in retrieved_nodes]
# format message
context_str = self._context_separator.join(retrieved_texts)
return self._qa_prompt.format(context_str=context_str, query_str=message)
def chat(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
function_call: Union[str, dict] = "auto",
) -> AgentChatResponse:
"""Chat."""
formatted_message = self._build_formatted_message(message)
if self._verbose:
print_text(formatted_message + "\n", color="yellow")
return super().chat(
formatted_message, chat_history=chat_history, function_call=function_call
)
async def achat(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
function_call: Union[str, dict] = "auto",
) -> AgentChatResponse:
"""Chat."""
formatted_message = self._build_formatted_message(message)
if self._verbose:
print_text(formatted_message + "\n", color="yellow")
return await super().achat(
formatted_message, chat_history=chat_history, function_call=function_call
)
| [
"上下文信息如下。\n---------------------\n{context_str}\n---------------------\n鉴于上下文信息而不是先验知识,选择相应的工具或回答函数:{query_str}\n"
] |
2024-01-10 | cbmchat/llama_index | prompts~default_prompts.py | """Set of default prompts."""
from llama_index.prompts.base import PromptTemplate
from llama_index.prompts.prompt_type import PromptType
############################################
# Tree
############################################
# DEFAULT_SUMMARY_PROMPT_TMPL = (
# "Write a summary of the following. Try to use only the "
# "information provided. "
# "Try to include as many key details as possible.\n"
# "\n"
# "\n"
# "{context_str}\n"
# "\n"
# "\n"
# 'SUMMARY:"""\n'
# )
DEFAULT_SUMMARY_PROMPT_TMPL = (
"写一个关于以下内容的摘要。尽量只使用所提供的信息。"
"尽量包含尽可能多的关键细节。\n"
"\n"
"\n"
"{context_str}\n"
"\n"
"\n"
'摘要:"""\n'
)
DEFAULT_SUMMARY_PROMPT = PromptTemplate(
DEFAULT_SUMMARY_PROMPT_TMPL, prompt_type=PromptType.SUMMARY
)
# insert prompts
# DEFAULT_INSERT_PROMPT_TMPL = (
# "Context information is below. It is provided in a numbered list "
# "(1 to {num_chunks}),"
# "where each item in the list corresponds to a summary.\n"
# "---------------------\n"
# "{context_list}"
# "---------------------\n"
# "Given the context information, here is a new piece of "
# "information: {new_chunk_text}\n"
# "Answer with the number corresponding to the summary that should be updated. "
# "The answer should be the number corresponding to the "
# "summary that is most relevant to the question.\n"
# )
DEFAULT_INSERT_PROMPT_TMPL = (
"下面提供了上下文信息,以编号列表形式提供(从1到{num_chunks}),"
"其中列表中的每一项对应一个摘要。\n"
"---------------------\n"
"{context_list}"
"---------------------\n"
"根据上下文信息,这是一个新的信息片段:{new_chunk_text}\n"
"答案为应更新的摘要的编号。答案应为与问题最相关的摘要对应的编号。\n"
)
DEFAULT_INSERT_PROMPT = PromptTemplate(
DEFAULT_INSERT_PROMPT_TMPL, prompt_type=PromptType.TREE_INSERT
)
# # single choice
# DEFAULT_QUERY_PROMPT_TMPL = (
# "Some choices are given below. It is provided in a numbered list "
# "(1 to {num_chunks}),"
# "where each item in the list corresponds to a summary.\n"
# "---------------------\n"
# "{context_list}"
# "\n---------------------\n"
# "Using only the choices above and not prior knowledge, return "
# "the choice that is most relevant to the question: '{query_str}'\n"
# "Provide choice in the following format: 'ANSWER: <number>' and explain why "
# "this summary was selected in relation to the question.\n"
# )
DEFAULT_QUERY_PROMPT_TMPL = (
"以下是一些选择项,它们以编号列表的形式呈现(从1到{num_chunks}),"
"其中列表中的每个项目对应一个摘要。\n"
"---------------------\n"
"{context_list}"
"\n---------------------\n"
"仅使用上述选择,不使用先前的知识,找出与问题 '{query_str}' 最相关的选择。\n"
"请以以下格式提供答案:'ANSWER: <编号>',并解释为什么选择这个摘要与问题相关。\n"
)
DEFAULT_QUERY_PROMPT = PromptTemplate(
DEFAULT_QUERY_PROMPT_TMPL, prompt_type=PromptType.TREE_SELECT
)
# multiple choice
# DEFAULT_QUERY_PROMPT_MULTIPLE_TMPL = (
# "Some choices are given below. It is provided in a numbered "
# "list (1 to {num_chunks}), "
# "where each item in the list corresponds to a summary.\n"
# "---------------------\n"
# "{context_list}"
# "\n---------------------\n"
# "Using only the choices above and not prior knowledge, return the top choices "
# "(no more than {branching_factor}, ranked by most relevant to least) that "
# "are most relevant to the question: '{query_str}'\n"
# "Provide choices in the following format: 'ANSWER: <numbers>' and explain why "
# "these summaries were selected in relation to the question.\n"
# )
DEFAULT_QUERY_PROMPT_MULTIPLE_TMPL = (
"下面列出了一些选择项,它们以编号列表的形式呈现(从1到{num_chunks}),"
"列表中的每个项目对应一个摘要。\n"
"---------------------\n"
"{context_list}"
"\n---------------------\n"
"仅使用上述选择,不使用先前的知识,返回与问题 '{query_str}' 最相关的前若干选择项 "
"(不超过{branching_factor}个),按从最相关到最不相关的顺序排列。\n"
"请以以下格式提供选择:'ANSWER: <编号>',并解释为什么选择这些摘要与问题相关。\n"
)
DEFAULT_QUERY_PROMPT_MULTIPLE = PromptTemplate(
DEFAULT_QUERY_PROMPT_MULTIPLE_TMPL, prompt_type=PromptType.TREE_SELECT_MULTIPLE
)
# DEFAULT_REFINE_PROMPT_TMPL = (
# "The original query is as follows: {query_str}\n"
# "We have provided an existing answer: {existing_answer}\n"
# "We have the opportunity to refine the existing answer "
# "(only if needed) with some more context below.\n"
# "------------\n"
# "{context_msg}\n"
# "------------\n"
# "Given the new context, refine the original answer to better "
# "answer the query. "
# "If the context isn't useful, return the original answer.\n"
# "Refined Answer: "
# )
DEFAULT_REFINE_PROMPT_TMPL = (
"原始查询如下:{query_str}\n"
"我们已经提供了一个现有答案:{existing_answer}\n"
"我们有机会通过以下一些更多的上下文来完善现有答案(仅在需要时)。 \n"
"------------\n"
"{context_msg}\n"
"------------\n"
"在新的上下文基础上,完善原始答案以更好地回答查询。"
"如果上下文对于完善答案没有帮助,那么返回原始答案。\n"
"完善后的答案:"
)
DEFAULT_REFINE_PROMPT = PromptTemplate(
DEFAULT_REFINE_PROMPT_TMPL, prompt_type=PromptType.REFINE
)
# DEFAULT_TEXT_QA_PROMPT_TMPL = (
# "Context information is below.\n"
# "---------------------\n"
# "{context_str}\n"
# "---------------------\n"
# "Given the context information and not prior knowledge, "
# "answer the query.\n"
# "Query: {query_str}\n"
# "Answer: "
# )
DEFAULT_TEXT_QA_PROMPT_TMPL = (
"下面是上下文信息。\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"根据上下文信息回答问题。\n"
"问题:{query_str},详细说说\n"
"答案:"
)
DEFAULT_TEXT_QA_PROMPT = PromptTemplate(
DEFAULT_TEXT_QA_PROMPT_TMPL, prompt_type=PromptType.QUESTION_ANSWER
)
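# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Templates defined in this module are plain PromptTemplate objects, so they can
# be rendered with `format()` (or pre-filled with `partial_format()`); the
# context and question below are made-up values.
#
# rendered = DEFAULT_TEXT_QA_PROMPT.format(
#     context_str="差旅政策:住宿费每晚上限为500元。",
#     query_str="住宿费上限是多少?",
# )
# print(rendered)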
# DEFAULT_TREE_SUMMARIZE_TMPL = (
# "Context information from multiple sources is below.\n"
# "---------------------\n"
# "{context_str}\n"
# "---------------------\n"
# "Given the information from multiple sources and not prior knowledge, "
# "answer the query.\n"
# "Query: {query_str}\n"
# "Answer: "
# )
DEFAULT_TREE_SUMMARIZE_TMPL = (
"下面是来自多个来源的上下文信息。\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"根据来自多个来源的信息而不是先前的知识,回答查询。\n"
"查询:{query_str}\n"
"答案:"
)
DEFAULT_TREE_SUMMARIZE_PROMPT = PromptTemplate(
DEFAULT_TREE_SUMMARIZE_TMPL, prompt_type=PromptType.SUMMARY
)
############################################
# Keyword Table
############################################
# DEFAULT_KEYWORD_EXTRACT_TEMPLATE_TMPL = (
# "Some text is provided below. Given the text, extract up to {max_keywords} "
# "keywords from the text. Avoid stopwords."
# "---------------------\n"
# "{text}\n"
# "---------------------\n"
# "Provide keywords in the following comma-separated format: 'KEYWORDS: <keywords>'\n"
# )
DEFAULT_KEYWORD_EXTRACT_TEMPLATE_TMPL = (
"下面提供了一些文本。根据文本,从中提取最多 {max_keywords} 个关键词。避免使用停用词。"
"---------------------\n"
"{text}\n"
"---------------------\n"
"请以以下逗号分隔的格式提供关键词:'KEYWORDS: <关键词>'\n"
)
DEFAULT_KEYWORD_EXTRACT_TEMPLATE = PromptTemplate(
DEFAULT_KEYWORD_EXTRACT_TEMPLATE_TMPL, prompt_type=PromptType.KEYWORD_EXTRACT
)
# NOTE: the keyword extraction for queries can be the same as
# the one used to build the index, but here we tune it to see if performance is better.
# DEFAULT_QUERY_KEYWORD_EXTRACT_TEMPLATE_TMPL = (
# "A question is provided below. Given the question, extract up to {max_keywords} "
# "keywords from the text. Focus on extracting the keywords that we can use "
# "to best lookup answers to the question. Avoid stopwords.\n"
# "---------------------\n"
# "{question}\n"
# "---------------------\n"
# "Provide keywords in the following comma-separated format: 'KEYWORDS: <keywords>'\n"
# )
DEFAULT_QUERY_KEYWORD_EXTRACT_TEMPLATE_TMPL = (
"下面提供了一个问题。根据问题,从中提取最多 {max_keywords} 个关键词。专注于提取我们可以用来最佳查找答案的关键词。避免使用停用词。\n"
"---------------------\n"
"示例:"
"问题:公司中层在杭州的住宿费是多少?\n"
"关键词:公司中层,杭州,住宿费\n"
"---------------------\n"
"问题:{question}\n"
"关键词:\n"
)
DEFAULT_QUERY_KEYWORD_EXTRACT_TEMPLATE = PromptTemplate(
DEFAULT_QUERY_KEYWORD_EXTRACT_TEMPLATE_TMPL,
prompt_type=PromptType.QUERY_KEYWORD_EXTRACT,
)
############################################
# Structured Store
############################################
# DEFAULT_SCHEMA_EXTRACT_TMPL = (
# "We wish to extract relevant fields from an unstructured text chunk into "
# "a structured schema. We first provide the unstructured text, and then "
# "we provide the schema that we wish to extract. "
# "-----------text-----------\n"
# "{text}\n"
# "-----------schema-----------\n"
# "{schema}\n"
# "---------------------\n"
# "Given the text and schema, extract the relevant fields from the text in "
# "the following format: "
# "field1: <value>\nfield2: <value>\n...\n\n"
# "If a field is not present in the text, don't include it in the output."
# "If no fields are present in the text, return a blank string.\n"
# "Fields: "
# )
DEFAULT_SCHEMA_EXTRACT_TMPL = (
"我们希望从非结构化的文本块中提取相关字段,生成一个结构化模式。"
"我们首先提供非结构化文本,然后提供我们希望提取的模式。"
"-----------文本-----------\n"
"{text}\n"
"-----------模式-----------\n"
"{schema}\n"
"---------------------\n"
"根据给定的文本和模式,在以下格式中从文本中提取相关字段:"
"字段1: <值>\n字段2: <值>\n...\n\n"
"如果文本中没有某个字段,请不要在输出中包含它。"
"如果文本中没有任何字段,请返回一个空字符串。\n"
"字段:"
)
DEFAULT_SCHEMA_EXTRACT_PROMPT = PromptTemplate(
DEFAULT_SCHEMA_EXTRACT_TMPL, prompt_type=PromptType.SCHEMA_EXTRACT
)
# NOTE: taken from langchain and adapted
# https://tinyurl.com/b772sd77
# DEFAULT_TEXT_TO_SQL_TMPL = (
# "Given an input question, first create a syntactically correct {dialect} "
# "query to run, then look at the results of the query and return the answer. "
# "You can order the results by a relevant column to return the most "
# "interesting examples in the database.\n"
# "Never query for all the columns from a specific table, only ask for a "
# "few relevant columns given the question.\n"
# "Pay attention to use only the column names that you can see in the schema "
# "description. "
# "Be careful to not query for columns that do not exist. "
# "Pay attention to which column is in which table. "
# "Also, qualify column names with the table name when needed.\n"
# "Use the following format:\n"
# "Question: Question here\n"
# "SQLQuery: SQL Query to run\n"
# "SQLResult: Result of the SQLQuery\n"
# "Answer: Final answer here\n"
# "Only use the tables listed below.\n"
# "{schema}\n"
# "Question: {query_str}\n"
# "SQLQuery: "
# )
DEFAULT_TEXT_TO_SQL_TMPL = (
"给定一个输入问题,首先创建一个符合语法的{dialect}查询以运行,然后查看查询结果并返回答案。"
"您可以通过相关列对结果进行排序,以返回数据库中最有趣的示例。"
"永远不要查询特定表中的所有列,只询问与问题相关的少数列。"
"注意仅使用在模式描述中可见的列名。"
"小心不要查询不存在的列。"
"注意哪个列位于哪个表中。"
"在需要时,也要用表名限定列名。\n"
"使用以下格式:\n"
"问题:在这里提出问题\n"
"SQL查询:要运行的SQL查询\n"
"SQL结果:SQL查询结果\n"
"答案:在这里给出最终答案\n"
"仅使用下面列出的表。\n"
"{schema}\n"
"问题:{query_str}\n"
"SQL查询:"
)
DEFAULT_TEXT_TO_SQL_PROMPT = PromptTemplate(
DEFAULT_TEXT_TO_SQL_TMPL,
prompt_type=PromptType.TEXT_TO_SQL,
)
# NOTE: by partially filling schema, we can reduce to a QuestionAnswer prompt
# that we can feed to our table
# DEFAULT_TABLE_CONTEXT_TMPL = (
# "We have provided a table schema below. "
# "---------------------\n"
# "{schema}\n"
# "---------------------\n"
# "We have also provided context information below. "
# "{context_str}\n"
# "---------------------\n"
# "Given the context information and the table schema, "
# "give a response to the following task: {query_str}"
# )
DEFAULT_TABLE_CONTEXT_TMPL = (
"我们在下面提供了一个表结构。"
"---------------------\n"
"{schema}\n"
"---------------------\n"
"我们还在下面提供了一些上下文信息。"
"{context_str}\n"
"---------------------\n"
"根据上下文信息和表结构,"
"针对以下任务给出一个回答:{query_str}"
)
# DEFAULT_TABLE_CONTEXT_QUERY = (
# "Provide a high-level description of the table, "
# "as well as a description of each column in the table. "
# "Provide answers in the following format:\n"
# "TableDescription: <description>\n"
# "Column1Description: <description>\n"
# "Column2Description: <description>\n"
# "...\n\n"
# )
DEFAULT_TABLE_CONTEXT_QUERY = (
"提供一个关于表的高级描述,以及表中每个列的描述。"
"请按以下格式提供答案:\n"
"表描述: <描述>\n"
"列1描述: <描述>\n"
"列2描述: <描述>\n"
"...\n\n"
)
DEFAULT_TABLE_CONTEXT_PROMPT = PromptTemplate(
DEFAULT_TABLE_CONTEXT_TMPL, prompt_type=PromptType.TABLE_CONTEXT
)
# NOTE: by partially filling schema, we can reduce to a RefinePrompt
# that we can feed to our table
# DEFAULT_REFINE_TABLE_CONTEXT_TMPL = (
# "We have provided a table schema below. "
# "---------------------\n"
# "{schema}\n"
# "---------------------\n"
# "We have also provided some context information below. "
# "{context_msg}\n"
# "---------------------\n"
# "Given the context information and the table schema, "
# "give a response to the following task: {query_str}\n"
# "We have provided an existing answer: {existing_answer}\n"
# "Given the new context, refine the original answer to better "
# "answer the question. "
# "If the context isn't useful, return the original answer."
# )
DEFAULT_REFINE_TABLE_CONTEXT_TMPL = (
"我们在下面提供了一个表结构。"
"---------------------\n"
"{schema}\n"
"---------------------\n"
"我们还在下面提供了一些上下文信息。"
"{context_msg}\n"
"---------------------\n"
"根据上下文信息和表结构,"
"针对以下任务给出一个回答:{query_str}\n"
"我们已经提供了一个现有答案:{existing_answer}\n"
"根据新的上下文,优化原始答案以更好地回答问题。"
"如果上下文无用,请保持原始答案。"
)
DEFAULT_REFINE_TABLE_CONTEXT_PROMPT = PromptTemplate(
DEFAULT_REFINE_TABLE_CONTEXT_TMPL, prompt_type=PromptType.TABLE_CONTEXT
)
############################################
# Knowledge-Graph Table
############################################
DEFAULT_KG_TRIPLET_EXTRACT_TMPL = (
"Some text is provided below. Given the text, extract up to "
"{max_knowledge_triplets} "
"knowledge triplets in the form of (subject, predicate, object). Avoid stopwords.\n"
"---------------------\n"
"Example:"
"Text: Alice is Bob's mother."
"Triplets:\n(Alice, is mother of, Bob)\n"
"Text: Philz is a coffee shop founded in Berkeley in 1982.\n"
"Triplets:\n"
"(Philz, is, coffee shop)\n"
"(Philz, founded in, Berkeley)\n"
"(Philz, founded in, 1982)\n"
"---------------------\n"
"Text: {text}\n"
"Triplets:\n"
)
# DEFAULT_KG_TRIPLET_EXTRACT_TMPL = (
# "下面提供了一些文本。根据文本,提取最多 {max_knowledge_triplets} 个知识三元组,"
# "形式为(主语,谓语,宾语)。避免使用停用词。\n"
# "---------------------\n"
# "示例:"
# "文本:Alice是Bob的母亲。"
# "三元组:\n(Alice,是...的母亲,Bob)\n"
# "文本:Philz是于1982年在伯克利创立的咖啡店。\n"
# "三元组:\n"
# "(Philz,是,咖啡店)\n"
# "(Philz,创立于,伯克利)\n"
# "(Philz,创立于,1982年)\n"
# "---------------------\n"
# "文本:{text}\n"
# "三元组:\n"
# )
DEFAULT_KG_TRIPLET_EXTRACT_PROMPT = PromptTemplate(
DEFAULT_KG_TRIPLET_EXTRACT_TMPL, prompt_type=PromptType.KNOWLEDGE_TRIPLET_EXTRACT
)
############################################
# HYDE
##############################################
# HYDE_TMPL = (
# "Please write a passage to answer the question\n"
# "Try to include as many key details as possible.\n"
# "\n"
# "\n"
# "{context_str}\n"
# "\n"
# "\n"
# 'Passage:"""\n'
# )
HYDE_TMPL = (
"请撰写一个段落来回答问题\n"
"尽量包含尽可能多的关键细节。\n"
"\n"
"\n"
"{context_str}\n"
"\n"
"\n"
'段落:"""\n'
)
DEFAULT_HYDE_PROMPT = PromptTemplate(HYDE_TMPL, prompt_type=PromptType.SUMMARY)
############################################
# Simple Input
############################################
DEFAULT_SIMPLE_INPUT_TMPL = "{query_str}"
DEFAULT_SIMPLE_INPUT_PROMPT = PromptTemplate(
DEFAULT_SIMPLE_INPUT_TMPL, prompt_type=PromptType.SIMPLE_INPUT
)
############################################
# Pandas
############################################
# DEFAULT_PANDAS_TMPL = (
# "You are working with a pandas dataframe in Python.\n"
# "The name of the dataframe is `df`.\n"
# "This is the result of `print(df.head())`:\n"
# "{df_str}\n\n"
# "Here is the input query: {query_str}.\n"
# "Given the df information and the input query, please follow "
# "these instructions:\n"
# "{instruction_str}"
# "Output:\n"
# )
DEFAULT_PANDAS_TMPL = (
"您正在使用 Python 中的 pandas 数据帧。\n"
"数据帧的名称是 `df`。\n"
"这是 `print(df.head())` 的结果:\n"
"{df_str}\n\n"
"这是输入的查询:{query_str}。\n"
"根据 df 信息和输入的查询,请遵循以下说明:\n"
"{instruction_str}"
"输出:\n"
)
DEFAULT_PANDAS_PROMPT = PromptTemplate(DEFAULT_PANDAS_TMPL, prompt_type=PromptType.PANDAS)
############################################
# JSON Path
############################################
# DEFAULT_JSON_PATH_TMPL = (
# "We have provided a JSON schema below:\n"
# "{schema}\n"
# "Given a task, respond with a JSON Path query that "
# "can retrieve data from a JSON value that matches the schema.\n"
# "Task: {query_str}\n"
# "JSONPath: "
# )
DEFAULT_JSON_PATH_TMPL = (
"我们在下面提供了一个 JSON 模式:\n"
"{schema}\n"
"根据任务,使用一个 JSON Path 查询来检索与模式匹配的 JSON 值中的数据。\n"
"任务:{query_str}\n"
"JSONPath:"
)
DEFAULT_JSON_PATH_PROMPT = PromptTemplate(
DEFAULT_JSON_PATH_TMPL, prompt_type=PromptType.JSON_PATH
)
############################################
# Choice Select
############################################
# DEFAULT_CHOICE_SELECT_PROMPT_TMPL = (
# "A list of documents is shown below. Each document has a number next to it along "
# "with a summary of the document. A question is also provided. \n"
# "Respond with the numbers of the documents "
# "you should consult to answer the question, in order of relevance, as well \n"
# "as the relevance score. The relevance score is a number from 1-10 based on "
# "how relevant you think the document is to the question.\n"
# "Do not include any documents that are not relevant to the question. \n"
# "Example format: \n"
# "Document 1:\n<summary of document 1>\n\n"
# "Document 2:\n<summary of document 2>\n\n"
# "...\n\n"
# "Document 10:\n<summary of document 10>\n\n"
# "Question: <question>\n"
# "Answer:\n"
# "Doc: 9, Relevance: 7\n"
# "Doc: 3, Relevance: 4\n"
# "Doc: 7, Relevance: 3\n\n"
# "Let's try this now: \n\n"
# "{context_str}\n"
# "Question: {query_str}\n"
# "Answer:\n"
# )
DEFAULT_CHOICE_SELECT_PROMPT_TMPL = (
"下面显示了一份文档列表。每个文档旁边都有一个数字,以及文档的摘要。还提供了一个问题。\n"
"请按照相关性顺序回答,列出您认为用于回答问题的文档的编号以及相关性评分(1-10)。\n"
"请勿包括与问题无关的文档。\n"
"示例格式:\n"
"文档 1:\n<文档 1 的摘要>\n\n"
"文档 2:\n<文档 2 的摘要>\n\n"
"...\n\n"
"文档 10:\n<文档 10 的摘要>\n\n"
"问题: <问题>\n"
"答案:\n"
"文档:9,相关性:7\n"
"文档:3,相关性:4\n"
"文档:7,相关性:3\n\n"
"现在让我们试一试:\n\n"
"{context_str}\n"
"问题: {query_str}\n"
"答案:\n"
)
DEFAULT_CHOICE_SELECT_PROMPT = PromptTemplate(
DEFAULT_CHOICE_SELECT_PROMPT_TMPL, prompt_type=PromptType.CHOICE_SELECT
) | [
"原始查询如下:{query_str}\n我们已经提供了一个现有答案:{existing_answer}\n我们有机会通过以下一些更多的上下文来完善现有答案(仅在需要时)。 \n------------\n{context_msg}\n------------\n在新的上下文基础上,完善原始答案以更好地回答查询。如果上下文对于完善答案没有帮助,那么返回原始答案。\n完善后的答案:",
"下面列出了一些选择项,它们以编号列表的形式呈现(从1到{num_chunks}),列表中的每个项目对应一个摘要。\n---------------------\n{context_list}\n---------------------\n仅使用上述选择,不使用先前的知识,返回与问题 '{query_str}' 最相关的前若干选择项 (不超过{branching_factor}个),按从最相关到最不相关的顺序排列。\n请以以下格式提供选择:'ANSWER: <编号>',并解释为什么选择这些摘要与问题相关。\n",
"下面提供了上下文信息,以编号列表形式提供(从1到{num_chunks}),其中列表中的每一项对应一个摘要。\n---------------------\n{context_list}---------------------\n根据上下文信息,这是一个新的信息片段:{new_chunk_text}\n答案为应更新的摘要的编号。答案应为与问题最相关的摘要对应的编号。\n",
"{query_str}",
"下面提供了一些文本。根据文本,从中提取最多 {max_keywords} 个关键词。避免使用停用词。---------------------\n{text}\n---------------------\n请以以下逗号分隔的格式提供关键词:'KEYWORDS: <关键词>'\n",
"下面显示了一份文档列表。每个文档旁边都有一个数字,以及文档的摘要。还提供了一个问题。\n请按照相关性顺序回答,列出您认为用于回答问题的文档的编号以及相关性评分(1-10)。\n请勿包括与问题无关的文档。\n示例格式:\n文档 1:\n<文档 1 的摘要>\n\n文档 2:\n<文档 2 的摘要>\n\n...\n\n文档 10:\n<文档 10 的摘要>\n\n问题: <问题>\n答案:\n文档:9,相关性:7\n文档:3,相关性:4\n文档:7,相关性:3\n\n现在让我们试一试:\n\n{context_str}\n问题: {query_str}\n答案:\n",
"下面是上下文信息。\n---------------------\n{context_str}\n---------------------\n根据上下文信息回答问题。\n问题:{query_str},详细说说\n答案:",
"写一个关于以下内容的摘要。尽量只使用所提供的信息。尽量包含尽可能多的关键细节。\n\n\n{context_str}\n\n\n摘要:\"\"\"\n",
"下面提供了一个问题。根据问题,从中提取最多 {max_keywords} 个关键词。专注于提取我们可以用来最佳查找答案的关键词。避免使用停用词。\n---------------------\n示例:问题:公司中层在杭州的住宿费是多少?\n关键词:公司中层,杭州,住宿费\n---------------------\n问题:{question}\n关键词:\n",
"以下是一些选择项,它们以编号列表的形式呈现(从1到{num_chunks}),其中列表中的每个项目对应一个摘要。\n---------------------\n{context_list}\n---------------------\n仅使用上述选择,不使用先前的知识,找出与问题 '{query_str}' 最相关的选择。\n请以以下格式提供答案:'ANSWER: <编号>',并解释为什么选择这个摘要与问题相关。\n"
] |
2024-01-10 | cbmchat/llama_index | schema.py | """Base schema for data structures."""
import json
import uuid
from abc import abstractmethod
from enum import Enum, auto
from hashlib import sha256
from typing import Any, Dict, List, Optional, Union
from typing_extensions import Self
from llama_index.bridge.pydantic import BaseModel, Field, root_validator
from llama_index.bridge.langchain import Document as LCDocument
from llama_index.utils import SAMPLE_TEXT
DEFAULT_TEXT_NODE_TMPL = "{metadata_str}\n\n{content}"
DEFAULT_METADATA_TMPL = "{key}: {value}"
class BaseComponent(BaseModel):
"""Base component object to caputure class names."""
@classmethod
@abstractmethod
def class_name(cls) -> str:
"""Get class name."""
def to_dict(self, **kwargs: Any) -> Dict[str, Any]:
data = self.dict(**kwargs)
data["class_name"] = self.class_name()
return data
def to_json(self, **kwargs: Any) -> str:
data = self.to_dict(**kwargs)
return json.dumps(data)
# TODO: return type here not supported by current mypy version
@classmethod
def from_dict(cls, data: Dict[str, Any], **kwargs: Any) -> Self: # type: ignore
if isinstance(kwargs, dict):
data.update(kwargs)
data.pop("class_name", None)
return cls(**data)
@classmethod
def from_json(cls, data_str: str, **kwargs: Any) -> Self: # type: ignore
data = json.loads(data_str)
return cls.from_dict(data, **kwargs)
class NodeRelationship(str, Enum):
"""Node relationships used in `BaseNode` class.
Attributes:
SOURCE: The node is the source document.
PREVIOUS: The node is the previous node in the document.
NEXT: The node is the next node in the document.
PARENT: The node is the parent node in the document.
CHILD: The node is a child node in the document.
"""
SOURCE = auto()
PREVIOUS = auto()
NEXT = auto()
PARENT = auto()
CHILD = auto()
class ObjectType(str, Enum):
TEXT = auto()
IMAGE = auto()
INDEX = auto()
DOCUMENT = auto()
class MetadataMode(str, Enum):
ALL = auto()
EMBED = auto()
LLM = auto()
NONE = auto()
class RelatedNodeInfo(BaseComponent):
node_id: str
node_type: Optional[ObjectType] = None
metadata: Dict[str, Any] = Field(default_factory=dict)
hash: Optional[str] = None
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "RelatedNodeInfo"
RelatedNodeType = Union[RelatedNodeInfo, List[RelatedNodeInfo]]
# Node classes for indexes
class BaseNode(BaseComponent):
"""Base node Object.
Generic abstract interface for retrievable nodes
"""
class Config:
allow_population_by_field_name = True
id_: str = Field(
default_factory=lambda: str(uuid.uuid4()), description="Unique ID of the node."
)
embedding: Optional[List[float]] = Field(
default=None, description="Embedding of the node."
)
""""
metadata fields
- injected as part of the text shown to LLMs as context
- injected as part of the text for generating embeddings
- used by vector DBs for metadata filtering
"""
metadata: Dict[str, Any] = Field(
default_factory=dict,
description="A flat dictionary of metadata fields",
alias="extra_info",
)
excluded_embed_metadata_keys: List[str] = Field(
default_factory=list,
description="Metadata keys that are exluded from text for the embed model.",
)
excluded_llm_metadata_keys: List[str] = Field(
default_factory=list,
description="Metadata keys that are exluded from text for the LLM.",
)
relationships: Dict[NodeRelationship, RelatedNodeType] = Field(
default_factory=dict,
description="A mapping of relationships to other node information.",
)
hash: str = Field(default="", description="Hash of the node content.")
@classmethod
@abstractmethod
def get_type(cls) -> str:
"""Get Object type."""
@abstractmethod
def get_content(self, metadata_mode: MetadataMode = MetadataMode.ALL) -> str:
"""Get object content."""
@abstractmethod
def get_metadata_str(self, mode: MetadataMode = MetadataMode.ALL) -> str:
"""Metadata string."""
@abstractmethod
def set_content(self, value: Any) -> None:
"""Set the content of the node."""
@property
def node_id(self) -> str:
return self.id_
@node_id.setter
def node_id(self, value: str) -> None:
self.id_ = value
@property
def source_node(self) -> Optional[RelatedNodeInfo]:
"""Source object node.
Extracted from the relationships field.
"""
if NodeRelationship.SOURCE not in self.relationships:
return None
relation = self.relationships[NodeRelationship.SOURCE]
if isinstance(relation, list):
raise ValueError("Source object must be a single RelatedNodeInfo object")
return relation
@property
def prev_node(self) -> Optional[RelatedNodeInfo]:
"""Prev node."""
if NodeRelationship.PREVIOUS not in self.relationships:
return None
relation = self.relationships[NodeRelationship.PREVIOUS]
if not isinstance(relation, RelatedNodeInfo):
raise ValueError("Previous object must be a single RelatedNodeInfo object")
return relation
@property
def next_node(self) -> Optional[RelatedNodeInfo]:
"""Next node."""
if NodeRelationship.NEXT not in self.relationships:
return None
relation = self.relationships[NodeRelationship.NEXT]
if not isinstance(relation, RelatedNodeInfo):
raise ValueError("Next object must be a single RelatedNodeInfo object")
return relation
@property
def parent_node(self) -> Optional[RelatedNodeInfo]:
"""Parent node."""
if NodeRelationship.PARENT not in self.relationships:
return None
relation = self.relationships[NodeRelationship.PARENT]
if not isinstance(relation, RelatedNodeInfo):
raise ValueError("Parent object must be a single RelatedNodeInfo object")
return relation
@property
def child_nodes(self) -> Optional[List[RelatedNodeInfo]]:
"""Child nodes."""
if NodeRelationship.CHILD not in self.relationships:
return None
relation = self.relationships[NodeRelationship.CHILD]
if not isinstance(relation, list):
raise ValueError("Child objects must be a list of RelatedNodeInfo objects.")
return relation
@property
def ref_doc_id(self) -> Optional[str]:
"""Deprecated: Get ref doc id."""
source_node = self.source_node
if source_node is None:
return None
return source_node.node_id
@property
def extra_info(self) -> Dict[str, Any]:
"""TODO: DEPRECATED: Extra info."""
return self.metadata
def get_embedding(self) -> List[float]:
"""Get embedding.
Errors if embedding is None.
"""
if self.embedding is None:
raise ValueError("embedding not set.")
return self.embedding
def as_related_node_info(self) -> RelatedNodeInfo:
"""Get node as RelatedNodeInfo."""
return RelatedNodeInfo(
node_id=self.node_id, metadata=self.metadata, hash=self.hash
)
class TextNode(BaseNode):
text: str = Field(default="", description="Text content of the node.")
start_char_idx: Optional[int] = Field(
default=None, description="Start char index of the node."
)
end_char_idx: Optional[int] = Field(
default=None, description="End char index of the node."
)
file: Optional[str] = None
text_template: str = Field(
default=DEFAULT_TEXT_NODE_TMPL,
description=(
"Template for how text is formatted, with {content} and "
"{metadata_str} placeholders."
),
)
metadata_template: str = Field(
default=DEFAULT_METADATA_TMPL,
description=(
"Template for how metadata is formatted, with {key} and "
"{value} placeholders."
),
)
metadata_seperator: str = Field(
default="\n",
description="Seperator between metadata fields when converting to string.",
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "TextNode"
@root_validator
def _check_hash(cls, values: dict) -> dict:
"""Generate a hash to represent the node."""
text = values.get("text", "")
metadata = values.get("metadata", {})
doc_identity = str(text) + str(metadata)
values["hash"] = str(
sha256(doc_identity.encode("utf-8", "surrogatepass")).hexdigest()
)
return values
@classmethod
def get_type(cls) -> str:
"""Get Object type."""
return ObjectType.TEXT
def get_content(self, metadata_mode: MetadataMode = MetadataMode.NONE) -> str:
"""Get object content."""
metadata_str = self.get_metadata_str(mode=metadata_mode).strip()
if not metadata_str:
return self.text
return self.text_template.format(
content=self.text, metadata_str=metadata_str
).strip()
def get_metadata_str(self, mode: MetadataMode = MetadataMode.ALL) -> str:
"""metadata info string."""
if mode == MetadataMode.NONE:
return ""
usable_metadata_keys = set(self.metadata.keys())
if mode == MetadataMode.LLM:
for key in self.excluded_llm_metadata_keys:
if key in usable_metadata_keys:
usable_metadata_keys.remove(key)
elif mode == MetadataMode.EMBED:
for key in self.excluded_embed_metadata_keys:
if key in usable_metadata_keys:
usable_metadata_keys.remove(key)
return self.metadata_seperator.join(
[
self.metadata_template.format(key=key, value=str(value))
for key, value in self.metadata.items()
if key in usable_metadata_keys
]
)
def set_content(self, value: str) -> None:
"""Set the content of the node."""
self.text = value
def get_node_info(self) -> Dict[str, Any]:
"""Get node info."""
return {"start": self.start_char_idx, "end": self.end_char_idx}
def get_text(self) -> str:
return self.get_content(metadata_mode=MetadataMode.NONE)
@property
def node_info(self) -> Dict[str, Any]:
"""Deprecated: Get node info."""
return self.get_node_info()
# TODO: legacy backport of old Node class
Node = TextNode
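# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Demonstrates how the metadata templates and exclusion lists above interact in
# get_content(); the text and metadata values are placeholders.
#
# node = TextNode(
#     text="LlamaIndex nodes can carry metadata alongside their text.",
#     metadata={"file_name": "notes.md"},
#     excluded_llm_metadata_keys=["file_name"],
# )
# print(node.get_content(metadata_mode=MetadataMode.EMBED))  # metadata included
# print(node.get_content(metadata_mode=MetadataMode.LLM))    # "file_name" left out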
class ImageNode(TextNode):
"""Node with image."""
# TODO: store reference instead of actual image
# base64 encoded image str
image: Optional[str] = None
@classmethod
def get_type(cls) -> str:
return ObjectType.IMAGE
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "ImageNode"
class IndexNode(TextNode):
"""Node with reference to any object.
This can include other indices, query engines, retrievers.
This can also include other nodes (though this is overlapping with `relationships`
on the Node class).
"""
index_id: str
@classmethod
def from_text_node(
cls,
node: TextNode,
index_id: str,
) -> "IndexNode":
"""Create index node from text node."""
# copy all attributes from text node, add index id
return cls(
**node.dict(),
index_id=index_id,
)
@classmethod
def get_type(cls) -> str:
return ObjectType.INDEX
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "IndexNode"
class NodeWithScore(BaseComponent):
node: BaseNode
score: Optional[float] = None
def get_score(self, raise_error: bool = False) -> float:
"""Get score."""
if self.score is None:
if raise_error:
raise ValueError("Score not set.")
else:
return 0.0
else:
return self.score
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "NodeWithScore"
# Document Classes for Readers
class Document(TextNode):
"""Generic interface for a data document.
This document connects to data sources.
"""
# TODO: A lot of backwards compatibility logic here, clean up
id_: str = Field(
default_factory=lambda: str(uuid.uuid4()),
description="Unique ID of the node.",
alias="doc_id",
)
_compat_fields = {"doc_id": "id_", "extra_info": "metadata"}
@classmethod
def get_type(cls) -> str:
"""Get Document type."""
return ObjectType.DOCUMENT
@property
def doc_id(self) -> str:
"""Get document ID."""
return self.id_
def get_doc_id(self) -> str:
"""TODO: Deprecated: Get document ID."""
return self.id_
def __setattr__(self, name: str, value: object) -> None:
if name in self._compat_fields:
name = self._compat_fields[name]
super().__setattr__(name, value)
def to_langchain_format(self) -> LCDocument:
"""Convert struct to LangChain document format."""
metadata = self.metadata or {}
return LCDocument(page_content=self.text, metadata=metadata)
@classmethod
def from_langchain_format(cls, doc: LCDocument) -> "Document":
"""Convert struct from LangChain document format."""
return cls(text=doc.page_content, metadata=doc.metadata)
@classmethod
def example(cls) -> "Document":
document = Document(
text=SAMPLE_TEXT,
metadata={"filename": "README.md", "category": "codebase"},
)
return document
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "Document"
class ImageDocument(Document):
"""Data document containing an image."""
# base64 encoded image str
image: Optional[str] = None
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "ImageDocument"
| [
"{metadata_str} placeholders.",
"Template for how metadata is formatted, with {key} and ",
"{value} placeholders.",
"Template for how text is formatted, with {content} and "
] |
2024-01-10 | cbmchat/llama_index | llama_index~evaluation~dataset_generation.py | """Dataset generation from documents."""
from __future__ import annotations
import asyncio
import json
import re
import uuid
from typing import Dict, List, Optional, Tuple
from pydantic import BaseModel, Field
from llama_index import Document, ServiceContext, SummaryIndex
from llama_index.indices.postprocessor.node import KeywordNodePostprocessor
from llama_index.llms.openai import OpenAI
from llama_index.prompts.base import BasePromptTemplate, PromptTemplate
from llama_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from llama_index.prompts.mixin import PromptDictType, PromptMixin, PromptMixinType
from llama_index.schema import BaseNode, MetadataMode, NodeWithScore
# DEFAULT_QUESTION_GENERATION_PROMPT = """\
# Context information is below.
# ---------------------
# {context_str}
# ---------------------
# Given the context information and not prior knowledge.
# generate only questions based on the below query.
# {query_str}
DEFAULT_QUESTION_GENERATION_PROMPT = """上下文信息如下。\n"
"\n---------------------\n{context_str}\n-------------------- --\n"
"给出上下文信息而不是先验知识。\n"
"仅根据以下查询生成问题。\n"
"{query_str}\n"
"""
def _get_default_service_context() -> ServiceContext:
"""Get default service context."""
llm = OpenAI(temperature=0, model="gpt-3.5-turbo")
return ServiceContext.from_defaults(llm=llm, chunk_size_limit=3000)
class QueryResponseDataset(BaseModel):
"""Query Response Dataset.
The response can be empty if the dataset is generated from documents.
Args:
queries (Dict[str, str]): Query id -> query.
responses (Dict[str, str]): Query id -> response.
"""
queries: Dict[str, str] = Field(
default_factory=dict, description="Query id -> query"
)
responses: Dict[str, str] = Field(
default_factory=dict, description="Query id -> response"
)
@classmethod
def from_qr_pairs(
cls,
qr_pairs: List[Tuple[str, str]],
) -> QueryResponseDataset:
"""Create from qr pairs."""
# define ids as simple integers
queries = {str(idx): query for idx, (query, _) in enumerate(qr_pairs)}
responses = {str(idx): response for idx, (_, response) in enumerate(qr_pairs)}
return cls(queries=queries, responses=responses)
@property
def qr_pairs(self) -> List[Tuple[str, str]]:
"""Get pairs."""
# if query_id not in response, throw error
for query_id in self.queries:
if query_id not in self.responses:
raise ValueError(f"Query id {query_id} not in responses")
return [
(self.queries[query_id], self.responses[query_id])
for query_id in self.queries
]
@property
def questions(self) -> List[str]:
"""Get questions."""
return list(self.queries.values())
def save_json(self, path: str) -> None:
"""Save json."""
with open(path, "w") as f:
json.dump(self.dict(), f, indent=4)
@classmethod
def from_json(cls, path: str) -> QueryResponseDataset:
"""Load json."""
with open(path) as f:
data = json.load(f)
return cls(**data)
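# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Builds a tiny dataset from (question, answer) pairs and persists it; the file
# name is a placeholder.
#
# dataset = QueryResponseDataset.from_qr_pairs(
#     [("What is the refund window?", "30 days."),
#      ("Who approves travel?", "The department head.")]
# )
# dataset.save_json("qr_dataset.json")
# reloaded = QueryResponseDataset.from_json("qr_dataset.json")
# assert reloaded.qr_pairs == dataset.qr_pairs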
class DatasetGenerator(PromptMixin):
"""Generate dataset (question/ question-answer pairs) \
based on the given documents.
NOTE: this is a beta feature, subject to change!
Args:
nodes (List[Node]): List of nodes. (Optional)
service_context (ServiceContext): Service Context.
num_questions_per_chunk: number of question to be \
generated per chunk. Each document is chunked of size 512 words.
text_question_template: Question generation template.
question_gen_query: Question generation query.
"""
def __init__(
self,
nodes: List[BaseNode],
service_context: ServiceContext | None = None,
num_questions_per_chunk: int = 10,
text_question_template: BasePromptTemplate | None = None,
text_qa_template: BasePromptTemplate | None = None,
question_gen_query: str | None = None,
metadata_mode: MetadataMode = MetadataMode.NONE,
show_progress: bool = False,
) -> None:
"""Init params."""
if service_context is None:
service_context = _get_default_service_context()
self.service_context = service_context
self.text_question_template = text_question_template or PromptTemplate(
DEFAULT_QUESTION_GENERATION_PROMPT
)
self.text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
self.question_gen_query = (
question_gen_query
or f"You are a Teacher/Professor. Your task is to setup \
{num_questions_per_chunk} questions for an upcoming \
quiz/examination. The questions should be diverse in nature \
across the document. Restrict the questions to the \
context information provided."
)
self.nodes = nodes
self._metadata_mode = metadata_mode
self._show_progress = show_progress
@classmethod
def from_documents(
cls,
documents: List[Document],
service_context: ServiceContext | None = None,
num_questions_per_chunk: int = 10,
text_question_template: BasePromptTemplate | None = None,
text_qa_template: BasePromptTemplate | None = None,
question_gen_query: str | None = None,
required_keywords: List[str] | None = None,
exclude_keywords: List[str] | None = None,
show_progress: bool = False,
) -> DatasetGenerator:
"""Generate dataset from documents."""
if service_context is None:
service_context = _get_default_service_context()
nodes = service_context.node_parser.get_nodes_from_documents(documents)
# use node postprocessor to filter nodes
required_keywords = required_keywords or []
exclude_keywords = exclude_keywords or []
node_postprocessor = KeywordNodePostprocessor(
service_context=service_context,
required_keywords=required_keywords,
exclude_keywords=exclude_keywords,
)
node_with_scores = [NodeWithScore(node=node) for node in nodes]
node_with_scores = node_postprocessor.postprocess_nodes(node_with_scores)
nodes = [node_with_score.node for node_with_score in node_with_scores]
return cls(
nodes=nodes,
service_context=service_context,
num_questions_per_chunk=num_questions_per_chunk,
text_question_template=text_question_template,
text_qa_template=text_qa_template,
question_gen_query=question_gen_query,
show_progress=show_progress,
)
async def _agenerate_dataset(
self,
nodes: List[BaseNode],
num: int | None = None,
generate_response: bool = False,
) -> QueryResponseDataset:
"""Node question generator."""
query_tasks = []
queries: Dict[str, str] = {}
responses_dict: Dict[str, str] = {}
if self._show_progress:
from tqdm.asyncio import tqdm_asyncio
async_module = tqdm_asyncio
else:
async_module = asyncio
summary_indices: List[SummaryIndex] = []
for node in nodes:
if num is not None and len(queries) >= num:
break
index = SummaryIndex.from_documents(
[
Document(
text=node.get_content(metadata_mode=self._metadata_mode),
metadata=node.metadata,
)
],
service_context=self.service_context,
)
query_engine = index.as_query_engine(
service_context=self.service_context,
text_qa_template=self.text_question_template,
use_async=True,
)
task = query_engine.aquery(
self.question_gen_query,
)
query_tasks.append(task)
summary_indices.append(index)
responses = await async_module.gather(*query_tasks)
for idx, response in enumerate(responses):
result = str(response).strip().split("\n")
cleaned_questions = [
re.sub(r"^\d+[\).\s]", "", question).strip() for question in result
]
cleaned_questions = [
question for question in cleaned_questions if len(question) > 0
]
cur_queries = {
str(uuid.uuid4()): question for question in cleaned_questions
}
queries.update(cur_queries)
if generate_response:
index = summary_indices[idx]
qr_tasks = []
cur_query_items = list(cur_queries.items())
cur_query_keys = [query_id for query_id, _ in cur_query_items]
for query_id, query in cur_query_items:
qa_query_engine = index.as_query_engine(
service_context=self.service_context,
text_qa_template=self.text_qa_template,
)
qr_task = qa_query_engine.aquery(query)
qr_tasks.append(qr_task)
qr_responses = await async_module.gather(*qr_tasks)
for query_id, qa_response in zip(cur_query_keys, qr_responses):
responses_dict[query_id] = str(qa_response)
else:
pass
query_ids = list(queries.keys())
if num is not None:
query_ids = query_ids[:num]
# truncate queries, responses to the subset of query ids
queries = {query_id: queries[query_id] for query_id in query_ids}
if generate_response:
responses_dict = {
query_id: responses_dict[query_id] for query_id in query_ids
}
return QueryResponseDataset(queries=queries, responses=responses_dict)
async def agenerate_questions_from_nodes(self, num: int | None = None) -> List[str]:
"""Generates questions for each document."""
dataset = await self._agenerate_dataset(
self.nodes, num=num, generate_response=False
)
return dataset.questions
async def agenerate_dataset_from_nodes(
self, num: int | None = None
) -> QueryResponseDataset:
"""Generates questions for each document."""
return await self._agenerate_dataset(
self.nodes, num=num, generate_response=True
)
def generate_questions_from_nodes(self, num: int | None = None) -> List[str]:
"""Generates questions for each document."""
return asyncio.run(self.agenerate_questions_from_nodes(num=num))
def generate_dataset_from_nodes(
self, num: int | None = None
) -> QueryResponseDataset:
"""Generates questions for each document."""
return asyncio.run(self.agenerate_dataset_from_nodes(num=num))
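# --- Illustrative usage sketch (added for clarity; not part of the original class) ---
# End-to-end use of the generator. Assumes an OpenAI API key is configured and
# that SimpleDirectoryReader is available in this llama_index build; the data
# directory is a placeholder.
#
# from llama_index import SimpleDirectoryReader
#
# documents = SimpleDirectoryReader("./data").load_data()
# generator = DatasetGenerator.from_documents(documents, num_questions_per_chunk=3)
# eval_dataset = generator.generate_dataset_from_nodes(num=10)
# for question, answer in eval_dataset.qr_pairs:
#     print(question, "->", answer)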
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {
"text_question_template": self.text_question_template,
"text_qa_template": self.text_qa_template,
}
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt modules."""
return {}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
if "text_question_template" in prompts:
self.text_question_template = prompts["text_question_template"]
if "text_qa_template" in prompts:
self.text_qa_template = prompts["text_qa_template"]
| [
"上下文信息如下。\n\"\n\"\n---------------------\n{context_str}\n-------------------- --\n\"\n\"给出上下文信息而不是先验知识。\n\"\n\"仅根据以下查询生成问题。\n\"\n\"{query_str}\n\"\n"
] |
2024-01-10 | cbmchat/llama_index | indices~postprocessor~optimizer.py | """Optimization related classes and functions."""
import logging
from typing import Callable, List, Optional
from llama_index.bridge.pydantic import Field, PrivateAttr
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.indices.postprocessor.types import BaseNodePostprocessor
from llama_index.indices.query.embedding_utils import get_top_k_embeddings
from llama_index.indices.query.schema import QueryBundle
from llama_index.schema import MetadataMode, NodeWithScore
logger = logging.getLogger(__name__)
class SentenceEmbeddingOptimizer(BaseNodePostprocessor):
"""Optimization of a text chunk given the query by shortening the input text."""
percentile_cutoff: Optional[float] = Field(
description="Percentile cutoff for the top k sentences to use."
)
threshold_cutoff: Optional[float] = Field(
description="Threshold cutoff for similiarity for each sentence to use."
)
_embed_model: BaseEmbedding = PrivateAttr()
_tokenizer_fn: Callable[[str], List[str]] = PrivateAttr()
def __init__(
self,
embed_model: Optional[BaseEmbedding] = None,
percentile_cutoff: Optional[float] = None,
threshold_cutoff: Optional[float] = None,
tokenizer_fn: Optional[Callable[[str], List[str]]] = None,
):
"""Optimizer class that is passed into BaseGPTIndexQuery.
Should be set like this:
.. code-block:: python
from llama_index.optimization.optimizer import Optimizer
optimizer = SentenceEmbeddingOptimizer(
percentile_cutoff=0.5
this means that the top 50% of sentences will be used.
Alternatively, you can set the cutoff using a threshold
on the similarity score. In this case only sentences with a
similarity score higher than the threshold will be used.
threshold_cutoff=0.7
these cutoffs can also be used together.
)
query_engine = index.as_query_engine(
optimizer=optimizer
)
response = query_engine.query("<query_str>")
"""
self._embed_model = embed_model or OpenAIEmbedding()
if tokenizer_fn is None:
import nltk.data
import os
from llama_index.utils import get_cache_dir
cache_dir = get_cache_dir()
nltk_data_dir = os.environ.get("NLTK_DATA", cache_dir)
# update the nltk data search path so that it finds the downloaded data
if nltk_data_dir not in nltk.data.path:
nltk.data.path.append(nltk_data_dir)
try:
nltk.data.find("tokenizers/punkt")
except LookupError:
nltk.download("punkt", download_dir=nltk_data_dir)
tokenizer = nltk.data.load("tokenizers/punkt/english.pickle")
tokenizer_fn = tokenizer.tokenize
self._tokenizer_fn = tokenizer_fn
super().__init__(
percentile_cutoff=percentile_cutoff,
threshold_cutoff=threshold_cutoff,
)
@classmethod
def class_name(cls) -> str:
return "SentenceEmbeddingOptimizer"
def postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
"""Optimize a node text given the query by shortening the node text."""
if query_bundle is None:
return nodes
for node_idx in range(len(nodes)):
text = nodes[node_idx].node.get_content(metadata_mode=MetadataMode.LLM)
split_text = self._tokenizer_fn(text)
if query_bundle.embedding is None:
query_bundle.embedding = (
self._embed_model.get_agg_embedding_from_queries(
query_bundle.embedding_strs
)
)
text_embeddings = self._embed_model._get_text_embeddings(split_text)
num_top_k = None
threshold = None
if self.percentile_cutoff is not None:
num_top_k = int(len(split_text) * self.percentile_cutoff)
if self.threshold_cutoff is not None:
threshold = self.threshold_cutoff
top_similarities, top_idxs = get_top_k_embeddings(
query_embedding=query_bundle.embedding,
embeddings=text_embeddings,
similarity_fn=self._embed_model.similarity,
similarity_top_k=num_top_k,
embedding_ids=list(range(len(text_embeddings))),
similarity_cutoff=threshold,
)
if len(top_idxs) == 0:
raise ValueError("Optimizer returned zero sentences.")
top_sentences = [split_text[idx] for idx in top_idxs]
logger.debug(f"> Top {len(top_idxs)} sentences with scores:\n")
if logger.isEnabledFor(logging.DEBUG):
for idx in range(len(top_idxs)):
logger.debug(
f"{idx}. {top_sentences[idx]} ({top_similarities[idx]})"
)
nodes[node_idx].node.set_content(" ".join(top_sentences))
return nodes
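# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Typical wiring as a node postprocessor on a query engine. Assumes an `index`
# built elsewhere and OpenAI embedding credentials.
#
# optimizer = SentenceEmbeddingOptimizer(percentile_cutoff=0.5)
# query_engine = index.as_query_engine(node_postprocessors=[optimizer])
# response = query_engine.query("What is the travel reimbursement policy?")
# print(response)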
| [] |
2024-01-10 | datasnakes/biopython | Bio~Align~Applications~_Guidance2.py | # -*- coding: utf-8 -*-
# Copyright 2017 by Rob Gilmore and Shaurita Hutchins. All rights reserved.
# Based on ClustalOmega wrapper copyright 2011 by Andreas Wilm.
#
# Wrapper for Guidance2 by Rob Gilmore (2017). http://guidance.tau.ac.il/ver2/
# Used _ClustalOmega.py as template.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Command line wrapper for the multiple alignment program, GUIDANCE2.
It weights, filters or masks unreliably aligned positions in multiple
sequence alignments.
"""
from __future__ import print_function
import os
from Bio.Application import _Option, AbstractCommandline
class Guidance2Commandline(AbstractCommandline):
"""Command line wrapper for GUIDANCE2.
http://guidance.tau.ac.il/ver2/
https://github.com/grabear/guidance
Notes
-----
Last checked against version: 1.2.0
References
----------
Sela, I., Ashkenazy, H., Katoh, K. and Pupko, T. (2015)
GUIDANCE2: accurate detection of unreliable alignment regions
accounting for the uncertainty of multiple parameters.
Nucleic Acids Research, 2015 Jul 1; 43 (Web Server issue): W7-W14.;
https://doi.org/10.1093/nar/gkq443
Landan, G., and D. Graur. (2008). Local reliability measures from
sets of co-optimal multiple sequence alignments. Pac Symp Biocomput
13:15-24; http://psb.stanford.edu/psb-online/proceedings/psb08/abstracts/2008_p15.html
Examples
--------
>>> from Bio.Align.Applications import Guidance2Commandline
>>> import os
>>> seqFile = "ADGRB1.ffn"
>>> msaProgram = "CLUSTALW"
>>> seqType = "codon"
>>> outDir = os.getcwd()
>>> Guidance2_cline = Guidance2Commandline(seqFile=seqFile, msaProgram=msaProgram, seqType=seqType, outDir=str(outDir), bootstraps=20, seqCutoff=0.63, colCutoff=0.9, outOrder='as_input', dataset='ADGRB1')
>>> print(Guidance2_cline)  # doctest: +SKIP
guidance --seqFile ADGRB1.ffn --msaProgram CLUSTALW --seqType codon --outDir /path/to/current/dir --bootstraps 20 --seqCutoff 0.63 --colCutoff 0.9 --outOrder as_input --dataset ADGRB1
You would typically run the command line with Guidance2_cline() or via
the Python subprocess module, as described in the Biopython tutorial.
"""
def __init__(self, cmd="guidance", **kwargs):
"""Initialize the class."""
self.parameters = \
[
# Required Parameters
_Option(['--seqFile', 'seqFile'],
"Input sequence file in FASTA format",
filename=True, equate=False, is_required=True,
checker_function=lambda x: os.path.splitext(x)[1] in ['.fasta', '.fna', '.ffn', '.faa', '.fra'] and os.path.isfile(x)),
_Option(['--msaProgram', 'msaProgram'],
"Which MSA program to use",
equate=False, is_required=True,
checker_function=lambda x: x in ['MAFFT', 'PRANK', 'CLUSTALW', 'MUSCLE']),
_Option(['--seqType', 'seqType'],
"Type of sequences for alignment (amino acids, nucleotides, or codons)",
equate=False, is_required=True,
checker_function=lambda x: x in ['aa', 'nuc', 'codon']),
_Option(['--outDir', 'outDir'],
"Output directory that will be created "
"automatically and hold all output files [please provid full (and not relative) path]",
filename=True, equate=False, is_required=True),
# Optional Parameters
_Option(['--program', 'program'],
"[GUIDANCE2|GUIDANCE|HoT] Default=GUIDANCE2",
equate=False,
checker_function=lambda x: x in ["GUIDANCE2", "GUIDANCE", "HoT"]),
_Option(['--bootstraps', 'bootstraps'],
"Number of bootstrap iterations (only for GUIDQANCE). Defaut=100",
equate=False,
checker_function=lambda x: isinstance(x, int)),
_Option(['--genCode', 'genCode'],
"Genetic code identifier (only for codon sequences). Default=1 \
1) Nuclear Standard\
2) Mitochondria Vertebrate\
3) Mitochondria Yeast\
4) Mitochondria Protozoan\
5) Mitochondria Invertebrate\
6) Nuclear Ciliate\
9) Mitochondria Echinoderm\
10) Nuclear Euplotid\
13) Mitochondria Ascidian\
14) Mitochondria Flatworm \
15) Nuclear Blepharisma",
equate=False,
checker_function=lambda x: isinstance(x, int)),
_Option(['--outOrder', 'outOrder'],
"[aligned|as_input] default=aligned",
equate=False,
checker_function=lambda x: x in ['aligned',
'as_input']),
_Option(['--msaFile', 'msaFile'],
"Input alignment file - not recommended",
filename=True, equate=False,
checker_function=lambda x: os.path.isfile(x)),
# Confidence scores
_Option(['--seqCutoff', 'seqCutoff'],
"Confidence cutoff between 0 to 1. Default=0.6",
equate=False,
checker_function=lambda x: isinstance(x, (int, float))),
_Option(['--colCutoff', 'colCutoff'],
"Confidence cutoff between 0 to 1. Default=0.93",
equate=False,
checker_function=lambda x: isinstance(x, (int, float))),
# Alignment Programs
_Option(['--mafft', 'mafft'],
"path to mafft executable. Default=mafft",
filename=True, equate=False,
checker_function=lambda x: os.path.isfile(x)),
_Option(['--prank', 'prank'],
"path to prank executable. Default=prank",
filename=True, equate=False,
checker_function=lambda x: os.path.isfile(x)),
_Option(['--muscle', 'muscle'],
"path to muscle executable. default=muscle",
filename=True, equate=False,
checker_function=lambda x: os.path.isfile(x)),
_Option(['--pagan', 'pagan'],
"path to pagan executable, default=pagan",
filename=True, equate=False,
checker_function=lambda x: os.path.isfile(x)),
_Option(['--ruby', 'ruby'],
"path to ruby executable. default=ruby",
filename=True, equate=False,
checker_function=lambda x: os.path.isfile(x)),
# Miscellaneous
_Option(['--dataset', 'dataset'],
"Unique name for the Dataset - will be used as prefix to outputs (default=MSA)",
equate=False),
_Option(['--MSA_Param', 'MSA_Param'],
"passing parameters for the alignment program e.g -F to prank. "
"To pass parameter containning '-' in it, add \ before each '-' e.g. \-F for PRANK",
equate=False),
_Option(['--proc_num', 'proc_num'],
"number of processors to use (default=1)",
equate=False,
checker_function=lambda x: isinstance(x, int))
]
AbstractCommandline.__init__(self, cmd, **kwargs)
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
| [] |
2024-01-10 | datasnakes/biopython | Bio~Align~Applications~__init__.py | # Copyright 2009 by Peter Cock & Cymon J. Cox. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Alignment command line tool wrappers."""
from ._Muscle import MuscleCommandline
from ._Clustalw import ClustalwCommandline
from ._ClustalOmega import ClustalOmegaCommandline
from ._Prank import PrankCommandline
from ._Mafft import MafftCommandline
from ._Dialign import DialignCommandline
from ._Probcons import ProbconsCommandline
from ._TCoffee import TCoffeeCommandline
from ._MSAProbs import MSAProbsCommandline
from ._Guidance2 import Guidance2Commandline
# Make this explicit, then they show up in the API docs
__all__ = ("MuscleCommandline",
"ClustalwCommandline",
"ClustalOmegaCommandline",
"PrankCommandline",
"MafftCommandline",
"DialignCommandline",
"ProbconsCommandline",
"TCoffeeCommandline",
"MSAProbsCommandline",
"Guidance2Commandline"
)
| [] |
2024-01-10 | epicdelia/Tinderbot | tinderbot.py | from selenium import webdriver
from time import sleep
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import openai
import random
from selenium.common.exceptions import StaleElementReferenceException
import time
from config import email, password, api_key
openai.api_key = api_key # your api key
prompt = "write a haiku about "
def generate_tinder_message():
prompts = [
"write a haiku about ",
"write a great pick up line for someone named ",
"Compose a message of love for ",
"Write a tinder message to ",
"Write an icebreaker to "
]
return random.choice(prompts)
def generate_intro(prompt, name):
response = openai.Completion.create(
engine="text-davinci-002",
prompt= prompt + name,
temperature=0.5,
max_tokens=500
)
quote = response.choices[0].text.strip()
return quote
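# Illustrative sketch (editor addition): how the two helpers above are meant to be
# combined. The name "Delia" is only a placeholder, and the call hits the OpenAI API,
# so it is left commented out here.
# opener = generate_intro(generate_tinder_message(), "Delia")
# print(opener)  # e.g. a short haiku or pick-up line addressed to the match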
class TinderBot():
def __init__(self):
self.driver = webdriver.Chrome()
def open_tinder(self):
sleep(2)
self.driver.get('https://tinder.com')
login_button = WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.XPATH, '//div[contains(text(), "Log in")]')))
login_button.click()
sleep(5)
self.facebook_login()
sleep(6)
try:
allow_location_button = self.driver.find_element('xpath', '//*[@id="t-1917074667"]/main/div/div/div/div[3]/button[1]')
allow_location_button.click()
except:
print('no location popup')
try:
notifications_button = self.driver.find_element('xpath', '/html/body/div[2]/main/div/div/div/div[3]/button[2]')
notifications_button.click()
except:
print('no notification popup')
def facebook_login(self):
# find and click FB login button
login_with_facebook = WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.XPATH, '//div[contains(text(), "Log in with Facebook")]')))
login_with_facebook.click()
# save references to main and FB windows
sleep(8)
base_window = self.driver.window_handles[0]
fb_popup_window = self.driver.window_handles[1]
# switch to FB window
self.driver.switch_to.window(fb_popup_window)
try:
cookies_accept_button = WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.XPATH, '//div[contains(text(), "Accept Cookies")]')))
cookies_accept_button.click()
except:
print('no cookies')
sleep(10)
email_field = self.driver.find_element(By.NAME, 'email')
pw_field = self.driver.find_element(By.NAME, 'pass')
login_button = self.driver.find_element(By.NAME, 'login')
email_field.send_keys(email)
pw_field.send_keys(password)
login_button.click()
self.driver.switch_to.window(base_window)
try:
allow_location_button_again = WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.XPATH, '//div[contains(text(), "Allow")]')))
allow_location_button_again.click()
except:
print('no location popup')
try:
enable_button = WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.XPATH, '//div[contains(text(), "Enable")]')))
enable_button.click()
except:
print('no location enable')
def right_swipe(self):
doc = self.driver.find_element('xpath', '//*[@id="Tinder"]/body')
doc.send_keys(Keys.ARROW_RIGHT)
def left_swipe(self):
doc = self.driver.find_element('xpath', '//*[@id="Tinder"]/body')
doc.send_keys(Keys.ARROW_LEFT)
def auto_swipe(self):
while True:
sleep(2)
try:
self.right_swipe()
except:
self.close_match()
def close_match(self):
match_popup = self.driver.find_element('xpath', '//*[@id="modal-manager-canvas"]/div/div/div[1]/div/div[3]/a')
match_popup.click()
def get_matches(self):
match_profiles = self.driver.find_elements('class name', 'matchListItem')
print(str(match_profiles))
message_links = []
for profile in match_profiles:
if profile.get_attribute('href') == 'https://tinder.com/app/my-likes' or profile.get_attribute('href') == 'https://tinder.com/app/likes-you':
continue
match_name = profile.find_element(By.CLASS_NAME, 'Ell')
name = match_name.text
print("got matches")
print(name)
message_links.append((name, profile.get_attribute('href')))
return message_links
def send_messages_to_matches(self):
links = self.get_matches()
for name, link in links:
self.send_message(name, link)
def send_message(self, name, link):
self.driver.get(link)
sleep(5)
text_area = self.driver.find_element('xpath', '/html/body/div[1]/div/div[1]/div/main/div[1]/div/div/div/div[1]/div/div/div[3]/form/textarea')
print("sending message")
message = generate_intro(generate_tinder_message(), name)
text_area.send_keys(message)
sleep(10)
# text_area.send_keys(Keys.ENTER)
bot = TinderBot()
bot.open_tinder()
sleep(10)
# bot.auto_swipe()
# bot.send_messages_to_matches() | [
"write a haiku about PLACEHOLDER",
"write a haiku about ",
"['write a haiku about ', 'write a great pick up line for someone named ', 'Compose a message of love for ', 'Write a tinder message to ', 'Write an icebreaker to ']"
] |
2024-01-10 | MichalZawalski/RL | src~mcts~alpacka_changes~networks~tensorflow.py | """Network interface implementation using the TF v1 framework."""
import warnings
import gin
import numpy as np
import tensorflow as tf
from alpacka.networks import core
class TFMetaGraphNetwork(core.Network):
"""Fixed network loaded from the TF v1 MetaGraph checkpoint."""
def __init__(self, network_signature,
model_path=gin.REQUIRED,
x_name='ppo2_model/Ob:0',
y_name='ppo2_model/pi_1/add:0'):
"""Initialize TF session from MetaGraph.
Args:
network_signature (NetworkSignature): Network signature.
model_path (string): Path to a saved model. It's a common part of
the three files with extensions: .meta, .index, .data.
x_name (string): Name of the input placeholder.
Default for PPO2 from OpenAI Baselines.
y_name (string): Name of the output tensor.
Default for PPO2 from OpenAI Baselines.
"""
super().__init__(network_signature)
        # Fall back to the legacy (graph-mode) execution engine and create a tf.Session.
tf.compat.v1.disable_eager_execution()
self._sess = tf.compat.v1.Session()
tf.compat.v1.keras.backend.set_session(self._sess)
# Import meta graph and restore checkpoint.
self._saver = tf.compat.v1.train.import_meta_graph(model_path + '.meta')
self._saver.restore(self._sess, model_path)
# Get input and output ops.
graph = tf.compat.v1.get_default_graph()
self._x = graph.get_tensor_by_name(x_name)
self._y = graph.get_tensor_by_name(y_name)
self._batch_size = self._x.shape[0]
# TODO(pj): Add training...
# Test the restored model compliance with the network signature.
assert self._x.shape[1:] == network_signature.input.shape
assert self._x.dtype == network_signature.input.dtype
assert self._y.shape[1:] == network_signature.output.shape
assert self._y.dtype == network_signature.output.dtype
if self._batch_size is not None:
warnings.warn(
'The input batch dimension has fixed size ({}), you should save'
' your graph with the batch dimension set to None.'.format(
self._batch_size))
def predict(self, inputs):
batch_size = inputs.shape[0]
if self._batch_size is not None and batch_size < self._batch_size:
# Handle an input batch size lower than the model fixed batch size.
inputs = np.resize(inputs, (self._batch_size, ) + inputs.shape[1:])
return self._sess.run(self._y, feed_dict={self._x: inputs})[:batch_size]
@property
def params(self):
return self._sess.run(tf.compat.v1.trainable_variables())
@params.setter
def params(self, new_params):
for t, v in zip(tf.compat.v1.trainable_variables(), new_params):
tf.compat.v1.keras.backend.set_value(t, v)
def save(self, checkpoint_path):
self._saver.save(sess=self._sess,
save_path=checkpoint_path,
global_step=tf.compat.v1.train.get_global_step())
def restore(self, checkpoint_path):
self._saver.restore(self._sess, checkpoint_path)
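# Illustrative usage sketch (editor addition), commented out because it needs a real
# checkpoint on disk. `my_signature` and the checkpoint path are assumptions; the
# x/y tensor names default to the PPO2 values documented in __init__.
#
# network = TFMetaGraphNetwork(network_signature=my_signature,       # hypothetical signature
#                              model_path='/path/to/checkpoint/model')  # prefix of .meta/.index/.data
# batch = np.zeros((1,) + my_signature.input.shape,
#                  dtype=my_signature.input.dtype)
# logits = network.predict(batch)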
| [] |
2024-01-10 | alexbraic/replit_100PythonDays | days~day95.py | # get the latest 5 news from newsAPI
# summarize each article content in 2-3 words using openAi
# in Spotify API, search for tracks that have similar names
#+to the article summary
# show the title and preview_url for 1 track for each summary
import requests, json, os
import openai
from requests.auth import HTTPBasicAuth
# secrets kept in Repl
API_KEY = os.environ['API_KEY']
# openai auth (expired credits so will not generate summary)
openai.api_key = os.environ['openAI']
openai.organization = os.environ['organizationID']
openai.Model.list()
# newsAPI ===========================================
# send the request and get the latest news
country = "us"
url = f"https://newsapi.org/v2/top-headlines?country={country}&apiKey={API_KEY}"
response = requests.get(url)
articles = response.json()
#print(json.dumps(articles, indent=2))
i = 0
for article in articles["articles"]:
if i == 5:
break
#print(article["title"])
#print(article["content"])
#print()
i += 1
# open ai ===========================================
# create openAi task and url
prompt = f'Summarize the following article in 2 or 3 words.\n {article["content"]}'
response = openai.Completion.create(model="text-davinci-002",
prompt=prompt,
temperature=0,
max_tokens=6)
# print the summary
#print(response["choices"][0]["text"].strip())
# use the summary to search for a track on Spotify
summary = response["choices"][0]["text"].strip()
# spotify ===========================================
# instantiate the client auth variables
client_id = os.environ['CLIENT_ID']
client_secret = os.environ['CLIENT_SECRET']
# build the post request that gets the access token
get_token_url = "https://accounts.spotify.com/api/token"
data = {'grant_type': 'client_credentials'}
auth = HTTPBasicAuth(client_id, client_secret)
# post request to get access token
resp = requests.post(get_token_url, data=data, auth=auth)
#print(resp.json())
# access token and bearer
access_token = resp.json()['access_token']
headers = {'Authorization': f'Bearer {access_token}'}
# build the url to search for tracks once auth is complete
s_url = "https://api.spotify.com/v1/search"
search = f"?q={summary}&type=track&limit=1"
full_URL = f'{s_url}{search}'
# save the response in a variable
songs = requests.get(full_URL, headers=headers)
songs_result = songs.json()
# output the result main points: name and preview_url
for track in songs_result["tracks"]["items"]:
print(track["name"])
print(track["preview_url"])
print()
| [
"Summarize the following article in 2 or 3 words.\n PLACEHOLDER"
] |
2024-01-10 | alexbraic/replit_100PythonDays | days~day94.py | # get the latest 5 news of the day and create a summary using openAi
# create news API/openAi account and get api key
# send request to newsAPI and output the first 5 articles
# create a summarise url for openAi and output the summary
import requests, json, os
import openai
# auth
# in Replit, the keys are saved as secrets
# in other projects, the keys can be held and referenced in
#+a different file
API_KEY = os.environ['API_KEY'] # newsAPI
openai.api_key = os.environ['openAI'] # openAi
openai.organization = os.environ['organizationID'] # openAi
openai.Model.list()
# change country to get news articles from different ones
country = "ie"
# create request url
url = f"https://newsapi.org/v2/top-headlines?country={country}&apiKey={API_KEY}"
# send request to newsAPI
result = requests.get(url)
data = result.json()
print(json.dumps(data, indent=2)) # print this to check result
# loop through articles and get main info for first 5 articles
i = 1
for article in data["articles"]:
if i == 6:
break
print(f'\t{i}: {article["title"]}')
print(article["url"])
print(article["content"])
# create openAi task and url
prompt = f'Summarize the following article {article["content"]}'
response = openai.Completion.create(model="text-davinci-002",
prompt=prompt,
temperature=0,
max_tokens=6)
# print the summary
print(response["choices"][0]["text"].strip())
i += 1
| [
"Summarize the following article PLACEHOLDER"
] |
2024-01-10 | samsonleegh/convai_smile | src~inference_archive.py | from simpletransformers.conv_ai import ConvAIModel, ConvAIArgs
from transformers import cached_path
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, GPT2LMHeadModel, GPT2Tokenizer
# model trained on https://colab.research.google.com/drive/1M6bjD4zbn8FHY83ZOap6d7vMqXKCLuPi?authuser=2#scrollTo=1SEeIDdsDmlJ
interact_args = {
"cache_dir": "./cache_dir/",
"max_length":50,
"do_sample":True, #sampling, False will set to greedy encoding
"temperature":0.7,
"top_k":0,
"top_p":0.9,
"max_history":5
}
tuned_model = ConvAIModel("gpt", "./saved_model",
use_cuda=False,
args=interact_args)
def generate_reply(personality, history, user_input):
response, history = tuned_model.interact_single(user_input,
history,
personality=personality)
return response, history
# USER_INPUT = "I am suffering from anxiety and depression. What should I do?"
# generate_reply(personality=[], history=[], user_input=USER_INPUT)
if __name__ == '__main__':
PERSONALITY = []
HISTORY = []
while True:
USER_INPUT = input()
response, history = generate_reply(PERSONALITY, history=HISTORY, user_input=USER_INPUT)
print(response, history)
HISTORY = HISTORY + history
| [] |
2024-01-10 | abhishek-dangol/AI_FullStack | copycat.py | from multiprocessing.sharedctypes import Value
from typing import List
import os
import openai
import argparse
import re
MAX_INPUT_LENGTH = 12
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--input", "-i", type=str, required=True)
args = parser.parse_args()
user_input = args.input
print(f"User input: {user_input}")
if validate_length(user_input):
generate_branding_snippet(user_input)
generate_keywords(user_input)
else:
raise ValueError(f"The word you entered is too long! Must be under {MAX_INPUT_LENGTH} characters! Try again.")
def validate_length(prompt: str) -> bool:
return len(prompt) <= MAX_INPUT_LENGTH
def generate_branding_snippet(prompt: str):
# Load your API key from an environment variable or secret management service
openai.api_key = os.getenv("OPENAI_API_KEY")
enriched_prompt = f"Generate upbeat branding snippet for {prompt}: "
print(enriched_prompt)
response = openai.Completion.create(model="text-davinci-002", prompt=enriched_prompt, temperature=0, max_tokens=32)
# Extract output text
branding_text: str = response["choices"][0]["text"]
# Strip whitespace
branding_text = branding_text.strip()
# Add ... to truncated statements
last_char = branding_text[-1]
if last_char not in {".", "!", "?"}:
branding_text += "..."
print(f"Snippet: {branding_text}")
return branding_text
def generate_keywords(prompt: str) -> List[str]:
# Load your API key from an environment variable or secret management service
openai.api_key = os.getenv("OPENAI_API_KEY")
enriched_prompt = f"Generate related branding keywords for {prompt}: "
print(enriched_prompt)
response = openai.Completion.create(model="text-davinci-002", prompt=enriched_prompt, temperature=0, max_tokens=32)
# Extract output text
keywords_text: str = response["choices"][0]["text"]
# Strip whitespace
keywords_text = keywords_text.strip()
keywords_array = re.split(",|\n|;|-}", keywords_text)
keywords_array = [k.lower().strip() for k in keywords_array]
keywords_array = [k for k in keywords_array if len(k) > 0]
print(f"Keywords: {keywords_array}")
return keywords_array
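# Illustrative sketch (editor addition): both generators take the raw brand prompt.
# generate_keywords returns a list of lowercase keyword strings parsed from the model
# output; the exact keywords depend on the API response, so this is left commented out.
# keywords = generate_keywords("coffee shop")   # e.g. ['coffee', 'espresso', ...]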
if __name__ == "__main__":
main() | [
"Generate related branding keywords for PLACEHOLDER: ",
"Generate upbeat branding snippet for PLACEHOLDER: "
] |
2024-01-10 | vibuverma/steam-reviews-topic-modeling | model~utils.py | from collections import Counter
from sklearn.metrics import silhouette_score
import umap
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from gensim.models.coherencemodel import CoherenceModel
import numpy as np
import os
def get_topic_words(token_lists, labels, k=None):
"""
get top words within each topic from clustering results
"""
if k is None:
k = len(np.unique(labels))
topics = ['' for _ in range(k)]
for i, c in enumerate(token_lists):
topics[labels[i]] += (' ' + ' '.join(c))
word_counts = list(map(lambda x: Counter(x.split()).items(), topics))
# get sorted word counts
word_counts = list(map(lambda x: sorted(x, key=lambda x: x[1], reverse=True), word_counts))
# get topics
topics = list(map(lambda x: list(map(lambda x: x[0], x[:10])), word_counts))
return topics
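# Worked mini-example for get_topic_words (editor addition; the data is made up purely
# for illustration):
# token_lists = [['good', 'game'], ['bad', 'graphics'], ['good', 'story']]
# labels = [0, 1, 0]
# get_topic_words(token_lists, labels)
# # -> [['good', 'game', 'story'], ['bad', 'graphics']]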
def get_coherence(model, token_lists, measure='c_v'):
"""
Get model coherence from gensim.models.coherencemodel
:param model: Topic_Model object
:param token_lists: token lists of docs
    :param measure: coherence metric (e.g. 'c_v')
:return: coherence score
"""
if model.method == 'LDA':
cm = CoherenceModel(model=model.ldamodel, texts=token_lists, corpus=model.corpus, dictionary=model.dictionary,
coherence=measure)
else:
topics = get_topic_words(token_lists, model.cluster_model.labels_)
cm = CoherenceModel(topics=topics, texts=token_lists, corpus=model.corpus, dictionary=model.dictionary,
coherence=measure)
return cm.get_coherence()
def get_silhouette(model):
"""
Get silhouette score from model
:param model: Topic_Model object
:return: silhouette score
"""
if model.method == 'LDA':
return
lbs = model.cluster_model.labels_
vec = model.vec[model.method]
return silhouette_score(vec, lbs)
def plot_proj(embedding, lbs):
"""
Plot UMAP embeddings
:param embedding: UMAP (or other) embeddings
:param lbs: labels
"""
n = len(embedding)
counter = Counter(lbs)
for i in range(len(np.unique(lbs))):
plt.plot(embedding[:, 0][lbs == i], embedding[:, 1][lbs == i], '.', alpha=0.5,
label='cluster {}: {:.2f}%'.format(i, counter[i] / n * 100))
plt.legend()
def visualize(model):
"""
Visualize the result for the topic model by 2D embedding (UMAP)
:param model: Topic_Model object
"""
if model.method == 'LDA':
return
reducer = umap.UMAP()
print('Calculating UMAP projection ...')
vec_umap = reducer.fit_transform(model.vec[model.method])
print('Calculating UMAP projection. Done!')
plot_proj(vec_umap, model.cluster_model.labels_)
dr = '/contextual_topic_identification/docs/images/{}/{}'.format(model.method, model.id)
if not os.path.exists(dr):
os.makedirs(dr)
plt.savefig(dr + '/2D_vis')
def get_wordcloud(model, token_lists, topic):
"""
Get word cloud of each topic from fitted model
:param model: Topic_Model object
    :param token_lists: token lists of docs
    :param topic: topic index to visualize
"""
if model.method == 'LDA':
return
print('Getting wordcloud for topic {} ...'.format(topic))
lbs = model.cluster_model.labels_
tokens = ' '.join([' '.join(_) for _ in np.array(token_lists)[lbs == topic]])
wordcloud = WordCloud(width=800, height=560,
background_color='white', collocations=False,
min_font_size=10).generate(tokens)
# plot the WordCloud image
plt.figure(figsize=(8, 5.6), facecolor=None)
plt.imshow(wordcloud)
plt.axis("off")
plt.tight_layout(pad=0)
dr = '/contextual_topic_identification/docs/images/{}/{}'.format(model.method, model.id)
if not os.path.exists(dr):
os.makedirs(dr)
plt.savefig(dr + '/Topic' + str(topic) + '_wordcloud')
print('Getting wordcloud for topic {}. Done!'.format(topic)) | [] |
2024-01-10 | thisismattmiller/eatGPT | build_embeddings.py | import numpy as np
import openai
import pandas as pd
import pickle
import tiktoken
COMPLETIONS_MODEL = "text-davinci-003"
EMBEDDING_MODEL = "text-embedding-ada-002"
# prompt = "Who won the 2020 Summer Olympics men's high jump?"
# x = openai.Completion.create(
# prompt=prompt,
# temperature=0,
# max_tokens=300,
# model=COMPLETIONS_MODEL
# )["choices"][0]["text"].strip(" \n")
# print(x)
def get_embedding(text: str, model: str=EMBEDDING_MODEL) -> list[float]:
result = openai.Embedding.create(
model=model,
input=text
)
return result["data"][0]["embedding"]
def compute_doc_embeddings(df: pd.DataFrame) -> dict[tuple[str, str], list[float]]:
"""
Create an embedding for each row in the dataframe using the OpenAI Embeddings API.
Return a dictionary that maps between each embedding vector and the index of the row that it corresponds to.
"""
return {
idx: get_embedding(r.content) for idx, r in df.iterrows()
}
df = pd.read_csv('docs.csv')
df = df.set_index(["title", "heading"])
embeddings = compute_doc_embeddings(df)
file = open('embeddings.binary', 'wb')
pickle.dump(embeddings, file)
file.close()
| [] |
2024-01-10 | thisismattmiller/eatGPT | lambda_function.py | import pickle
import numpy as np
import openai
import tiktoken
import pandas as pd
COMPLETIONS_MODEL = "text-davinci-003"
EMBEDDING_MODEL = "text-embedding-ada-002"
def get_embedding(text: str, model: str=EMBEDDING_MODEL) -> list[float]:
result = openai.Embedding.create(
model=model,
input=text
)
return result["data"][0]["embedding"]
def vector_similarity(x: list[float], y: list[float]) -> float:
"""
Returns the similarity between two vectors.
Because OpenAI Embeddings are normalized to length 1, the cosine similarity is the same as the dot product.
"""
return np.dot(np.array(x), np.array(y))
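# Tiny worked example (editor addition): for unit-length vectors the dot product below
# equals the cosine similarity, e.g.
# vector_similarity([1.0, 0.0], [0.6, 0.8]) == 0.6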
def order_document_sections_by_query_similarity(query: str, contexts: dict[(str, str), np.array]) -> list[(float, (str, str))]:
"""
Find the query embedding for the supplied query, and compare it against all of the pre-calculated document embeddings
to find the most relevant sections.
Return the list of document sections, sorted by relevance in descending order.
"""
query_embedding = get_embedding(query)
document_similarities = sorted([
(vector_similarity(query_embedding, doc_embedding), doc_index) for doc_index, doc_embedding in contexts.items()
], reverse=True)
return document_similarities
MAX_SECTION_LEN = 2500
SEPARATOR = "\n* "
ENCODING = "gpt2" # encoding for text-davinci-003
encoding = tiktoken.get_encoding(ENCODING)
separator_len = len(encoding.encode(SEPARATOR))
def construct_prompt(question: str, context_embeddings: dict, df: pd.DataFrame) -> str:
"""
    Fetch the most relevant document sections for the question and construct the completion prompt.
"""
most_relevant_document_sections = order_document_sections_by_query_similarity(question, context_embeddings)
chosen_sections = []
chosen_sections_len = 0
chosen_sections_indexes = []
for _, section_index in most_relevant_document_sections:
# Add contexts until we run out of space.
document_section = df.loc[section_index]
# print(document_section)
chosen_sections_len += document_section.tokens + separator_len
if chosen_sections_len > MAX_SECTION_LEN and len(chosen_sections) > 0:
# print('breaking')
break
chosen_sections.append(SEPARATOR + document_section.content.replace("\n", " "))
print(chosen_sections)
chosen_sections_indexes.append(str(section_index))
# Useful diagnostic information
# print(f"Selected {len(chosen_sections)} document sections:")
# print("\n".join(chosen_sections_indexes))
header = """Answer the question as truthfully as possible using the provided context, don't answer in the first person, use full names, and if the answer is not contained within the text below, say "I don't know."\n\nContext:\n"""
return { 'prompt': header + "".join(chosen_sections) + "\n\n Q: " + question + "\n A:", 'docs': most_relevant_document_sections }
COMPLETIONS_API_PARAMS = {
# We use temperature of 0.0 because it gives the most predictable, factual answer.
"temperature": 0.0,
"max_tokens": 300,
"model": COMPLETIONS_MODEL,
}
def answer_query_with_context(
query: str,
df: pd.DataFrame,
document_embeddings: dict[(str, str), np.array],
show_prompt: bool = False
) -> str:
prompt = construct_prompt(
query,
document_embeddings,
df
)
docs = prompt['docs']
prompt = prompt['prompt']
if show_prompt:
print(prompt)
response = openai.Completion.create(
prompt=prompt,
timeout=5.0,
**COMPLETIONS_API_PARAMS
)
return { 'response' : response["choices"][0]["text"].strip(" \n"), 'docs' :docs}
def lambda_handler(event, context):
df = pd.read_csv('docs.csv')
df = df.set_index(["title", "heading"])
file = open("embeddings.binary",'rb')
document_embeddings = pickle.load(file)
file.close()
print(event)
results = answer_query_with_context(event['queryStringParameters']['q'], df, document_embeddings)
docs = []
for doc in results['docs']:
docs.append(doc[1][1])
if len(docs) >=10:
break
results['docs'] = docs
return results
| [] |
2024-01-10 | Jintao-Huang/ml_alg | libs~_env.py | import openai
import os
# The following environment variables are expected to be set by the user; they are read here for convenience.
TORCH_HOME = os.environ.get("TORCH_HOME", None)
DATASETS_PATH = os.environ.get("DATASETS_PATH", "./.dataset")
HF_HOME = os.environ.get("HF_HOME", None)
CACHE_HOME = os.environ.get("CACHE_HOME", "./.cache")
PROXIES = {
'http': '127.0.0.1:7890',
'https': '127.0.0.1:7890'
}
HEADERS = {
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36",
}
#
_OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
if _OPENAI_API_KEY:
openai.api_key: str = os.getenv("OPENAI_API_KEY")
_OPENAI_ORG = os.getenv("OPENAI_ORG")
if _OPENAI_ORG is not None:
openai.organization: str = os.getenv("OPENAI_ORG")
# | [] |
2024-01-10 | Jintao-Huang/ml_alg | libs~_types.py | import os
import shutil
import sys
import heapq
import bisect
import operator
import pickle
import json
import nltk
from nltk import sent_tokenize, word_tokenize
import math
import statistics as stat
import time
import datetime as dt
import logging
from logging import Logger, Handler
import random
import threading as td
import multiprocessing as mp
import re
from re import Match
import unittest as ut
import platform
import csv
from enum import Enum
from inspect import getmembers, isfunction, ismethod
from pprint import pprint
from dataclasses import dataclass, field
#
import gradio as gr
from warnings import filterwarnings
from operator import itemgetter, attrgetter
from pprint import pprint
from itertools import (
chain, accumulate, product, permutations, combinations, combinations_with_replacement,
compress, starmap, zip_longest
)
from functools import partial, cache, lru_cache, cmp_to_key, reduce
from copy import copy, deepcopy
from argparse import ArgumentParser, Namespace
from queue import Queue, SimpleQueue, PriorityQueue
from hashlib import sha256
from typing import (
Literal, List, Tuple, Dict, Set, Callable, Optional, Union, Any,
Deque, NamedTuple, DefaultDict, Counter, OrderedDict,
Sequence, Mapping, Iterable, Iterator, TypeVar, Generic, Generator
)
from typing_extensions import TypeAlias, Self
from types import SimpleNamespace
# from collections import deque, namedtuple, OrderedDict, defaultdict, Counter # use typing
from _collections_abc import dict_items, dict_keys, dict_values
from contextlib import contextmanager
from numbers import Number
from fractions import Fraction
import pyximport
#
from flask import Flask, url_for, render_template
import yaml
from sortedcontainers import SortedList, SortedDict, SortedSet
from tqdm import tqdm
import numpy as np
from numpy import ndarray
from numpy.random import RandomState
from numpy.typing import NDArray, ArrayLike
import pandas as pd
from pandas import DataFrame, Series
#
import numba
from numba import jit, njit, vectorize, guvectorize
from numba.core.types import (
void, uint8, int32, int64, float16, float32, float64, boolean, string,
ListType, List as ReflectList, Array
)
from numba.typed.typedlist import List as TypedList
from numba.typed.typeddict import Dict as TypedDict
from numba import typeof
#
from urllib.parse import urljoin
from urllib.error import HTTPError
from urllib.request import urlretrieve
import requests
from lxml import etree
#
from xml.etree.ElementTree import ElementTree as ET, Element
from lxml.etree import _Element as Element2
from selenium.webdriver.remote.webelement import WebElement
#
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver import Keys, Proxy, ActionChains
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.common.exceptions import NoSuchElementException
#
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from matplotlib.colors import to_rgb
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
from PIL import Image
import cv2 as cv
#
from sklearn.model_selection import cross_val_predict
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans, DBSCAN
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.manifold import TSNE
#
import torch
from torch import Tensor, dtype as Dtype, device as Device, Generator as TGenerator
from torch.nn import Module
import torch.linalg as tl
from torch.optim import Optimizer
from torch.nn.parameter import Parameter
import torch.cuda as cuda
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd.function import FunctionCtx, Function
from torch.optim import lr_scheduler as lrs
from torch.optim.lr_scheduler import _LRScheduler as LRScheduler
import torch.nn.init as init
from torch.nn.utils.clip_grad import clip_grad_norm_, clip_grad_value_
from torch.nn.utils.fusion import fuse_conv_bn_eval, fuse_linear_bn_eval
from torch.nn.parallel import DataParallel as DP, DistributedDataParallel as DDP
from torch.utils.data import (
Dataset, IterableDataset, TensorDataset,
Sampler, RandomSampler, SequentialSampler, BatchSampler, DistributedSampler,
DataLoader, default_collate, get_worker_info,
random_split
)
from torch.nn.utils.rnn import pad_sequence
from torch.utils.checkpoint import checkpoint
import torch.utils.data as tud
from torch.utils.tensorboard.writer import SummaryWriter
from torch.nn.modules.module import _IncompatibleKeys as IncompatibleKeys
import torch.distributed as dist
from torch.multiprocessing.spawn import spawn
from torch.cuda.amp.grad_scaler import GradScaler
from torch.amp.autocast_mode import autocast
#
from peft.utils.config import PeftConfig
from peft.peft_model import PeftModelForCausalLM, PeftModel
from peft.tuners.lora import LoraConfig
#
import torchvision.transforms._functional_tensor as tvtF_t
import torchvision.transforms._functional_pil as tvtF_pil
import torchvision.transforms.functional as tvtF
from torchvision.transforms.functional import InterpolationMode, pil_modes_mapping
import torchvision as tv
import torchvision.transforms as tvt
import torchvision.datasets as tvd
from torchvision.datasets import MNIST, FashionMNIST, CIFAR10, CIFAR100, STL10
from torchvision.utils import make_grid, save_image
import torchvision.models as tvm
from torchvision.models import ResNet, DenseNet, resnet18
#
import lightning.pytorch as pl
from lightning.pytorch import LightningDataModule, LightningModule, Trainer
from lightning.pytorch.callbacks import Callback
from lightning.pytorch.cli import LightningCLI
from lightning_utilities.core.rank_zero import rank_zero_only
#
from transformers.pipelines import pipeline
from transformers.generation.streamers import TextStreamer
from transformers.models.auto.modeling_auto import (AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM,
AutoModelForSequenceClassification, AutoModelForQuestionAnswering,
AutoModelForMultipleChoice, AutoModelForTokenClassification)
from transformers.models.auto.tokenization_auto import AutoTokenizer
from transformers.models.auto.configuration_auto import AutoConfig
from transformers.data.data_collator import DataCollatorForLanguageModeling, DataCollatorWithPadding
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.modeling_utils import PreTrainedModel
from transformers.configuration_utils import PretrainedConfig
from datasets.load import load_dataset
from datasets.combine import concatenate_datasets
#
from modelscope.msdatasets import MsDataset
from modelscope.hub.snapshot_download import snapshot_download
#
from torchmetrics import MeanMetric, Metric
from torchmetrics.classification.accuracy import Accuracy
from torchmetrics.classification.precision_recall import Precision, Recall
from torchmetrics.classification.f_beta import F1Score, FBetaScore
from torchmetrics.classification.auroc import AUROC
from torchmetrics.classification.average_precision import AveragePrecision
# Use the metrics from libs_ml instead (faster than torchmetrics.functional).
#
import gym
from gym import Env
import openai
from flask import Flask, url_for, render_template, redirect, request
from markupsafe import Markup
from wtforms import StringField, Form, SubmitField, PasswordField, BooleanField
from wtforms.validators import DataRequired, Length
#
import mini_lightning as ml
# _remove_keys, _key_add_suffix
| [] |
2024-01-10 | taisazero/socratic-debugging-benchmark | inference~gpt_inference.py | import time
import sys
from transformers import GPT2Tokenizer
import openai
import os
class GPT3Model(object):
def __init__(self, model_name, api_key, logger=None):
self.model_name = model_name
try:
openai.api_key = api_key
except Exception:
pass
self.tokenizer = GPT2Tokenizer.from_pretrained("gpt2-xl")
self.logger=logger
def do_inference(self, input, output, max_length=2048):
losses = []
data = input + output
response = self.gpt3(data)
out = response["choices"][0]
assert input + output == out["text"]
i = 0
# find the end position of the input...
i = out['logprobs']['text_offset'].index(len(input) - 1)
if i == 0:
i = i + 1
print('eval text', out['logprobs']['tokens'][i: -1])
loss = -sum(out['logprobs']["token_logprobs"][i:-1]) # ignore the last '.'
avg_loss = loss / (len(out['logprobs']['text_offset']) - i-1) # 1 is the last '.'
print('avg_loss: ', avg_loss)
losses.append(avg_loss)
return avg_loss
def gpt3(self, prompt, max_len=0, temp=0, num_log_probs=0, echo=True, n=None):
response = None
received = False
while not received:
try:
response = openai.Completion.create(engine=self.model_name,
prompt=prompt,
max_tokens=max_len,
temperature=temp,
logprobs=num_log_probs,
echo=echo,
stop='\n',
n=n)
print('prompt: ',prompt)
received = True
except:
error = sys.exc_info()[0]
if error == openai.error.InvalidRequestError:
# something is wrong: e.g. prompt too long
print(f"InvalidRequestError\nPrompt passed in:\n\n{prompt}\n\n")
assert False
print("API error:", error)
time.sleep(1)
return response
class ChatGPTModel(object):
def __init__(self, model_name= 'gpt-3.5-turbo',
api_key= None, logger=None,
steering_prompt='',
generation_args = {
"max_tokens": 256,
"temperature": 0.0,
"top_p": 0.0,
"frequency_penalty": 0,
"presence_penalty": 0,
"stop": None,
"n": 1, # number of responses to return,
"stream": False,
}):
self.model_name = model_name
try:
openai.api_key = api_key
except Exception:
pass
self.tokenizer = GPT2Tokenizer.from_pretrained("gpt2-xl")
self.logger=logger
self.steering_prompt = steering_prompt
self.generation_args = generation_args
def do_inference(self, input, output, max_length=2048):
raise NotImplementedError
def generate (self, prompt, echo=False):
return self.chatgpt(prompt, echo)
def generate_turn(self, turns, echo=False, user_identifier='user', system_identifier='system'):
response = None
received = False
messages = [
{"role": "system", "content": self.steering_prompt},
]
for i, turn in enumerate(turns):
speaker, text = turn
if speaker == user_identifier:
messages.append({"role": "user", "content": text})
elif speaker == system_identifier:
messages.append({"role": "assistant", "content": text})
while not received:
try:
completion = openai.ChatCompletion.create(
model=self.model_name,
messages=messages,
**self.generation_args
)
if self.generation_args['n'] > 1:
# return all responses
return list(set([c.message['content'] for c in completion.choices]))
if echo:
print(completion.choices)
print('prompt: ', turns)
received = True
response = completion.choices[0].message
except:
error = sys.exc_info()[0]
if error == openai.error.InvalidRequestError:
# something is wrong: e.g. prompt too long
print(f"InvalidRequestError\nPrompt passed in:\n\n{turns}\n\n")
assert False
print("API error:", error)
time.sleep(10)
return response ['content']
def chatgpt(self, prompt, echo=False):
response = None
received = False
while not received:
try:
completion = openai.ChatCompletion.create(
model=self.model_name,
messages=[
{"role": "system", "content": self.steering_prompt},
{"role": "user", "content": prompt},
],
**self.generation_args
)
if self.generation_args['n'] > 1:
# return all responses
return list(set([c.message['content'] for c in completion.choices]))
if echo:
print(completion.choices[0].message)
print('prompt: ', prompt)
received = True
response = completion.choices[0].message
except:
error = sys.exc_info()[0]
if error == openai.error.InvalidRequestError:
# something is wrong: e.g. prompt too long
print(f"InvalidRequestError\nPrompt passed in:\n\n{prompt}\n\n")
assert False
print("API error:", error)
time.sleep(10)
return response ['content']
# unit tests
import pytest
def test_chatgpt_generation():
generation_args = {
"max_tokens": 256,
"temperature": 0.0,
"top_p": 0.0,
"frequency_penalty": 0,
"presence_penalty": 0,
"stop": None,
"n": 3, # number of responses to return,
"stream": False,
}
oai_key = open('.streamlit/oai_key.txt', 'r').read()
model = ChatGPTModel(generation_args=generation_args, api_key=oai_key)
prompt = "Hello, how are you?"
response = model.generate(prompt)
assert response is not None
assert len(response) > 0
print(response)
def test_gpt4_generation():
generation_args = {
"max_tokens": 256,
"temperature": 0.0,
"top_p": 0.0,
"frequency_penalty": 0,
"presence_penalty": 0,
"stop": None,
"n": 2, # number of responses to return,
"stream": False,
}
oai_key = open('.streamlit/oai_key.txt', 'r').read()
model = ChatGPTModel(model_name = 'gpt-4', generation_args=generation_args, api_key=oai_key)
prompt = "Hello, how are you?"
response = model.generate(prompt)
assert response is not None
assert len(response) > 0
print(response)
if __name__ == "__main__":
test_chatgpt_generation()
test_gpt4_generation()
| [
"Hello, how are you?"
] |
2024-01-10 | doodledood/decision-assistant | steps.py | import itertools
import json
import os
from typing import List, Dict, Tuple, Optional, Callable, Generator
import ahpy
import questionary
from chatflock.backing_stores import InMemoryChatDataBackingStore, LangChainMemoryBasedChatDataBackingStore
from chatflock.base import Chat
from chatflock.conductors import RoundRobinChatConductor, LangChainBasedAIChatConductor
from chatflock.parsing_utils import chat_messages_to_pydantic
from chatflock.participants import LangChainBasedAIChatParticipant, UserChatParticipant
from chatflock.renderers import TerminalChatRenderer, NoChatRenderer
from chatflock.structured_string import Section, StructuredString
from chatflock.use_cases.request_response import get_response
from chatflock.web_research import WebSearch
from halo import Halo
from langchain.chat_models import ChatOpenAI
from langchain.llms.openai import OpenAI
from langchain.memory import ConversationSummaryBufferMemory
from langchain.tools import BaseTool
from pydantic import BaseModel, Field
from presentation import generate_decision_report_as_html, save_html_to_file, open_html_file_in_browser
from ranking.ranking import topsis_score, normalize_label_value
from state import DecisionAssistantState
def fix_string_based_on_list(s: str, l: List[str]) -> Optional[str]:
for item in l:
if item.lower() in s.lower():
return item
return None
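# Editor note: this helper does a case-insensitive substring match, e.g.
# fix_string_based_on_list('affordability score', ['Affordability', 'Durability'])
# returns 'Affordability', and returns None when nothing matches.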
class Criterion(BaseModel):
name: str = Field(description='The name of the criterion. Example: "Affordability".')
description: str = Field(
description='A description of the criterion. Includes the sub-criteria and how to assign scale values to data.')
scale: List[str] = Field(
description='The scale of the criterion, from worst to best. Labels only. No numerical value, '
                    'no explanations. Example: "Very Expensive".')
class GoalIdentificationResult(BaseModel):
goal: str = Field(description='The identified decision-making goal.')
class CriteriaIdentificationResult(BaseModel):
criteria: List[Criterion] = Field(description='The identified criteria for evaluating the decision.')
class AlternativeListingResult(BaseModel):
alternatives: List[str] = Field(description='The identified alternatives for the decision.')
class CriteriaResearchQueriesResult(BaseModel):
criteria_research_queries: Dict[str, List[str]] = Field(
description='The research queries for each criteria. Key is the criterion name, value is a list of research '
'queries for that criterion.')
class AlternativeCriteriaResearchFindingsResult(BaseModel):
updated_research_findings: str = Field(
description='The updated and aggregated research findings for the alternative and criterion. Formatted as '
'rich markdown with all the citations and links in place.')
label: str = Field(
description='The label assigned to the alternative and criterion based on the aggregated research findings '
'and user discussion. The label is assigned from the scale of the criterion (name of the label).')
class Alternative(BaseModel):
name: str = Field(description='The name of the alternative.')
criteria_data: Optional[Dict[str, Tuple[str, int]]] = Field(
description='The research data collected for each criterion for this alternative. Key is the name of the '
'criterion. Value is a tuple of the research data as text and the assigned value based on the '
'scale of the criterion.')
def gather_unique_pairwise_comparisons(
criteria_names: List[str],
predict_fn: Optional[Callable[[str, List[str], Dict[Tuple[str, str], str]], str]] = None,
previous_comparisons: Optional[List[Tuple[Tuple[str, str], float]]] = None,
on_question_asked: Optional[Callable[[Tuple[str, str], float], None]] = None) \
-> Generator[Tuple[Tuple[str, str], float], None, None]:
choices = {
'Absolutely less important': 1 / 9,
'A lot less important': 1 / 7,
'Notably less important': 1 / 5,
'Slightly less important': 1 / 3,
'Just as important': 1,
'Slightly more important': 3,
'Notably more important': 5,
'A lot more important': 7,
'Absolutely more important': 9
}
value_to_choice = {v: k for k, v in choices.items()}
ordered_choice_names = [choice[0] for choice in sorted(choices.items(), key=lambda x: x[1])]
comparisons = dict(previous_comparisons)
all_combs = list(itertools.combinations(criteria_names, 2))
for i, (label1, label2) in enumerate(all_combs):
if (label1, label2) in comparisons:
continue
question_text = f'({i + 1}/{len(all_combs)}) How much more important is "{label1}" when compared to "{label2}"?'
if predict_fn is not None:
comparisons_with_str_choice = {k: value_to_choice[v] for k, v in comparisons.items()}
predicted_answer = predict_fn(question_text, ordered_choice_names, comparisons_with_str_choice)
else:
predicted_answer = ordered_choice_names[len(ordered_choice_names) // 2]
answer = questionary.select(
question_text,
choices=ordered_choice_names,
default=predicted_answer,
).ask()
labels = (label1, label2)
value = choices[answer]
comparisons[labels] = value
yield labels, value
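# Illustrative sketch (editor addition): the (labels, value) pairs produced above feed
# ahpy.Compare exactly as done in prioritize_criteria below. The criteria names here
# are made up and the resulting weights are approximate.
# comparisons = {('Cost', 'Quality'): 3, ('Cost', 'Speed'): 5, ('Quality', 'Speed'): 3}
# weights = ahpy.Compare('Criteria', comparisons).target_weights
# # -> roughly {'Cost': 0.63, 'Quality': 0.26, 'Speed': 0.11}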
def identify_goal(chat_model: ChatOpenAI, state: DecisionAssistantState,
tools: Optional[List[BaseTool]] = None, spinner: Optional[Halo] = None):
if state.data.get('goal') is not None:
return
ai = LangChainBasedAIChatParticipant(
name='Decision-Making Goal Identifier',
role='Decision-Making Goal Identifier',
personal_mission='Identify a clear and specific decision-making goal from the user\'s initial vague statement.',
other_prompt_sections=[
Section(
name='Process',
list=[
'Start by greeting the user and asking for their decision-making goal. Example: "Hello, '
'what is your decision-making goal?"',
'If the goal is not clear, ask for clarification and refine the goal.',
'If the goal is clear, confirm it with the user.',
]
),
Section(
name='User Decision Goal',
list=[
'One and only one decision goal can be identified.',
'The goal should be clear and specific.',
'The goal should be a decision that can be made by the user.',
'No need to go beyond the goal. The next step will be to identify alternatives and criteria for '
'the decision.'
]
),
Section(
name='Last Message',
list=[
                        'After the goal has been identified, the last message should include the goal.',
'It should end with the word TERMINATE at the end of the message to signal the end of the chat.'
]
)
],
tools=tools,
chat_model=chat_model,
spinner=spinner)
user = UserChatParticipant(name='User')
participants = [ai, user]
chat = Chat(
backing_store=InMemoryChatDataBackingStore(),
renderer=TerminalChatRenderer(),
initial_participants=participants
)
chat_conductor = RoundRobinChatConductor()
_ = chat_conductor.initiate_dialog(chat=chat)
goal = chat_messages_to_pydantic(
chat_messages=chat.get_messages(),
chat_model=chat_model,
output_schema=GoalIdentificationResult,
spinner=spinner
)
goal = goal.goal
state.data = {**state.data, **dict(goal=goal)}
def identify_alternatives(chat_model: ChatOpenAI, tools: List[BaseTool],
state: DecisionAssistantState, spinner: Optional[Halo] = None):
if state.data.get('alternatives') is not None:
return
ai = LangChainBasedAIChatParticipant(
name='Decision-Making Alternative Consultant',
role='Decision-Making Alternative Consultant',
personal_mission='Assist the user in identifying alternatives for the decision-making process.',
other_prompt_sections=[
Section(
name='Interaction Schema',
list=[
'This is the second part of the decision-making process, after the goal has been identified. No '
'need for a greeting.',
'Start by asking the user for alternatives they had in mind for the decision.',
'Assist the user in generating alternatives if they are unsure or struggle to come up with '
'options or need help researching more ideas. You can use the web search tool and your own '
'knowledge for this.',
'List the final list of alternatives and confirm with the user before moving on to the next step.'
]
),
Section(
name='Requirements',
list=[
'At the end of the process there should be at least 2 alternatives and no more than 20.'
]
),
Section(
name='Alternatives',
list=[
'The alternatives should be clear and specific.',
'The alternatives should be options that the user can choose from.',
'Naming the alternatives should be done in a way that makes it easy to refer to them later on.',
'For example, for a goal such as "Decide which school to go to": The alternative "Go to school X" '
'is bad, while "School X" is good.'
]
),
Section(
name='The Last Message',
list=[
'The last response should include the list of confirmed alternatives.',
'It should end with the word TERMINATE at the end of the message to signal the end of the chat.'
]
)
],
tools=tools,
chat_model=chat_model,
spinner=spinner)
user = UserChatParticipant(name='User')
participants = [user, ai]
chat = Chat(
backing_store=InMemoryChatDataBackingStore(),
renderer=TerminalChatRenderer(),
initial_participants=participants
)
chat_conductor = RoundRobinChatConductor()
_ = chat_conductor.initiate_dialog(chat=chat, initial_message=str(StructuredString(
sections=[
Section(name='Goal', text=state.data['goal']),
]
)))
output = chat_messages_to_pydantic(
chat_messages=chat.get_messages(),
chat_model=chat_model,
output_schema=AlternativeListingResult,
spinner=spinner
)
alternatives = output.alternatives
state.data = {**state.data, **dict(alternatives=alternatives)}
def identify_criteria(chat_model: ChatOpenAI, tools: List[BaseTool],
state: DecisionAssistantState, spinner: Optional[Halo] = None):
if state.data.get('criteria') is not None:
return
shared_prompt_sections = [
Section(
name='Process Stage',
text='This is the third part of the decision-making process, after the goal and alternatives have been '
'identified. No need for a greeting.'
),
]
criteria_brainstormer = LangChainBasedAIChatParticipant(
name='Criteria Brainstormer',
role='Criteria Brainstormer',
personal_mission='Brainstorm and iterate on the best set of criteria for the decision-making process.',
other_prompt_sections=shared_prompt_sections + [
Section(
name='Criteria Identification Methodology',
list=[
'Start by suggesting an initial set of criteria that is as orthogonal, non-overlapping, '
'and comprehensive as possible (including the scale, sub-criteria, and description).',
'Iterate on the criteria with the critic until you both are satisfied with them.',
'Once you both are satisfied, confirm the criteria with the user and ask for feedback.',
                    'The criteria should include the scale and description of each criterion, including '
'the sub-criteria and how to assign scale values to data.'
]
),
Section(name='Criteria Description', list=[
'The description should include the sub-criteria and how to assign scale values to data. That means '
'that each criterion should include concrete measures (like indexes, specific indicators, statistics '
'if there are any) to allow for the researcher to accurately compare alternatives later on',
'The measures should be as objective and specific as possible.',
'These measures should be reflective of the sub-criteria and the scale of the criterion.'
]),
Section(name='Scale Definition', list=[
                'The scale should be a list of labels only. No numerical values, no explanations. Example: '
'"Very Expensive".',
'The scale should be ordered from worst to best. Example: "Very Expensive" should come before '
'"Expensive".',
                'Make sure the values for the scale are roughly evenly spaced out. Example: "Very '
'Expensive" should be roughly as far from "Expensive" as "Expensive" is from "Fair".'
]),
Section(name='General Formatting', list=[
'Make sure all the criteria are formatted nicely in markdown format and are easy to read, including '
                'their description, sub-criteria and explanations.'
]),
Section(
name='Requirements',
sub_sections=[
Section(
name='Criteria',
list=[
'At the end of the process there MUST be at least 1 criterion and no more than 15 criteria.',
]),
Section(
name='Scales',
list=[
'Scales MUST be on at least 2-point scale and no more than 7-point scale.'
]
)
]
),
Section(
name='The Last Message',
list=[
'The last response should include the list of confirmed criteria and their respective scales, '
'numbered from 1 to N, where N is the best outcome for the criteria.'
]
)
],
tools=tools,
chat_model=chat_model,
spinner=spinner)
criteria_critic = LangChainBasedAIChatParticipant(
name='Criteria Critic',
role='Criteria Critic',
personal_mission='Critique the criteria and provide feedback on what to improve.',
other_prompt_sections=shared_prompt_sections + [
Section(
name='Criteria Critiquing',
list=[
'When critiquing the criteria, make sure they are orthogonal, non-overlapping, and comprehensive.',
'When critiquing the scales, make sure they are ordered from worst to best, evenly spaced out, '
'and have labels that make sense.',
],
sub_sections=[
Section(
name='Questions to ask yourself',
list=[
'Are all the criteria named such that the worst option on their respective potential '
'scales is the worst outcome for the decision, and vise-versa for the last label/best '
'outcome?',
'Are there any criteria that are redundant or duplicated?',
'Are there any criteria that are missing to create a comprehensive set of criteria?',
'Is the criteria set maximally orthogonal and non-overlapping?',
'Are there any criteria that are too subjective or vague?',
'Same thing for the sub-criteria within the main criteria.',
'Is there at least 1 criterion identified?',
'Are there no more than 15 criteria identified?',
'Are all the descriptions for the criteria clear and easy to understand?',
'Do all the descriptions include concrete measures (like indexes, metrics, statistics, '
                            'etc. - if possible) that can be effectively used to '
'research and compare alternatives later on?',
'Are all the labels on a scale ordered from worst to best?',
'Can a scale be simplified such that it is easier to assign a value to a piece of data '
'based on it?',
'Is a scale too simple such that it is not useful for the decision-making process?',
'Are all the scales on a 2-point to 7-point scale?'
]
)
]
)
],
tools=tools,
chat_model=chat_model,
spinner=spinner)
user = UserChatParticipant(name='User')
participants = [user, criteria_brainstormer, criteria_critic]
try:
memory = ConversationSummaryBufferMemory(
llm=chat_model,
max_token_limit=OpenAI.modelname_to_contextsize(chat_model.model_name)
)
backing_store = LangChainMemoryBasedChatDataBackingStore(memory=memory)
except ValueError:
backing_store = InMemoryChatDataBackingStore()
chat = Chat(
backing_store=backing_store,
renderer=TerminalChatRenderer(),
initial_participants=participants
)
chat_conductor = LangChainBasedAIChatConductor(
chat_model=chat_model,
goal='Identify clear well-defined criteria and their respective scales for the decision.',
interaction_schema=(
'1. The Criteria Brainstormer suggests an initial set of criteria (including description and scales) '
'based on the user input.\n'
'2. The Criteria Critic critiques the criteria suggested and suggests improvements.\n'
'3. The Criteria Brainstormer iterates on the criteria until they think they are good enough and ask the '
'user for feedback.\n'
'4. If the user is not satisfied with the criteria, go back to step 1, refining the criteria based on the '
'user feedback.\n'
'5. If the user is satisfied with the criteria, the criteria identification process is complete. The '
'Criteria Brainstormer should present the final list of criteria and their respective scales to the '
'user.\n'
'6. The chat should end.'),
)
_ = chat_conductor.initiate_dialog(chat=chat, initial_message=str(StructuredString(
sections=[
Section(name='Goal', text=state.data['goal']),
Section(name='Alternatives', list=state.data['alternatives']),
]
)))
output = chat_messages_to_pydantic(
chat_messages=chat.get_messages(),
chat_model=chat_model,
output_schema=CriteriaIdentificationResult,
spinner=spinner
)
criteria = output.model_dump()['criteria']
state.data = {**state.data, **dict(criteria=criteria)}
def prioritize_criteria(chat_model: ChatOpenAI, tools: List[BaseTool],
state: DecisionAssistantState, spinner: Optional[Halo] = None):
if state.data.get('criteria_weights') is not None:
return
criteria_comparisons = state.data.get('criteria_comparisons', {})
criteria_comparisons = {tuple(json.loads(labels)): value for labels, value in criteria_comparisons.items()}
criteria_comparisons = list(criteria_comparisons.items())
criteria_names = [criterion['name'] for criterion in state.data['criteria']]
def predict_answer(question: str, choices: List[str], previous_answers: Dict[Tuple[str, str], str]):
ai = LangChainBasedAIChatParticipant(
name='Decision-Making Pairwise Criteria Comparisons Predictor',
role='Decision-Making Pairwise Criteria Comparisons Predictor',
personal_mission='Predict the most likely option the user will choose based on previous pairwise '
'comparisons between criteria.',
other_prompt_sections=[
Section(
name='Steps',
list=[
'Retrieve the user\'s previous pairwise comparisons between criteria.',
'Analyze the list of options.',
'Make a prediction about the user\'s most likely choice based on the analyzed data.',
'Return the predicted option.'
],
list_item_prefix=None
),
Section(
name='Note',
list=[
'Only one option should be predicted.',
'The prediction should be the best possible guess based on the user\'s previous answers.',
'If you really do not know or it is impossible to guess, return the middle option.'
]
),
Section(
name='Output',
text='Only the label of the best-guess option'
),
Section(
name='Output Format',
text='"...\nPREDICTION: CHOICE" Where CHOICE is a verbatim label from the choices given only.'
)
],
tools=tools,
chat_model=chat_model,
spinner=spinner)
user = UserChatParticipant(name='User')
participants = [user, ai]
predicted_answer, _ = get_response(query=str(StructuredString(
sections=[
Section(
name='Previous Pairwise Comparisons',
list=[
f'How much more important is "{criterion_1}" when compared to "{criterion_2}"? -> {value}' for
(criterion_1, criterion_2), value in previous_answers.items()
]
),
Section(
name='Comparison to Predict',
text=question
),
Section(
name='Choices',
list=choices
)
]
)), answerer=ai, renderer=NoChatRenderer())
parts = predicted_answer.split('PREDICTION:', 2)
if len(parts) != 2:
return choices[len(choices) // 2]
predicted_answer = parts[1].strip()
predicted_answer = fix_string_based_on_list(predicted_answer, choices)
if predicted_answer is None:
return choices[len(choices) // 2]
return predicted_answer
for labels, value in gather_unique_pairwise_comparisons(
criteria_names,
predict_fn=predict_answer,
previous_comparisons=criteria_comparisons):
criteria_comparisons.append((labels, value))
state.data = {**state.data, **dict(
criteria_comparisons={json.dumps(labels): value for labels, value in criteria_comparisons})}
yield state
state.data['criteria_weights'] = ahpy.Compare('Criteria', dict(criteria_comparisons)).target_weights
def generate_research_questions(chat_model: ChatOpenAI, tools: List[BaseTool],
state: DecisionAssistantState, spinner: Optional[Halo] = None):
if state.data.get('criteria_research_queries') is not None:
return
ai = LangChainBasedAIChatParticipant(
name='Decision-Making Process Researcher',
role='Decision-Making Process Researcher',
personal_mission='Generate a template for automated research queries for each criterion, whose answers can be '
'used as context when evaluating alternatives.',
other_prompt_sections=[
Section(
name='Process',
list=[
'This is the fifth part of the decision-making process, after the goal, alternatives and criteria '
'have been identified. No need for a greeting.',
'For each criterion, generate relevant, orthogonal, and comprehensive set query templates.',
],
list_item_prefix=None
),
Section(
name='Query Templates',
list=[
'The query templates should capture the essence of the criterion based on the scale and how to '
'assign values.',
'The queries should be strategic and aim to minimize the number of questions while maximizing the '
'information gathered.',
'The list of queries should include counterfactual queries and make use of all knowledge of '
'information foraging and information literacy.',
'Each query template MUST include "{alternative}" in the template to allow for replacement with '
'various alternatives later.',
'If a criterion is purely subjective and nothing can be researched on it, it\'s ok to have 0 '
'queries about it.'
]
),
Section(
name='The Last Message',
list=[
'The last response should include the list of research query templates for each criterion.',
'It should end with the word TERMINATE at the end of the message to signal the end of the chat.'
]
)
],
tools=tools,
chat_model=chat_model,
spinner=spinner)
user = UserChatParticipant(name='User')
participants = [user, ai]
chat = Chat(
backing_store=InMemoryChatDataBackingStore(),
renderer=TerminalChatRenderer(),
initial_participants=participants,
max_total_messages=2
)
chat_conductor = RoundRobinChatConductor()
_ = chat_conductor.initiate_dialog(chat=chat, initial_message=str(StructuredString(
sections=[
Section(name='Goal', text=state.data['goal']),
Section(name='Alternatives', list=state.data['alternatives']),
Section(name='Criteria',
sub_sections=[
Section(name=criterion['name'], text=criterion['description'], list=criterion['scale'],
list_item_prefix=None) for criterion in
state.data['criteria']
]),
]
)))
output = chat_messages_to_pydantic(
chat_messages=chat.get_messages(),
chat_model=chat_model,
output_schema=CriteriaResearchQueriesResult,
spinner=spinner
)
criteria_names = [criterion['name'] for criterion in state.data['criteria']]
output.criteria_research_queries = {fix_string_based_on_list(name, criteria_names): queries for name, queries in
output.criteria_research_queries.items()}
criteria_research_queries = output.model_dump()['criteria_research_queries']
state.data = {**state.data, **dict(criteria_research_queries=criteria_research_queries)}
def perform_research(chat_model: ChatOpenAI, web_search: WebSearch, n_search_results: int,
tools: List[BaseTool], state: DecisionAssistantState,
spinner: Optional[Halo] = None,
fully_autonomous: bool = True):
research_data = state.data.get('research_data')
if research_data is None:
research_data = {}
for alternative in state.data['alternatives']:
alternative_research_data = research_data.get(alternative)
if alternative_research_data is None:
alternative_research_data = {}
for i, criterion in enumerate(state.data['criteria']):
criterion_name = criterion['name']
criterion_research_questions = state.data['criteria_research_queries'][criterion_name]
alternative_criterion_research_data = alternative_research_data.get(criterion_name)
if alternative_criterion_research_data is None:
alternative_criterion_research_data = {'raw': {}, 'aggregated': {}}
# Already researched and aggregated, skip
if alternative_criterion_research_data['aggregated'] != {}:
continue
# Research data online for each query
for query in criterion_research_questions:
query = query.format(alternative=alternative)
# Already researched query, skip
if query in alternative_criterion_research_data['raw']:
continue
found_answer, answer = web_search.get_answer(query=query, n_results=n_search_results,
spinner=spinner)
if not found_answer:
alternative_criterion_research_data['raw'][query] = 'No answer found online.'
if spinner:
spinner.warn(f'No answer found for query "{query}".')
else:
alternative_criterion_research_data['raw'][query] = answer
alternative_research_data[criterion_name] = alternative_criterion_research_data
research_data[alternative] = alternative_research_data
state.data['research_data'] = research_data
yield state
# Do this separately, so all the automated research runs entirely before the user is asked to discuss the findings
for alternative in state.data['alternatives']:
alternative_research_data = research_data.get(alternative)
if alternative_research_data is None:
alternative_research_data = {}
for i, criterion in enumerate(state.data['criteria']):
criterion_name = criterion['name']
alternative_criterion_research_data = alternative_research_data[criterion_name]
# Already researched and aggregated, skip
if alternative_criterion_research_data['aggregated'] != {}:
continue
ai = LangChainBasedAIChatParticipant(
name='Decision-Making Process Researcher',
role='Decision-Making Process Researcher',
personal_mission='Refine research findings through user interaction and assign an accurate label '
'based on data, user input, and criteria.',
other_prompt_sections=[
Section(
name='Process',
list=[
'This is the sixth part of the decision-making process, after the goal, alternatives, '
'criteria, and research queries have been identified. No need for a '
'greeting.',
'Present the researched data to the user and assign a preliminary label & ask for feedback',
'Revise the research findings based on user input, until the user is satisfied with the '
'findings and label.',
],
),
Section(
name='Research Presentation',
list=[
'Maintain original findings if no new user input.',
'Mention the sources of the research findings as inline links.'
]
),
Section(
name='Label Assignment',
list=[
'Assign one label per criterion per alternative based on scale and value assignment '
'rules. A label should be a string only, e.g., "Very Expensive".',
'If unclear, make an educated guess based on data and user input.'
]
),
Section(
name='The First Message',
list=[
'Your first message should look something like this: "Here is what I found about {'
'alternative} for {criterion}:\n\n{research_findings}\n\nBecause {'
'reason_for_label_assignment}, I think the label for {alternative} for {criterion} should '
'be {label}. What do you think? Do you have anything else to add, clarify or change that '
'might affect this label?"'
]
),
Section(
name='The Last Message',
list=[
'The last response should include the refined research findings for a criterion\'s '
'alternative in rich markdown format with all the citations and links inline.',
'Does not include conversational fluff. Think about it like a research report.',
'Does not include the starting sentence: "Here is what I found about...". It should dive '
'straight into the refined findings.',
'It should end with the word TERMINATE at the end of the message to signal the end of the '
'chat.'
]
)
],
tools=tools,
chat_model=chat_model,
spinner=spinner)
user = UserChatParticipant(name='User')
participants = [user, ai]
chat = Chat(
backing_store=InMemoryChatDataBackingStore(),
renderer=TerminalChatRenderer(),
initial_participants=participants,
max_total_messages=2 if fully_autonomous else None
)
chat_conductor = RoundRobinChatConductor()
_ = chat_conductor.initiate_dialog(chat=chat, initial_message=str(StructuredString(
sections=[
Section(name='Goal', text=state.data['goal']),
Section(name='Alternatives', list=state.data['alternatives']),
Section(name='Criterion',
sub_sections=[
Section(name=criterion_name, text=criterion['description'], list=criterion['scale'],
list_item_prefix=None)
]),
Section(name='Research Findings',
sub_sections=[
Section(name=query, text=answer) for query, answer in
alternative_criterion_research_data[
'raw'].items()
])
]
)))
criterion_full_research_data = chat_messages_to_pydantic(
chat_messages=chat.get_messages(),
chat_model=chat_model,
output_schema=AlternativeCriteriaResearchFindingsResult,
spinner=spinner
)
research_data[alternative][criterion_name]['aggregated'] = {
'findings': criterion_full_research_data.updated_research_findings,
'label': criterion_full_research_data.label
}
state.data['research_data'] = research_data
yield state
state.data = {**state.data, **dict(research_data=research_data)}
def analyze_data(state: DecisionAssistantState):
if state.data.get('scored_alternatives') is not None:
return
items = [state.data['research_data'][alternative] for alternative in state.data['alternatives']]
criteria_weights = state.data['criteria_weights']
criteria_names = [criterion['name'] for criterion in state.data['criteria']]
scores = topsis_score(items=items,
weights=criteria_weights,
value_mapper=lambda item, criterion: \
normalize_label_value(label=item[criterion]['aggregated']['label'],
label_list=state.data['criteria'][
criteria_names.index(criterion)]['scale'],
lower_bound=0.0,
upper_bound=1.0),
best_and_worst_solutions=(
{criterion['name']: {'aggregated': {'label': criterion['scale'][-1]}} for
criterion in state.data['criteria']},
{criterion['name']: {'aggregated': {'label': criterion['scale'][0]}} for
criterion in state.data['criteria']}
))
scored_alternatives = {alternative: score for alternative, score in zip(state.data['alternatives'], scores)}
state.data = {**state.data, **dict(scored_alternatives=scored_alternatives)}
def compile_data_for_presentation(state: DecisionAssistantState, report_file: str):
if os.path.exists(report_file):
return
enriched_alternatives = []
for alternative in state.data['alternatives']:
alternative_research_data = state.data['research_data'][alternative]
alternative_score = state.data['scored_alternatives'][alternative]
enriched_alternatives.append({
'name': alternative,
'score': alternative_score,
'criteria_data': alternative_research_data
})
html = generate_decision_report_as_html(
criteria=state.data['criteria'],
criteria_weights=state.data['criteria_weights'],
alternatives=enriched_alternatives,
goal=state.data['goal'])
save_html_to_file(html, report_file)
def present_report(state: DecisionAssistantState, report_file: str):
open_html_file_in_browser(report_file)
| [
"identified. No need for a greeting.",
"Process Stage",
"This is the third part of the decision-making process, after the goal and alternatives have been "
] |
2024-01-10 | iwasakishuto/paper-summary2slack | pkg~cli~arxiv2slack.py | # coding:utf-8
import argparse
import os
import random
import sys
import arxiv
import openai
from slack_sdk import WebClient as SlackClient
from slack_sdk.errors import SlackApiError
from ..utils import toRED
def get_arxiv_summary(result: arxiv.Result) -> str:
system = """与えられた論文の要点を3点のみでまとめ、以下のフォーマットで日本語で出力してください。
```
タイトルの日本語訳
・要点1
・要点2
・要点3
```
"""
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": system},
{"role": "user", "content": f"title: {result.title}\nbody: {result.summary}"},
],
temperature=0.25,
)
if isinstance(response, dict):
summary = response["choices"][0]["message"]["content"]
title, *body = summary.split("\n")
body = "\n".join(body)
return f"""発行日: {result.published.strftime("%Y-%m-%d %H:%M:%S")}
URL: {result.entry_id}
Title: "{result.title}"
タイトル: 「{title}」
-------------------------
{body}
"""
else:
return "Error"
def main(argv: list = sys.argv[1:]):
parser = argparse.ArgumentParser(
description="Summarize arxiv papers with ChatGPT and post it to Slack.",
add_help=True,
)
parser.add_argument(
"-openai",
"--OPENAI-API-KEY",
type=str,
default=os.getenv("OPENAI_API_KEY", ""),
help="The openai's api key.",
)
parser.add_argument(
"-slack",
"--SLACK-API-TOKEN",
type=str,
default=os.getenv("SLACK_API_TOKEN", ""),
help="The slack api token.",
)
parser.add_argument(
"-channel",
"--SLACK-CHANNEL",
type=str,
default=os.getenv("SLACK_CHANNEL", ""),
help="Which channel to post the arxiv summary.",
)
parser.add_argument(
"-Q",
"--query",
type=str,
default=os.getenv("ARXIV_QUERY", "abs:GPT AND cat:cs.AI"), # Default: Abstract に "GPT" という文字を含む、AI関連の論文。
help="The search query of Arxiv. (See 'https://info.arxiv.org/help/api/user-manual.html#query_details'.)",
)
parser.add_argument("-N", "--num", type=int, default=3, help="How many papers to post.")
args = parser.parse_args(argv)
# Set openai API key.
openai.api_key = args.OPENAI_API_KEY
# Initialize the Slack Client.
client = SlackClient(token=args.SLACK_API_TOKEN)
# Search arxiv paper.
search = arxiv.Search(
query=args.query,
max_results=20,
sort_by=arxiv.SortCriterion.SubmittedDate,
sort_order=arxiv.SortOrder.Descending,
)
for i, result in enumerate(sorted(search.results(), key=lambda k: random.random()), start=1):
try:
message = f"今日の論文です! {i}本目\n" + get_arxiv_summary(result)
parent_message = client.chat_postMessage(channel=args.SLACK_CHANNEL, text=message)
child_message = client.chat_postMessage(
channel=args.SLACK_CHANNEL, text=result.summary, thread_ts=parent_message["ts"]
)
except SlackApiError as e:
print(f"Error posting message: {toRED(str(e))}")
if i >= args.num:
break
| [
"与えられた論文の要点を3点のみでまとめ、以下のフォーマットで日本語で出力してください。\n\n```\nタイトルの日本語訳\n・要点1\n・要点2\n・要点3\n```\n"
] |
2024-01-10 | leo4life2/Xhs-Post-Generator | finetuning~finetune-csv-creator.py | # coding=utf8
import csv
import os
import openai
openai.api_key = os.getenv("OPENAI_API_KEY")
PROMPT = "Write some posts for teaching people how to buy Paxlovid.\n\n"
def get_data():
all_data = []
with open("pax-cleaned.csv", mode='r', encoding='utf-8-sig') as f:
reader = csv.reader(f)
for row in reader:
all_data.append((row[0], row[1]))
return all_data
def create_csv():
# first row first col is "prompt", second col is "completion"
with open("finetune_data.csv", mode='w', encoding='utf-8-sig') as f:
writer = csv.writer(f)
writer.writerow(["prompt", "completion"])
all_data = get_data()
for title, content in all_data:
completion = f"标题:{title}\n内容:{content}\n\n"
writer.writerow([PROMPT, completion])
def main():
create_csv()
if __name__ == "__main__":
main() | [
"Write some posts for teaching people how to buy Paxlovid.\n\n"
] |
2024-01-10 | dA505819/nni | src~sdk~pynni~nni~ppo_tuner~ppo_tuner.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
ppo_tuner.py including:
class PPOTuner
"""
import copy
import logging
import numpy as np
from gym import spaces
import nni
from nni.tuner import Tuner
from nni.utils import OptimizeMode, extract_scalar_reward
from .model import Model
from .util import set_global_seeds
from .policy import build_lstm_policy
logger = logging.getLogger('ppo_tuner_AutoML')
def _constfn(val):
"""
Wrap as function
"""
def f(_):
return val
return f
class ModelConfig:
"""
Configurations of the PPO model
"""
def __init__(self):
self.observation_space = None
self.action_space = None
self.num_envs = 0
self.nsteps = 0
self.ent_coef = 0.0
self.lr = 3e-4
self.vf_coef = 0.5
self.max_grad_norm = 0.5
self.gamma = 0.99
self.lam = 0.95
self.cliprange = 0.2
self.embedding_size = None # the embedding is for each action
self.noptepochs = 4 # number of training epochs per update
self.total_timesteps = 5000 # number of timesteps (i.e. number of actions taken in the environment)
self.nminibatches = 4 # number of training minibatches per update. For recurrent policies,
# should be smaller than or equal to the number of environments run in parallel.
class TrialsInfo:
"""
Information about each trial from one model inference
"""
def __init__(self, obs, actions, values, neglogpacs, dones, last_value, inf_batch_size):
self.iter = 0
self.obs = obs
self.actions = actions
self.values = values
self.neglogpacs = neglogpacs
self.dones = dones
self.last_value = last_value
self.rewards = None
self.returns = None
self.inf_batch_size = inf_batch_size
#self.states = None
def get_next(self):
"""
Get actions of the next trial
"""
if self.iter >= self.inf_batch_size:
return None, None
actions = []
for step in self.actions:
actions.append(step[self.iter])
self.iter += 1
return self.iter - 1, actions
def update_rewards(self, rewards, returns):
"""
After the trial is finished, the reward and return of this trial are updated
"""
self.rewards = rewards
self.returns = returns
def convert_shape(self):
"""
Convert shape
"""
def sf01(arr):
"""
swap and then flatten axes 0 and 1
"""
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
self.obs = sf01(self.obs)
self.returns = sf01(self.returns)
self.dones = sf01(self.dones)
self.actions = sf01(self.actions)
self.values = sf01(self.values)
self.neglogpacs = sf01(self.neglogpacs)
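# Editor's note (illustrative): sf01 reorders each array from shape (nsteps, num_envs, ...) into a
# flat (num_envs * nsteps, ...) batch in which the steps of each trial stay contiguous -- the
# layout that flatinds in PPOModel.train() relies on when slicing minibatches per environment.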
class PPOModel:
"""
PPO Model
"""
def __init__(self, model_config, mask):
self.model_config = model_config
self.states = None # initial state of lstm in policy/value network
self.nupdates = None # the number of func train is invoked, used to tune lr and cliprange
self.cur_update = 1 # record the current update
self.np_mask = mask # record the mask of each action within one trial
set_global_seeds(None)
assert isinstance(self.model_config.lr, float)
self.lr = _constfn(self.model_config.lr)
assert isinstance(self.model_config.cliprange, float)
self.cliprange = _constfn(self.model_config.cliprange)
# build lstm policy network, value share the same network
policy = build_lstm_policy(model_config)
# Get the nb of env
nenvs = model_config.num_envs
# Calculate the batch_size
self.nbatch = nbatch = nenvs * model_config.nsteps # num of record per update
nbatch_train = nbatch // model_config.nminibatches # get batch size
# self.nupdates is used to tune lr and cliprange
self.nupdates = self.model_config.total_timesteps // self.nbatch
# Instantiate the model object (that creates act_model and train_model)
self.model = Model(policy=policy, nbatch_act=nenvs, nbatch_train=nbatch_train,
nsteps=model_config.nsteps, ent_coef=model_config.ent_coef, vf_coef=model_config.vf_coef,
max_grad_norm=model_config.max_grad_norm, np_mask=self.np_mask)
self.states = self.model.initial_state
logger.info('=== finished PPOModel initialization')
def inference(self, num):
"""
Generate actions along with related info from policy network.
observation is the action of the last step.
Parameters
----------
num: int
The number of trials to generate
Returns
-------
mb_obs : list
Observation of the ``num`` configurations
mb_actions : list
Actions of the ``num`` configurations
mb_values : list
Values from the value function of the ``num`` configurations
mb_neglogpacs : list
``neglogp`` of the ``num`` configurations
mb_dones : list
To show whether the play is done, always ``True``
last_values : tensorflow tensor
The last values of the ``num`` configurations, got with session run
"""
# Here, we init the lists that will contain the mb of experiences
mb_obs, mb_actions, mb_values, mb_dones, mb_neglogpacs = [], [], [], [], []
# initial observation
# use the (n+1)th embedding to represent the first step action
first_step_ob = self.model_config.action_space.n
obs = [first_step_ob for _ in range(num)]
dones = [True for _ in range(num)]
states = self.states
# For n in range number of steps
for cur_step in range(self.model_config.nsteps):
# Given observations, get action value and neglopacs
# We already have self.obs because Runner superclass run self.obs[:] = env.reset() on init
actions, values, states, neglogpacs = self.model.step(cur_step, obs, S=states, M=dones)
mb_obs.append(obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(dones)
# Take actions in env and look at the results
# Infos contains a ton of useful information
obs[:] = actions
if cur_step == self.model_config.nsteps - 1:
dones = [True for _ in range(num)]
else:
dones = [False for _ in range(num)]
#batch of steps to batch of rollouts
np_obs = np.asarray(obs)
mb_obs = np.asarray(mb_obs, dtype=np_obs.dtype)
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=bool)
last_values = self.model.value(np_obs, S=states, M=dones)
return mb_obs, mb_actions, mb_values, mb_neglogpacs, mb_dones, last_values
def compute_rewards(self, trials_info, trials_result):
"""
Compute the rewards of the trials in trials_info based on trials_result,
and update the rewards in trials_info
Parameters
----------
trials_info : TrialsInfo
Info of the generated trials
trials_result : list
Final results (e.g., acc) of the generated trials
"""
mb_rewards = np.asarray([trials_result for _ in trials_info.actions], dtype=np.float32)
# discount/bootstrap off value fn
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
last_dones = np.asarray([True for _ in trials_result], dtype=bool) # ugly
for t in reversed(range(self.model_config.nsteps)):
if t == self.model_config.nsteps - 1:
nextnonterminal = 1.0 - last_dones
nextvalues = trials_info.last_value
else:
nextnonterminal = 1.0 - trials_info.dones[t+1]
nextvalues = trials_info.values[t+1]
delta = mb_rewards[t] + self.model_config.gamma * nextvalues * nextnonterminal - trials_info.values[t]
lastgaelam = delta + self.model_config.gamma * self.model_config.lam * nextnonterminal * lastgaelam
mb_advs[t] = lastgaelam # pylint: disable=unsupported-assignment-operation
mb_returns = mb_advs + trials_info.values
trials_info.update_rewards(mb_rewards, mb_returns)
trials_info.convert_shape()
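# Editor's note (illustrative, not part of the original NNI source): the loop above is the
# standard GAE(lambda) recursion of PPO, run backwards over the nsteps actions of each trial:
#
#     delta_t  = r_t + gamma * V(s_{t+1}) * (1 - done_{t+1}) - V(s_t)
#     A_t      = delta_t + gamma * lam * (1 - done_{t+1}) * A_{t+1}
#     return_t = A_t + V(s_t)
#
# Because every step of a trial receives the same reward (the trial's final metric), the
# advantages simply propagate that single score back through the whole action sequence.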
def train(self, trials_info, nenvs):
"""
Train the policy/value network using trials_info
Parameters
----------
trials_info : TrialsInfo
Complete info of the generated trials from the previous inference
nenvs : int
The batch size of the (previous) inference
"""
# keep frac decay for future optimization
if self.cur_update <= self.nupdates:
frac = 1.0 - (self.cur_update - 1.0) / self.nupdates
else:
logger.warning('current update (self.cur_update) %d has exceeded total updates (self.nupdates) %d',
self.cur_update, self.nupdates)
frac = 1.0 - (self.nupdates - 1.0) / self.nupdates
lrnow = self.lr(frac)
cliprangenow = self.cliprange(frac)
self.cur_update += 1
states = self.states
assert states is not None # recurrent version
assert nenvs % self.model_config.nminibatches == 0
envsperbatch = nenvs // self.model_config.nminibatches
envinds = np.arange(nenvs)
flatinds = np.arange(nenvs * self.model_config.nsteps).reshape(nenvs, self.model_config.nsteps)
for _ in range(self.model_config.noptepochs):
np.random.shuffle(envinds)
for start in range(0, nenvs, envsperbatch):
end = start + envsperbatch
mbenvinds = envinds[start:end]
mbflatinds = flatinds[mbenvinds].ravel()
slices = (arr[mbflatinds] for arr in (trials_info.obs, trials_info.returns, trials_info.dones,
trials_info.actions, trials_info.values, trials_info.neglogpacs))
mbstates = states[mbenvinds]
self.model.train(lrnow, cliprangenow, *slices, mbstates)
class PPOTuner(Tuner):
"""
PPOTuner: the implementation inherits the main logic of
[ppo2 from openai](https://github.com/openai/baselines/tree/master/baselines/ppo2), and is adapted for the NAS scenario.
It uses ``lstm`` for its policy network and value network, policy and value share the same network.
"""
def __init__(self, optimize_mode, trials_per_update=20, epochs_per_update=4, minibatch_size=4,
ent_coef=0.0, lr=3e-4, vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95, cliprange=0.2):
"""
Initialization. The PPO model is not initialized here because the search space has not been received yet.
Parameters
----------
optimize_mode : str
maximize or minimize
trials_per_update : int
Number of trials to have for each model update
epochs_per_update : int
Number of epochs to run for each model update
minibatch_size : int
Minibatch size (number of trials) for the update
ent_coef : float
Policy entropy coefficient in the optimization objective
lr : float
Learning rate of the model (lstm network), constant
vf_coef : float
Value function loss coefficient in the optimization objective
max_grad_norm : float
Gradient norm clipping coefficient
gamma : float
Discounting factor
lam : float
Advantage estimation discounting factor (lambda in the paper)
cliprange : float
Cliprange in the PPO algorithm, constant
"""
self.optimize_mode = OptimizeMode(optimize_mode)
self.model_config = ModelConfig()
self.model = None
self.search_space = None
self.running_trials = {} # key: parameter_id, value: actions/states/etc.
self.inf_batch_size = trials_per_update # number of trials to generate in one inference
self.first_inf = True # indicate whether it is the first time to inference new trials
self.trials_result = [None for _ in range(self.inf_batch_size)] # results of finished trials
self.credit = 0 # record the unsatisfied trial requests
self.param_ids = []
self.finished_trials = 0
self.chosen_arch_template = {}
self.actions_spaces = None
self.actions_to_config = None
self.full_act_space = None
self.trials_info = None
self.all_trials = {} # used to dedup the same trial, key: config, value: final result
self.model_config.num_envs = self.inf_batch_size
self.model_config.noptepochs = epochs_per_update
self.model_config.nminibatches = minibatch_size
self.send_trial_callback = None
logger.info('Finished PPOTuner initialization')
def _process_nas_space(self, search_space):
actions_spaces = []
actions_to_config = []
for key, val in search_space.items():
if val['_type'] == 'layer_choice':
actions_to_config.append((key, 'layer_choice'))
actions_spaces.append(val['_value'])
self.chosen_arch_template[key] = None
elif val['_type'] == 'input_choice':
candidates = val['_value']['candidates']
n_chosen = val['_value']['n_chosen']
if n_chosen not in [0, 1, [0, 1]]:
raise ValueError('Optional_input_size can only be 0, 1, or [0, 1], but the specified one is %s'
% (n_chosen))
if isinstance(n_chosen, list):
actions_to_config.append((key, 'input_choice'))
# FIXME: risk, candidates might also have None
actions_spaces.append(['None', *candidates])
self.chosen_arch_template[key] = None
elif n_chosen == 1:
actions_to_config.append((key, 'input_choice'))
actions_spaces.append(candidates)
self.chosen_arch_template[key] = None
elif n_chosen == 0:
self.chosen_arch_template[key] = []
else:
raise ValueError('Unsupported search space type: %s' % (val['_type']))
# calculate observation space
dedup = {}
for step in actions_spaces:
for action in step:
dedup[action] = 1
full_act_space = [act for act, _ in dedup.items()]
assert len(full_act_space) == len(dedup)
observation_space = len(full_act_space)
nsteps = len(actions_spaces)
return actions_spaces, actions_to_config, full_act_space, observation_space, nsteps
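# Editor's sketch (hypothetical example, not part of the original source): _process_nas_space
# expects an NNI NAS search space made of 'layer_choice' / 'input_choice' entries, e.g.
#
#     {
#         "conv_op": {"_type": "layer_choice", "_value": ["conv3x3", "conv5x5"]},
#         "skip_in": {"_type": "input_choice",
#                     "_value": {"candidates": ["layer_0", "layer_1"], "n_chosen": 1}},
#     }
#
# which yields two decision steps; the union of all candidate labels becomes the merged
# observation/action space used by the mask generation below.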
def _generate_action_mask(self):
"""
Different steps could have different action spaces. To deal with this case, we merge all the
possible actions into one action space, and use a mask to indicate the available actions for each step
"""
two_masks = []
mask = []
for acts in self.actions_spaces:
one_mask = [0 for _ in range(len(self.full_act_space))]
for act in acts:
idx = self.full_act_space.index(act)
one_mask[idx] = 1
mask.append(one_mask)
two_masks.append(mask)
mask = []
for acts in self.actions_spaces:
one_mask = [-np.inf for _ in range(len(self.full_act_space))]
for act in acts:
idx = self.full_act_space.index(act)
one_mask[idx] = 0
mask.append(one_mask)
two_masks.append(mask)
return np.asarray(two_masks, dtype=np.float32)
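# Editor's sketch (illustrative, not part of the original source): for a merged action space
# ['a', 'b', 'c'] where step 0 allows ['a', 'b'] and step 1 allows only ['c'], the two masks are
#
#     availability mask: [[1, 1, 0], [0, 0, 1]]             (1 = action usable at this step)
#     additive mask:     [[0, 0, -inf], [-inf, -inf, 0]]    (presumably added to the policy
#                                                            logits to suppress unavailable actions)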
def update_search_space(self, search_space):
"""
Get search space, currently the space only includes that for NAS
Parameters
----------
search_space : dict
Search space for NAS
for the format, refer to the search space spec (https://nni.readthedocs.io/en/latest/Tutorial/SearchSpaceSpec.html).
"""
logger.info('update search space %s', search_space)
assert self.search_space is None
self.search_space = search_space
assert self.model_config.observation_space is None
assert self.model_config.action_space is None
self.actions_spaces, self.actions_to_config, self.full_act_space, obs_space, nsteps = self._process_nas_space(search_space)
self.model_config.observation_space = spaces.Discrete(obs_space)
self.model_config.action_space = spaces.Discrete(obs_space)
self.model_config.nsteps = nsteps
# generate mask in numpy
mask = self._generate_action_mask()
assert self.model is None
self.model = PPOModel(self.model_config, mask)
def _actions_to_config(self, actions):
"""
Given actions, to generate the corresponding trial configuration
"""
chosen_arch = copy.deepcopy(self.chosen_arch_template)
for cnt, act in enumerate(actions):
act_name = self.full_act_space[act]
(_key, _type) = self.actions_to_config[cnt]
if _type == 'input_choice':
if act_name == 'None':
chosen_arch[_key] = {'_value': [], '_idx': []}
else:
candidates = self.search_space[_key]['_value']['candidates']
idx = candidates.index(act_name)
chosen_arch[_key] = {'_value': [act_name], '_idx': [idx]}
elif _type == 'layer_choice':
idx = self.search_space[_key]['_value'].index(act_name)
chosen_arch[_key] = {'_value': act_name, '_idx': idx}
else:
raise ValueError('unrecognized key: {0}'.format(_type))
return chosen_arch
def generate_multiple_parameters(self, parameter_id_list, **kwargs):
"""
Returns multiple sets of trial (hyper-)parameters, as iterable of serializable objects.
Parameters
----------
parameter_id_list : list of int
Unique identifiers for each set of requested hyper-parameters.
These will later be used in :meth:`receive_trial_result`.
**kwargs
Not used
Returns
-------
list
A list of newly generated configurations
"""
result = []
self.send_trial_callback = kwargs['st_callback']
for parameter_id in parameter_id_list:
had_exception = False
try:
logger.debug("generating param for %s", parameter_id)
res = self.generate_parameters(parameter_id, **kwargs)
except nni.NoMoreTrialError:
had_exception = True
if not had_exception:
result.append(res)
return result
def generate_parameters(self, parameter_id, **kwargs):
"""
Generate parameters. If no trial configuration is available for now, self.credit is increased by 1 so the config can be sent later
Parameters
----------
parameter_id : int
Unique identifier for requested hyper-parameters.
This will later be used in :meth:`receive_trial_result`.
**kwargs
Not used
Returns
-------
dict
One newly generated configuration
"""
if self.first_inf:
self.trials_result = [None for _ in range(self.inf_batch_size)]
mb_obs, mb_actions, mb_values, mb_neglogpacs, mb_dones, last_values = self.model.inference(self.inf_batch_size)
self.trials_info = TrialsInfo(mb_obs, mb_actions, mb_values, mb_neglogpacs,
mb_dones, last_values, self.inf_batch_size)
self.first_inf = False
trial_info_idx, actions = self.trials_info.get_next()
if trial_info_idx is None:
logger.debug('Credit added by one in parameters request')
self.credit += 1
self.param_ids.append(parameter_id)
raise nni.NoMoreTrialError('no more parameters now.')
self.running_trials[parameter_id] = trial_info_idx
new_config = self._actions_to_config(actions)
return new_config
def _next_round_inference(self):
"""
Run an inference to generate the next batch of configurations
"""
logger.debug('Start next round inference...')
self.finished_trials = 0
self.model.compute_rewards(self.trials_info, self.trials_result)
self.model.train(self.trials_info, self.inf_batch_size)
self.running_trials = {}
# generate new trials
self.trials_result = [None for _ in range(self.inf_batch_size)]
mb_obs, mb_actions, mb_values, mb_neglogpacs, mb_dones, last_values = self.model.inference(self.inf_batch_size)
self.trials_info = TrialsInfo(mb_obs, mb_actions,
mb_values, mb_neglogpacs,
mb_dones, last_values,
self.inf_batch_size)
logger.debug('Next round inference complete.')
# check credit and submit new trials
for _ in range(self.credit):
trial_info_idx, actions = self.trials_info.get_next()
if trial_info_idx is None:
logger.warning('Not enough trial configs; trials_per_update is suggested to be larger than trialConcurrency')
break
assert self.param_ids
param_id = self.param_ids.pop()
self.running_trials[param_id] = trial_info_idx
new_config = self._actions_to_config(actions)
self.send_trial_callback(param_id, new_config)
self.credit -= 1
logger.debug('Send new trial (%d, %s) for reducing credit', param_id, new_config)
def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
"""
Receive trial's result. If the number of finished trials equals self.inf_batch_size, start the next update to
train the model.
Parameters
----------
parameter_id : int
Unique identifier of used hyper-parameters, same with :meth:`generate_parameters`.
parameters : dict
Hyper-parameters generated by :meth:`generate_parameters`.
value : dict
Result from trial (the return value of :func:`nni.report_final_result`).
"""
trial_info_idx = self.running_trials.pop(parameter_id, None)
assert trial_info_idx is not None
value = extract_scalar_reward(value)
if self.optimize_mode == OptimizeMode.Minimize:
value = -value
self.trials_result[trial_info_idx] = value
self.finished_trials += 1
logger.debug('receive_trial_result, parameter_id %d, trial_info_idx %d, finished_trials %d, inf_batch_size %d',
parameter_id, trial_info_idx, self.finished_trials, self.inf_batch_size)
if self.finished_trials == self.inf_batch_size:
logger.debug('Start next round inference in receive_trial_result')
self._next_round_inference()
def trial_end(self, parameter_id, success, **kwargs):
"""
To deal with trial failure. If a trial fails, it is popped out from ``self.running_trials``,
and the final result of this trial is assigned with the average of the finished trials.
Parameters
----------
parameter_id : int
Unique identifier for hyper-parameters used by this trial.
success : bool
True if the trial successfully completed; False if failed or terminated.
**kwargs
Not used
"""
if not success:
if parameter_id not in self.running_trials:
logger.warning('The trial failed, but self.running_trials does not have this trial')
return
trial_info_idx = self.running_trials.pop(parameter_id, None)
assert trial_info_idx is not None
# use mean of finished trials as the result of this failed trial
values = [val for val in self.trials_result if val is not None]
logger.warning('In trial_end, values: %s', values)
self.trials_result[trial_info_idx] = (sum(values) / len(values)) if values else 0
self.finished_trials += 1
if self.finished_trials == self.inf_batch_size:
logger.debug('Start next round inference in trial_end')
self._next_round_inference()
def import_data(self, data):
"""
Import additional data for tuning, not supported yet.
Parameters
----------
data : list
A list of dictionaries, each of which has at least two keys, ``parameter`` and ``value``
"""
logger.warning('PPOTuner cannot leverage imported data.')
| [] |
2024-01-10 | chanb/metalearning_RL | helper~envs~multiprocessing_env.py | #This code is from openai baseline
#https://github.com/openai/baselines/tree/master/baselines/common/vec_env
import numpy as np
from multiprocessing import Process, Pipe
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.unwrapped.reset_task(data)
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class VecEnv(object):
"""
An abstract asynchronous, vectorized environment.
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close(self):
"""
Clean up the environments' resources.
"""
pass
def step(self, actions):
self.step_async(actions)
return self.step_wait()
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.nenvs = nenvs
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self, tasks):
for remote, task in zip(self.remotes, tasks):
remote.send(('reset_task', task))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
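# Editor's sketch (illustrative usage, not part of the original baselines code): SubprocVecEnv
# takes a list of thunks, each building one gym environment, e.g.
#
#     import gym
#     envs = SubprocVecEnv([lambda: gym.make("CartPole-v1") for _ in range(4)])
#     obs = envs.reset()                                   # stacked observations, shape (4, ...)
#     actions = [envs.action_space.sample() for _ in range(4)]
#     obs, rewards, dones, infos = envs.step(actions)      # each env auto-resets when done
#     envs.close()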
def __len__(self):
return self.nenvs | [] |
2024-01-10 | mouseku/2023_KHUTHON | GCS_and_Deepl.py | import requests
import speech_recognition as sr
import threading
import queue
import keyboard
from openai import OpenAI
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Multi-Thread_01 controling input audio
class SpeechToTextThread(threading.Thread):
def __init__(self, audio_queue, flag):
threading.Thread.__init__(self)
self.recognizer = sr.Recognizer()
self.audio_queue = audio_queue
self.flag = flag
def run(self):
with sr.Microphone() as source:
self.recognizer.adjust_for_ambient_noise(source)
while not self.flag.is_set():
try:
print("I'm listening..Umm...")
audio = self.recognizer.listen(source, timeout=None, phrase_time_limit=10)
self.audio_queue.put(audio)
except sr.UnknownValueError:
print("I didn't understand your words...Come again Plz?")
print()
pass # Ignore if the audio is not recognized
except sr.RequestError as e:
print(f"Google Cloud Speech-to-Text request failed: {e}")
def main():
audio_queue = queue.Queue()
flag = threading.Event()
speech_thread = SpeechToTextThread(audio_queue, flag)
answer = "[Base] ChatGPT, which stands for Chat Generative Pre-trained Transformer, is a large language model-based chatbot developed by OpenAI and launched on November 30, 2022, that enables users to refine and steer a conversation towards a desired length, format, style, level of detail, and language. Successive prompts and replies, known as prompt engineering, are considered at each conversation stage as a context. [Answer] "
try:
# Start the speech-to-text thread
speech_thread.start()
url_for_deepl = 'https://api-free.deepl.com/v2/translate'
# Multi_Thread_Main: sending the recorded audio to Google STT, translating via DeepL, and printing the results
while not flag.is_set():
try:
# Get audio from the queue
audio = audio_queue.get(block=True, timeout=1)
text = speech_thread.recognizer.recognize_google_cloud(
audio,
credentials_json='credential.json',
language='ko-KR',
)
params = {'auth_key' : 'auth_key', 'text' : text, 'source_lang' : 'KO', "target_lang": 'EN' }
result = requests.post(url_for_deepl, data=params, verify=False)
print(f"Transcription: {text}")
print(result.json()['translations'][0]["text"])
answer += result.json()['translations'][0]["text"]
except queue.Empty:
pass # Queue is empty, no audio available
except sr.UnknownValueError:
print("I didn't understand your words...Come again Plz?")
print()
pass # Ignore if the audio is not recognized
except sr.RequestError as e:
print(f"Google Cloud Speech-to-Text request failed: {e}")
except KeyboardInterrupt:
# Stop the speech-to-text thread when the program is interrupted
print("Exiting Education..")
flag.set()
speech_thread.join()
print(answer)
# f = open('gpt_key.txt', 'r')
# api_key = f.read()
api_key = "api_key"
client = OpenAI(api_key = api_key)
completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "The Structured Feedback Analyzer now has an enhanced function. It will work with a base sentence provided once ([Base]) and multiple incomplete sentences ([Answer]). Each [Answer] will end with a flag indicating whether it is complete (<True>) or incomplete (<False>). The GPT's task is to assess the correctness of each [Answer] up to the point it is given. The focus is on analyzing the grammatical accuracy and contextual relevance of the [Answer] in relation to the [Base]. This GPT will not only compare the [Answer] to the [Base] but also evaluate the correctness of the [Answer] as a standalone sentence. The feedback provided will be concise, focusing on the correctness of the [Answer] up to the point it is given, without speculating about the missing parts. This structured approach will help users understand the accuracy and relevance of their answers in relation to the base sentence."},
{"role": "user", "content": answer}
]
)
print(completion.choices[0].message.content)
if __name__ == "__main__":
main() | [
"[Base] ChatGPT, which stands for Chat Generative Pre-trained Transformer, is a large language model-based chatbot developed by OpenAI and launched on November 30, 2022, that enables users to refine and steer a conversation towards a desired length, format, style, level of detail, and language. Successive prompts and replies, known as prompt engineering, are considered at each conversation stage as a context. [Answer] ",
"The Structured Feedback Analyzer now has an enhanced function. It will work with a base sentence provided once ([Base]) and multiple incomplete sentences ([Answer]). Each [Answer] will end with a flag indicating whether it is complete (<True>) or incomplete (<False>). The GPT's task is to assess the correctness of each [Answer] up to the point it is given. The focus is on analyzing the grammatical accuracy and contextual relevance of the [Answer] in relation to the [Base]. This GPT will not only compare the [Answer] to the [Base] but also evaluate the correctness of the [Answer] as a standalone sentence. The feedback provided will be concise, focusing on the correctness of the [Answer] up to the point it is given, without speculating about the missing parts. This structured approach will help users understand the accuracy and relevance of their answers in relation to the base sentence."
] |
2024-01-10 | alextanhongpin/python-bard | gr_app.py | # gradio gr_app.py
# open http://localhost:7860
import gradio as gr
from langchain.llms import GooglePalm
from langchain.prompts import PromptTemplate
import os
google_api_key = os.getenv("GOOGLE_API_KEY")
llm = GooglePalm(google_api_key=google_api_key, temperature=0.0)
TEMPLATE = """
You are an experienced technical writer able to explain complicated systems in simple words.
Improve the documentation below. Return the result as markdown. Add context and improve description too:
Documentation:
```
{text}
```
"""
def handle(template, byte_string):
text = byte_string.decode("utf-8")
# Create a LangChain prompt template that we can insert values to later
prompt = PromptTemplate(input_variables=["text"], template=template)
final_prompt = prompt.format(text=text)
return text, llm(final_prompt)
demo = gr.Interface(
fn=handle,
inputs=[
gr.Textbox(
lines=2, value=TEMPLATE, placeholder="Prompt here...", label="Prompt"
),
gr.File(
file_count="single",
file_types=[".md"],
container=True,
show_label=True,
type="binary",
),
],
outputs=[
gr.Markdown(label="Original"),
gr.Code(label="Output", language="markdown", interactive=True),
],
)
if __name__ == "__main__":
demo.launch(show_api=False)
| [
"\nYou are an experienced technical writer able to explain complicated systems in simple words.\nImprove the documentation below. Return the result as markdown. Add context and improve description too:\n\n\nDocumentation:\n```\n{text}\n```\n"
] |
2024-01-10 | pratyushd3v/limnoria-plugins-1 | ChatGPT~plugin.py | ###
# Copyright (c) 2023, oddluck
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot import utils, plugins, ircutils, callbacks
from supybot.commands import *
from supybot.i18n import PluginInternationalization
import openai
_ = PluginInternationalization("ChatGPT")
class ChatGPT(callbacks.Plugin):
"""Use the OpenAI ChatGPT API"""
threaded = True
def chat(self, irc, msg, args, text):
"""Manual Call to the ChatGPT API"""
openai.api_key = self.registryValue("api_key")
prompt = self.registryValue("prompt", msg.channel)
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": prompt},
{"role": "user", "content": text},
],
temperature=self.registryValue("temperature", msg.channel),
top_p=self.registryValue("top_p", msg.channel),
max_tokens=self.registryValue("max_tokens", msg.channel),
presence_penalty=self.registryValue("presence_penalty", msg.channel),
frequency_penalty=self.registryValue("frequency_penalty", msg.channel),
user=msg.nick,
)
response = " ".join(completion.choices[0].message.content.splitlines())
irc.reply(response)
chat = wrap(chat, ["text"])
Class = ChatGPT
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| [] |
2024-01-10 | ananthu666/ChromaDB_Hacknight | backend~chat2.py | import openai
import requests
import json
msg='''suggest five various dishes and their recipes based on the ingredients ->
name:'';
ingredients:'';
recipe:'';
'''
def sum(text):
openai.api_key = 'sk-******************************'
URL = "https://api.openai.com/v1/chat/completions"
payload = {
"model": "gpt-3.5-turbo",
"messages": [{"role": "user", "content": msg+str(text)}],
"temperature" : 1.0,
"top_p":1.0,
"n" : 1,
"stream": False,
"presence_penalty":0,
"frequency_penalty":0,
}
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {openai.api_key}"
}
response = requests.post(URL, headers=headers, json=payload, stream=False)
data=json.loads(response.content)
summary = data['choices'][0]['message']['content']
print(summary)
return summary
| [
"suggest five various dishes and their recipes based on the ingredients ->\n \n name:'';\n ingredients:'';\n recipe:'';\n \n PLACEHOLDER"
] |
2024-01-10 | AndrewHaward2310/DENSO_GPT_Expert | src~pages~3_%F0%9F%94%8E_chat_bot.py | from langchain.prompts import PromptTemplate
app_name = "DENSO GPT Expert"
# BOILERPLATE
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.vectorstores.chroma import Chroma
#from .prompts import prompt
from dotenv import load_dotenv,find_dotenv
import os
####################STAGE 0 LOAD CONFIG ############################
load_dotenv(find_dotenv(),override=True)
CHROMADB_HOST = os.environ.get("CHROMADB_HOST")
CHROMADB_PORT = os.environ.get("CHROMADB_PORT")
OPEN_AI_API_KEY = os.environ.get("OPEN_AI_API_KEY")
llm = ChatOpenAI(model_name='gpt-3.5-turbo', temperature=0.5, openai_api_key=OPEN_AI_API_KEY)
import streamlit as st
import os
#model = HuggingFaceEmbeddings(model_name = "bkai-foundation-models/vietnamese-bi-encoder")
model = HuggingFaceEmbeddings(model_name='sentence-transformers/paraphrase-multilingual-mpnet-base-v2')
database = Chroma(persist_directory="../chroma_db", embedding_function=model)
st.set_page_config(layout='centered', page_title=f'{app_name}')
ss = st.session_state
if 'debug' not in ss: ss['debug'] = {}
#from DENSO_GPT_Expert.src.Core.model import get_similar_chunks, get_response_from_query
import streamlit as st
st.title("💬 DENSO GPT Expert")
st.caption("🚀 A chatbot powered by SmartABI")
st.sidebar.title("🤖 DENSO GPT Expert")
st.sidebar.write("Welcome to the DENSO GPT Expert")
def get_similar_chunks(query, db=database, k=4):
chunks = db.similarity_search_with_score(query=query, k=k)
return chunks
def get_response_from_query(query, chunks):
docs = " ".join([d[0].page_content for d in chunks])
llm = ChatOpenAI(model_name='gpt-3.5-turbo', temperature=0, openai_api_key=OPEN_AI_API_KEY)
prompt = PromptTemplate(
input_variables=["question", "docs"],
template="""
###
Bạn là một trợ lý quy trình, bạn có kiến thức về quy trình, hướng dẫn và tài liệu máy dựa trên tài liệu của nhà máy.
Dựa trên tài liệu được cung cấp dưới đây, hãy cung cấp hướng dẫn cho câu hỏi dưới đây dựa trên tài liệu đã cung cấp.
Hãy sử dụng ngôn ngữ hướng dẫn, kỹ thuật và một cách ngắn gọn.
Tài liệu: {docs}
Câu hỏi: {question}
Hãy cung cấp tất cả các câu trả lời bằng tiếng Việt.
###
""",
)
chain = LLMChain(llm=llm, prompt=prompt)
test_prompt = prompt.format(question=query, docs=docs)
st.write(test_prompt)
output = chain.run({'question': query, 'docs': docs})
return output
if "messages" not in st.session_state:
st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]
for msg in st.session_state.messages:
st.chat_message(msg["role"]).write(msg["content"])
prompt = st.chat_input("Say something")
if prompt:
st.session_state.messages.append({"role": "user", "content": prompt})
st.chat_message("user").write(prompt)
chunks = get_similar_chunks(query=prompt)
response = get_response_from_query(query=prompt,chunks=chunks)
st.session_state.messages.append({"role": "assistant", "content": response})
st.chat_message("assistant").write(response)
| [
"question",
"\n ###\n Bạn là một trợ lý quy trình, bạn có kiến thức về quy trình, hướng dẫn và tài liệu máy dựa trên tài liệu của nhà máy.\n Dựa trên tài liệu được cung cấp dưới đây, hãy cung cấp hướng dẫn cho câu hỏi dưới đây dựa trên tài liệu đã cung cấp.\n Hãy sử dụng ngôn ngữ hướng dẫn, kỹ thuật và một cách ngắn gọn.\n \n Tài liệu: {docs}\n Câu hỏi: {question}\n \n Hãy cung cấp tất cả các câu trả lời bằng tiếng Việt.\n ###\n ",
"How can I help you?",
"Say something"
] |
2024-01-10 | AndrewHaward2310/DENSO_GPT_Expert | test1.py | import os
from dotenv import load_dotenv, find_dotenv
from langchain.text_splitter import RecursiveCharacterTextSplitter
import fitz
from langchain.docstore.document import Document
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Weaviate
model = HuggingFaceEmbeddings(model_name="sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2")
#database = Chroma(persist_directory="./chroma_db", embedding_function=model)
_ = load_dotenv(find_dotenv(), override=True)
OPEN_AI_API_KEY=os.environ['OPEN_AI_API_KEY']
def split_document(docs, chunk_size=1000, chunk_overlap=20):
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap
)
# Splitting the documents into chunks
chunks = text_splitter.create_documents([docs])
return chunks
def insert_pdf_to_db(file_path):
# Load pdf into pages
pages = fitz.open(file_path)
chunks = [] # create empty chunks
# insert từng chunk vào chunk
for page in pages:
docs = split_document(page.get_text().replace('\n', ' ').lower()) # Return Langchain Documents list
for doc in docs:
chunk = Document(page_content=doc.page_content, metadata={'source': pages.name, 'page': page.number})
chunks.append(chunk)
# Tạo DB
# Chroma.from_documents(chunks, model, persist_directory="./chroma_db")
# print(chunks)
return chunks
sample_pdf_path = ["storage/LNCT800SoftwareApplicationManual.pdf"]
all_chunks = []
all_docs = []
for path in sample_pdf_path:
chunk = insert_pdf_to_db(path)
all_chunks.extend(chunk)
import weaviate
from weaviate.gql.get import HybridFusion
client = weaviate.Client(
url="http://localhost:8081"
)
db = Weaviate.from_documents(all_chunks, model, client = client, by_text=False)
# Perform similarity search
query = "INT3170"
vector = model.embed_query(query)  # HuggingFaceEmbeddings exposes embed_query, not embed
docs = db.similarity_search_by_vector(vector, k=4)
# Print the results
print(docs)
response = (
client.query
.get("JeopardyQuestion", ["question", "answer"])
.with_hybrid(query="INT3170", alpha=0.5)
.with_limit(5)
.do()
)
query_results = (
client.query
.get(
class_name="Article", # Replace with the actual class name in your schema
properties=[
"content",
"source",
"page",
],
)
.with_additional(properties=["score"])
.with_autocut(2)
.with_hybrid(
query=query,
fusion_type=HybridFusion.RELATIVE_SCORE,
properties=[
"content",
],
)
.do()
)
print(query_results) | [] |
2024-01-10 | DLOVRIC2/voiceup | app~video_generation~video_generator.py | import os
from dotenv import load_dotenv
from typing import List, Optional
import shutil
from mutagen.mp3 import MP3
from moviepy import editor
from PIL import Image
from moviepy.editor import TextClip, CompositeVideoClip, ImageClip
from moviepy.editor import *
import logging
from moviepy.video.io.VideoFileClip import VideoFileClip
import glob
from PIL import Image
import openai
import requests
# Load the environment variables
env_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))), '.env')
load_dotenv(env_path)
class Frames:
INSTAGRAM_REEL = (1080, 1920) # size in pixels
YOUTUBE_REEL = (1920, 1080)
TIKTOK_REEL = (1080, 1920)
INSTAGRAM_POST = (1080, 1080)
class VideoGenerator:
# If the app is ran in docker, the db folder is copied into the app folder
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
storage_dir = "app/db/storage" if os.path.exists(os.path.join(root_dir, "app/db/storage")) else "db/storage"
video_storage_path = os.path.join(root_dir, storage_dir, "videos")
image_storage_path = os.path.join(root_dir, storage_dir, "images")
audio_storage_path = os.path.join(root_dir, storage_dir, "audios")
subtitle_storage_path = os.path.join(root_dir, storage_dir, "subtitles")
def __init__(self,
video_path: str = video_storage_path,
audio_path: str = audio_storage_path,
image_path: str = image_storage_path,
subtitle_path: str = subtitle_storage_path,
openai_api_key: Optional[str] = None,
stable_diff_api_key: Optional[str] = None):
"""
:param src: List[str] would be a list of image file locations [db/storage/images/image1.png, ] or it can be
a string "generate" which would use DALLE or Stable diffusion to generate new sets of images.
:param video_path: Where the newly generated video is stored
:param audio_path: Where the newly generated audio is stored
        :param image_path: Where the newly generated images are stored
:param openai_api_key - api key for OpenAI
:param stable_diff_api_key - api key for Stable Diffusion
"""
self.video_path = video_path
self.audio_path = audio_path
self.image_path = image_path
self.subtitle_path = subtitle_path
openai.api_key = os.environ.get("OPENAI_KEY", openai_api_key)
def upload_images(self, image_files: List[str], destination_folder: str):
"""
:param image_files: List of paths of images to upload
:param destination_folder: Folder to which images will be uploaded
"""
for image_file in image_files:
shutil.copy(image_file, destination_folder)
def resize_image(self, image_path: str, size: tuple = Frames.INSTAGRAM_REEL) -> str:
"""Resize an image to the specified size and save it.
Args:
image_path: The path to the image to resize.
size: The desired size as a tuple (width, height).
Returns:
The path to the saved image.
"""
img = Image.open(image_path)
img = img.resize(size)
new_image_path = image_path.rsplit('.', 1)[0] + '_resized.' + image_path.rsplit('.', 1)[1]
img.save(new_image_path)
return new_image_path
def read_audio_file(self, audio_file_path: str):
"""
:param audio_file_path: Path of the audio file to read
:return: Length of the audio file in seconds
"""
audio = MP3(audio_file_path)
return audio.info.length
def create_video(self, image_files: List[str], audio_file_path: str = None, video_size: tuple = Frames.INSTAGRAM_REEL):
"""
:param image_files: List of paths of images to use for the video
:param audio_file_path: Path of the audio file to use for the video
:param video_size: Tuple , defaults to size for IG reel
"""
if not audio_file_path:
# TODO: Current saving of audio is to a file called 'test.mp3' so if its not provided we will just grab that one. This needs to be updated.
audio_file_path = os.path.join(self.audio_path, "test.mp3")
# Calculate duration per image
audio_length = self.read_audio_file(audio_file_path)
duration_per_image = audio_length / len(image_files)
# Open, resize and save images as gif
        images = [Image.open(image).resize(video_size, Image.LANCZOS) for image in image_files]
        images[0].save("temp.gif", save_all=True, append_images=images[1:], duration=int(duration_per_image * 1000))
# Set output file name
output_file_name = os.path.splitext(os.path.basename(audio_file_path))[0] + '.mp4'
output_video_path = os.path.join(self.video_path, output_file_name)
# Combine audio and gif to create video
video = editor.VideoFileClip("temp.gif")
audio = editor.AudioFileClip(audio_file_path)
final_video = video.set_audio(audio)
final_video.write_videofile(output_video_path, fps=30, codec="libx264")
# Delete temporary gif
os.remove("temp.gif")
def generate_video_static(self, audio_file_path: str = None, static_image: Optional[str] = None):
"""
:param audio_file_path: Path of the audio file to use for the video
:param static_image: Path of the static image, defaults to black
"""
# Check static image
if not static_image:
static_image = os.path.join(self.image_path, "black_image.png")
if not audio_file_path:
# TODO: Current saving of audio is to a file called 'test.mp3' so if its not provided we will just grab that one. This needs to be updated.
audio_file_path = os.path.join(self.audio_path, "test.mp3")
# Load the audio file
audio = AudioFileClip(audio_file_path)
# Load the static image file and convert it to a clip with the duration of the audio
img_clip = ImageClip(static_image, duration=audio.duration)
# Set the audio of the video to the audio clip
video = img_clip.set_audio(audio)
# Create file output path
audio_name = os.path.splitext(os.path.basename(audio_file_path))[0] + ".mp4"
video_file_path = os.path.join(self.video_path, audio_name)
# Write the final video file
video.write_videofile(video_file_path, codec='libx264', temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac', fps=24)
    def generate_subtitles(self, audio_file_path: str, subtitle_file_path: str, language='en'):
"""
:param language: Language code of the audio file's language (default is 'en' for English)
"""
# Generate a subtitle file name (without path) from audio file
subtitle_file_name = os.path.splitext(os.path.basename(subtitle_file_path))[0] + ".srt"
# If subtitle file does not exist in the directory, generate it
if not glob.glob(f"{self.subtitle_storage_path}/{subtitle_file_name}"):
# TODO: Figure out how to generate subtitles. This seems to be the fix
# https://stackoverflow.com/questions/66977227/could-not-load-dynamic-library-libcudnn-so-8-when-running-tensorflow-on-ubun
pass
    def generate_images_with_dalle(self, prompt: str, api_key: Optional[str] = None, size: tuple = Frames.INSTAGRAM_POST):
        if api_key:
            openai.api_key = api_key
size_for_openai = "1024x1024"
try:
generation_response = openai.Image.create(
prompt=prompt,
n=1,
size=size_for_openai,
response_format="url"
)
# save the image
generated_image_name = "generated_image.png" # any name you like; the filetype should be .png
generated_image_filepath = os.path.join(self.image_path, generated_image_name)
generated_image_url = generation_response["data"][0]["url"] # extract image URL from response
generated_image = requests.get(generated_image_url).content # download the image
with open(generated_image_filepath, "wb") as image_file:
image_file.write(generated_image)
# write the image to the file
print("Sucess!")
return generated_image_filepath
except Exception as e:
print(e)
if __name__ == "__main__":
img = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))), "db/storage/images/black_image.png")
aud = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))), "db/storage/audios/test.mp3")
vg = VideoGenerator()
# vg.generate_video_static(aud, img)
# Location of all the images
image_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))), "db/storage/images/")
image_list = [file for file in os.listdir(image_path) if file.startswith("rand")]
# vg.create_video(image_files=image_list,
# audio_file_path=aud)
prompt = "rusty old phone booth"
vg.generate_images_with_dalle(prompt=prompt)
| [
"rusty old phone booth"
] |
2024-01-10 | DLOVRIC2/voiceup | app~story_generation~story_generator.py | from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain, SequentialChain, SimpleSequentialChain
from langchain.memory import SimpleMemory
import openai
import os
from dotenv import load_dotenv
# Load the environment variables
current_dir = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.dirname(os.path.dirname(current_dir))
env_path = os.path.join(project_root, ".env")
load_dotenv(env_path)
class StoryTemplates:
"""
Class that holds all the templates required for the story generation, review and improvement.
"""
story_template = """You are a storywriter. Given a short description, you can generate a story based on the idea in 75-100 words.
Idea: {idea}"""
review_template = """You are a story critic. Given the generated story and the initial idea, it is your job to write a feedback on how to
    improve the story. Pay attention to things such as:
1. Is the length of the story within 75-100 words?
2. Is the story engaging?
Story: {story}"""
improve_template = """You are a storywriter. Given a generated story and a review from a critic, it is your job to improve the story.
Make sure you set the story length to MAXIMUM 150 words.
Story: {story}
Review: {review}
"""
class StoryGenerator:
    def __init__(self, api_key: str = None, model: str = "gpt-3.5-turbo"):
key = os.environ.get("OPENAI_KEY", api_key)
if not key:
raise ValueError("OPENAI API key must be provided.")
self.llm = OpenAI(temperature=0.9, openai_api_key=key)
def generate_story(self, idea):
"""
Method that uses llm chains to generates a story, reviews and modifies the story
accordingly.
Args:
idea: Input from the user on the story idea.
Returns:
str - LLM generated story
"""
# Story generation
story_template = PromptTemplate(input_variables=["idea"], template=StoryTemplates.story_template)
story_chain = LLMChain(llm=self.llm, prompt=story_template, output_key="story")
# Review
review_template = PromptTemplate(input_variables=["story"], template=StoryTemplates.review_template)
review_chain = LLMChain(llm=self.llm, prompt=review_template, output_key="review")
# Improve
improve_template = PromptTemplate(input_variables=["story", "review"], template=StoryTemplates.improve_template)
improve_chain = LLMChain(llm=self.llm, prompt=improve_template)
final_chain = SequentialChain(
chains=[story_chain, review_chain, improve_chain],
input_variables=["idea"],
verbose=True
)
return final_chain.run(idea)
if __name__ == "__main__":
chatbot = StoryGenerator()
# Testing
print(chatbot.generate_story("Story about a hackathon where a team of engineers is using elevenlabs to develop voice applications."))
| [
"You are a storywriter. Given a short description, you can generate a story based on the idea in 75-100 words.\n \n Idea: {idea}",
"You are a storywriter. Given a generated story and a review from a critic, it is your job to improve the story.\n Make sure you set the story length to MAXIMUM 150 words.\n Story: {story}\n Review: {review}\n ",
"You are a story critic. Given the generated story and the initial idea, it is your job to write a feedback on how to\n imporve the story. Pay attention to things such as:\n \n 1. Is the length of the story within 75-100 words?\n 2. Is the story engaging?\n \n Story: {story}"
] |
2024-01-10 | YoshimatsuSaito/formula1-map-dash | modules~wiki.py | import ast
import os
from datetime import datetime
import openai
import wikipediaapi
from dotenv import load_dotenv
load_dotenv(".env")
openai.api_key = os.environ.get("OPENAI_API_KEY")
# Adhoc title name of wikipedia page
DICT_CIRCUIT_GPNAME = {
"bahrain": "Bahrain",
"jeddah": "Saudi Arabian",
"albert_park": "Australian",
"imola": "Emilia Romagna",
"miami": "Miami",
"catalunya": "Spanish",
"monaco": "Monaco",
"baku": "Azerbaijan",
"villeneuve": "Canadian",
"silverstone": "British",
"red_bull_ring": "Austrian",
"paulricard": "French",
"hungaroring": "Hungarian",
"spa": "Belgian",
"zandvoort": "Dutch",
"monza": "Italian",
"marina_bay": "Singapore",
"suzuka": "Japanese",
"americas": "United States",
"rodriguez": "Mexico City",
"interlagos": "Brazilian",
"yas_marina": "Abu Dhabi",
"losail": "Qatar",
"vegas": "Las Vegas",
"shanghai": "Chinese",
}
class WikiSearcher:
"""Search wikipedia page and extract information with LLM"""
def __init__(self):
self.wiki = wikipediaapi.Wikipedia(
language="en",
extract_format=wikipediaapi.ExtractFormat.WIKI,
user_agent="My User Agent - formula1-map-dash",
)
def create_dict_title_past(self, gpname, years_to_create=10):
"""Create wikipedia page titles of past races"""
last_year = datetime.now().year - 1
list_year = list(range(last_year, last_year - years_to_create, -1))
return {year: f"{self.create_page_title(gpname, year)}" for year in list_year}
def create_page_title(self, gpname, year):
"""Create wikipedia page title"""
if "Grand Prix" not in gpname:
return f"{year} {gpname} Grand Prix"
return f"{year} {gpname}"
def check_page_exists(self, title):
"""Check existence of the title page"""
title_page = self.wiki.page(title)
if not title_page.exists():
return False
else:
if title_page.title == title:
return True
# for redirect
else:
                return False
def get_page_content(self, title):
"""Get wikipedia page content"""
return self.wiki.page(title).text
def get_condition_of_race(self, page_text, model="gpt-3.5-turbo-16k"):
"""Infer a condition of race day with LLM"""
res = openai.ChatCompletion.create(
model=model,
messages=[
{
"role": "system",
"content": (
f"""
You are a helpful assistant to teach about a wikipedia page as shown below.
{page_text}
"""
),
},
{
"role": "user",
"content": (
f"""
Understand the conditions during the final race and output as follows:
If it can be determined that the race was conducted with no rain at all, the track surface was dry, and no red flags were raised, output as DRY.
In all other cases, output as RAIN.
Your output must be like below.
{{
'condition': {{your answer}}
}}
"""
),
},
],
)
return res["choices"][0]["message"]["content"]
def convert_to_dict(self, race_condition):
"""Convert str into dict"""
return ast.literal_eval(race_condition)
def get_recent_dry_race(self, gpname):
"""Get recent dry races of the grandprix"""
if gpname not in DICT_CIRCUIT_GPNAME.values():
raise ValueError(
f"gpname must be one of the {DICT_CIRCUIT_GPNAME.values()}"
)
# Get page title to search
dict_title = self.create_dict_title_past(gpname)
# Get page title existed
dict_title = {k: v for k, v in dict_title.items() if self.check_page_exists(v)}
# Get page content
dict_page_content = {k: self.get_page_content(v) for k, v in dict_title.items()}
# Loop recent years
for year, page_content in dict_page_content.items():
# Retry {num_retry} times to get condition with LLM
num_retry = 0
while num_retry < 10:
try:
res = self.get_condition_of_race(page_content)
condition = self.convert_to_dict(res)["condition"]
break
except:
num_retry += 1
# Proceed to next loop if all attempt was failed
if num_retry == 10:
continue
# Otherwise check a condition of the year is DRY or not
if condition == "DRY":
return year
# If there is no DRY race, return None
return None
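# Illustrative usage sketch (assumes OPENAI_API_KEY is set and the Wikipedia pages
# exist; "Monaco" is just one of the grand prix names from DICT_CIRCUIT_GPNAME):
# searcher = WikiSearcher()
# print(searcher.get_recent_dry_race("Monaco"))  # a year such as 2022, or None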
| [
"\n You are a helpful assistant to teach about a wikipedia page as shown below. \n \n PLACEHOLDER\n ",
"\n Understand the conditions during the final race and output as follows:\n \n If it can be determined that the race was conducted with no rain at all, the track surface was dry, and no red flags were raised, output as DRY.\n In all other cases, output as RAIN.\n Your output must be like below.\n \n {\n 'condition': {your answer}\n }\n \n "
] |
2024-01-10 | mazen-hassani/LLM_Project | streamlit.py | import streamlit as st
from langchain.embeddings import CohereEmbeddings
import os
from utils import find_closest_embedding
import pandas as pd
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
cohere_api_key = os.getenv("COHERE_API_KEY")
mental_health_faq_filename = os.getenv("FAQ_DB")
df = pd.read_csv(mental_health_faq_filename)
embeddings = CohereEmbeddings(cohere_api_key=cohere_api_key)
st.title("Mental Health FAQ")
# Add a text input widget for the user to enter their question
prompt = st.text_input("Enter your question about mental health:")
# Add a button widget to trigger the search
if st.button("Search"):
# Generate an embedding for the question using the Cohere API
embedding = embeddings.embed_query(prompt)
index = find_closest_embedding(embedding)
# Add a text output widget to display the answer
st.write(df.iloc[index[0][0]]["Answers"]) | [
"Enter your question about mental health:"
] |
2024-01-10 | mazen-hassani/LLM_Project | embeddings_generator.py | import pandas as pd
from langchain.embeddings import CohereEmbeddings
import os
from utils import save_to_vector_database
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
mental_health_faq_filename = os.getenv("FAQ_DB")
df = pd.read_csv(mental_health_faq_filename, nrows=10)
cohere_api_key = os.getenv("COHERE_API_KEY")
embeddings = CohereEmbeddings(cohere_api_key=cohere_api_key)
embeddings_vectors = []
for index, row in df.iterrows():
# Extract the question text from the current row
question = row['Questions']
# Generate an embedding for the question using the Cohere API
embedding = embeddings.embed_query(question)
# Store the embedding in a dictionary with the question ID as key
embeddings_vectors.append(embedding)
# Save the embeddings to a vector database (e.g., Elasticsearch or Faiss)
save_to_vector_database(embeddings_vectors)
| [] |
2024-01-10 | pppa2019/lateral_thinking | get_response.py | import openai
import os
import numpy as np
from transformers import TextGenerationPipeline
from time import sleep
from utils.metrics import bert_similarity
import time
import torch
def get_response_from_OpenAI(prompt, model=None, tokenizer=None):
result = ""
count = 0
print(prompt)
while True:
try:
# print('get gpt4 ][')
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
# model="gpt-4",
messages=[
{"role": "user", "content": prompt},
],
)
break
except Exception as e:
print(e)
print("Retry in 10 seconds")
time.sleep(10)
# response = response["choices"][0]["message"]["content"]
print(response.get('choices')[0]["message"]["content"])
result_raw = response.get('choices')[0]["message"]["content"]
result = response.get('choices')[0]["message"]["content"].strip().split("\n")[0]
request_log = response.get('choices')[0]
count += 1
# if result == '':
# import ipdb;ipdb.set_trace()
# sleep(5)
# if result == "":
# print('Connetion failed! Sleep for 15 sec...')
# sleep(15)
return result, result_raw, request_log
def get_response_from_KG(puzzle_kg_path, solution_kg_path, question, model):
'''
TODO:
- parse question to event graph
- fuzzy match to judge if there is event or entity overlap
'''
import sys
sys.path.append('.')
from utils.load_kg_ann import load_kg_from_ann
from amr2brat import convert_sent2EventGraph
def abstract_entity_event(span_dict):
entity_list = []
event_list = []
for _, value in span_dict.items():
if value[0]=='Head_End':
if len(value)==2:
entity_list.append(value[1])
else:
entity_list.append(' '.join(value[3:]))
elif value[0]=='Event':
if len(value)==2:
entity_list.append(value[1])
else:
event_list.append(' '.join(value[3:]))
return entity_list, event_list
def abstract_event_triple(span_dict, event_dict):
event_triple_list = []
for value in event_dict.values():
event_triple_list.append(' '.join([span_dict[span_id.split(':')[-1]][-1] for span_id in value]))
return event_triple_list
pz_span_dict, _, pz_event_dict = load_kg_from_ann(puzzle_kg_path)
sl_span_dict, _, sl_event_dict = load_kg_from_ann(solution_kg_path)
q_span_dict, _, q_event_dict = convert_sent2EventGraph(question)
pz_entity, pz_event = abstract_entity_event(pz_span_dict)
sl_entity, sl_event = abstract_entity_event(sl_span_dict)
q_entity, q_event = abstract_entity_event(q_span_dict)
pz_event_triples = abstract_event_triple(pz_span_dict, pz_event_dict)
sl_event_triples = abstract_event_triple(sl_span_dict, sl_event_dict)
q_event_triples = abstract_event_triple(q_span_dict, q_event_dict)
    # No events: if an entity matches, answer Yes
threshold = 0.6
q_entity = list(set(q_entity))
pz_entity = list(set(pz_entity))
story_entity = list(set(sl_event+pz_event))
q_event_triples = list(set(q_event_triples))
story_event_triples = list(set(sl_event_triples+pz_event_triples))
pz_event_triples = list(set(pz_event_triples))
simi_score = bert_similarity(q_entity, story_entity, model)
hint_score = bert_similarity(q_entity, pz_entity, model)
event_score = bert_similarity(q_event, story_event_triples, model)
match_event1_id, match_event2_id = np.where(event_score>threshold)
match_hit1_id, match_hit2_id = np.where(simi_score>threshold)
hint_hit1_id, hint_hit2_id = np.where(hint_score>threshold)
keyword_list = []
for id in hint_hit1_id:
keyword_list.append(q_entity[id])
for id in hint_hit2_id:
keyword_list.append(pz_entity[id])
keyword_list = list(set(keyword_list))
if len(pz_entity+sl_entity)==0 and len(match_hit1_id)>0:
return 'Yes', keyword_list
    # Has events: require both an entity match and an event match
if len(match_hit1_id)>0:
if len(match_event1_id):
return 'Yes', keyword_list
return 'No', keyword_list
return "Irrelevant", []
def get_response_from_chatglm(prompt, model, tokenizer):
response, _ = model.chat(tokenizer, prompt, history=[])
return response, None, None
def get_response_from_llama(prompt, model, tokenizer):
pipeline = TextGenerationPipeline(model=model, batch_size=1, tokenizer=tokenizer,
return_full_text=False,
clean_up_tokenization_spaces=True,
handle_long_generation="hole")
pipeline.tokenizer.pad_token_id = model.config.eos_token_id
with torch.no_grad():
hypothesis = pipeline(prompt, temperature=0.1, num_beams=4, max_length=4096, top_p=0.9, eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id)
hypothesis = [item['generated_text'] for item in hypothesis]
return hypothesis[0], None, None
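# Illustrative usage sketch (assumes openai.api_key is configured elsewhere; the
# prompt below is only a placeholder question):
# answer, answer_raw, request_log = get_response_from_OpenAI("Is the man in the story dead? Answer Yes, No or Irrelevant.")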
| [] |
2024-01-10 | tensorchord/modelz-llm | client.py | import argparse
import openai
openai.api_base = "http://localhost:8000"
openai.api_key = "test"
openai.debug = True
def chat():
chat_completion = openai.ChatCompletion.create(
model="fastchat-t5-3b-v1.0",
messages=[
{"role": "user", "content": "Who are you?"},
{"role": "assistant", "content": "I am a student"},
{"role": "user", "content": "What do you learn?"},
{"role": "assistant", "content": "I learn math"},
{"role": "user", "content": "Do you like english?"},
],
max_tokens=100,
)
print(chat_completion)
def embedding():
emb = openai.Embedding.create(
input=["Once upon a time", "There was a frog", "Who lived in a well"],
model="text-embedding-ada-002",
)
print(emb)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--chat", action="store_true")
parser.add_argument("-e", "--embedding", action="store_true")
args = parser.parse_args()
if args.chat:
chat()
if args.embedding:
embedding()
| [
"Who are you?",
"What do you learn?",
"Do you like english?",
"I am a student",
"I learn math"
] |
2024-01-10 | malcolmk181/athena | python~graph_handling.py | """
graph_handling.py
Contains functions & classes for creating graphs and pulling information from them.
"""
from typing import List, Optional
from langchain.chains.openai_functions import create_structured_output_chain
from langchain.chat_models import ChatOpenAI
from langchain.graphs import Neo4jGraph
from langchain.graphs.graph_document import (
GraphDocument,
Node as BaseNode,
Relationship as BaseRelationship,
)
from langchain.prompts import ChatPromptTemplate
from langchain.pydantic_v1 import BaseModel, Field
from langchain.schema import Document
from tqdm import tqdm
import embedding_handling
import file_handling
from load_environment import load_environment
load_environment()
GPT3_TURBO = ChatOpenAI(model="gpt-3.5-turbo-1106", temperature=0)
GPT4_TURBO = ChatOpenAI(model="gpt-4-1106-preview", temperature=0)
GPT3 = ChatOpenAI(model="gpt-3.5-turbo-16k", temperature=0)
GPT4 = ChatOpenAI(model="gpt-4", temperature=0)
NEO4J_URL = "bolt://localhost:7687"
NEO4J_USERNAME = "neo4j"
NEO4J_PASSWORD = "athena_password"
class Property(BaseModel):
"""A single property consisting of key and value"""
key: str = Field(..., description="key")
value: str = Field(..., description="value")
class Node(BaseNode):
properties: Optional[List[Property]] = Field(
None, description="List of node properties"
)
class Relationship(BaseRelationship):
properties: Optional[List[Property]] = Field(
None, description="List of relationship properties"
)
class KnowledgeGraph(BaseModel):
"""Generate a knowledge graph with entities and relationships."""
nodes: List[Node] = Field(..., description="List of nodes in the knowledge graph")
rels: List[Relationship] = Field(
..., description="List of relationships in the knowledge graph"
)
class NodeNameList(BaseModel):
"""A list of the names of knowledge graph nodes."""
names: list[str] = Field(
..., description="List of desired node names from a knowledge graph"
)
def format_property_key(string: str) -> str:
"""Format property keys into snake case."""
words = [word.lower() for word in string.split()]
if not words:
return string.lower()
return "_".join(words)
def props_to_dict(props: list[Property]) -> dict:
"""Convert properties to a dictionary."""
properties = {}
if not props:
return properties
for prop in props:
properties[format_property_key(prop.key)] = prop.value
return properties
def map_to_base_node(node: Node) -> BaseNode:
"""Map the KnowledgeGraph Node to the base Node."""
properties = props_to_dict(node.properties) if node.properties else {}
# Add name property for better Cypher statement generation
properties["name"] = node.id.title()
return BaseNode(
id=node.id.title(), type=node.type.capitalize(), properties=properties
)
def map_to_base_relationship(rel: Relationship) -> BaseRelationship:
"""Map the KnowledgeGraph Relationship to the base Relationship."""
source = map_to_base_node(rel.source)
target = map_to_base_node(rel.target)
properties = props_to_dict(rel.properties) if rel.properties else {}
return BaseRelationship(
source=source, target=target, type=rel.type, properties=properties
)
def get_extraction_chain(
llm: ChatOpenAI,
allowed_nodes: Optional[List[str]] = None,
allowed_rels: Optional[List[str]] = None,
verbose: bool = False,
):
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
f"""# Knowledge Graph Instructions for GPT
## 1. Overview
You are a top-tier algorithm designed for extracting information from markdown notes in structured formats to build a knowledge graph.
- **Nodes** represent entities and concepts. They're akin to Wikipedia nodes.
- The aim is to achieve simplicity and clarity in the knowledge graph, making it accessible for a vast audience.
## 2. Labeling Nodes
- **Consistency**: Ensure you use basic or elementary types for node labels.
- For example, when you identify an entity representing a person, always label it as **"person"**. Avoid using more specific terms like "mathematician" or "scientist".
- **Node IDs**: Never utilize integers as node IDs. Node IDs should be names or human-readable identifiers found in the text.
{'- **Allowed Node Labels:**' + ", ".join(allowed_nodes) if allowed_nodes else ""}
{'- **Allowed Relationship Types**:' + ", ".join(allowed_rels) if allowed_rels else ""}
## 3. Handling Numerical Data and Dates
- Numerical data, like age or other related information, should be incorporated as attributes or properties of the respective nodes.
- **No Separate Nodes for Dates/Numbers**: Do not create separate nodes for dates or numerical values. Always attach them as attributes or properties of nodes.
- **Property Format**: Properties must be in a key-value format.
- **Quotation Marks**: Never use escaped single or double quotes within property values.
- **Naming Convention**: Use camelCase for property keys, e.g., `birthDate`.
## 4. Coreference Resolution
- **Maintain Entity Consistency**: When extracting entities, it's vital to ensure consistency.
If an entity, such as "John Doe", is mentioned multiple times in the text but is referred to by different names or pronouns (e.g., "Joe", "he"),
always use the most complete identifier for that entity throughout the knowledge graph. In this example, use "John Doe" as the entity ID.
Remember, the knowledge graph should be coherent and easily understandable, so maintaining consistency in entity references is crucial.
## 5. Strict Compliance
Adhere to the rules strictly. Non-compliance will result in termination.
""",
),
(
"human",
"Use the given format to extract information from the following input: {input}",
),
("human", "Tip: Make sure to answer in the correct format"),
]
)
return create_structured_output_chain(KnowledgeGraph, llm, prompt, verbose=verbose)
def get_graph_connector() -> Neo4jGraph:
"""Returns a wrapper for the Neo4j database."""
return Neo4jGraph(
url=NEO4J_URL,
username=NEO4J_USERNAME,
password=NEO4J_PASSWORD,
)
def delete_graph(are_you_sure: bool) -> None:
"""This will wipe all nodes & relationships from the Neo4j Graph."""
if are_you_sure:
result = get_graph_connector().query("MATCH (n) DETACH DELETE n")
if len(result) == 0:
print("Neo4j database emptied.")
else:
print("Delete query returned results. Something may have gone wrong.")
def get_knowledge_graph_from_chunk(
chunk: Document,
llm: ChatOpenAI,
allowed_nodes: list[str] | None = None,
allowed_rels: list[str] | None = None,
verbose: bool = False,
) -> KnowledgeGraph:
"""Runs the LLM function to extract a Knowledge Graph from a document chunk."""
return get_extraction_chain(llm, allowed_nodes, allowed_rels, verbose).run(
chunk.page_content
)
def create_graph_document_from_note(
file_name: str,
llm: ChatOpenAI,
allowed_nodes: list[str] | None = None,
allowed_rels: list[str] | None = None,
verbose: bool = False,
) -> GraphDocument:
file_store = file_handling.load_file_store()
if file_store is None:
print("Failed to retrieve file store. Exiting graph creation.")
return
collection = embedding_handling.get_vector_store_collection()
doc, chunks = embedding_handling.get_chunks_from_file_name(file_name)
# make vault node
vault_node = BaseNode(id="ObsidianVault", type="ObsidianVaultNode")
# make note node
note_node = BaseNode(
id=file_store[file_name]["uuid"],
type="ObsidianNote",
properties={"file_name": file_name},
)
# vault to note relationship
vault_note_relationship = BaseRelationship(
source=vault_node, target=note_node, type="contains_note"
)
all_base_nodes = [vault_node, note_node]
all_base_relationships = [vault_note_relationship]
# get knowledge graph from chunks
for i, chunk in tqdm(
enumerate(chunks),
desc="Creating graph from each document chunk",
total=len(chunks),
):
chunk_kg = get_knowledge_graph_from_chunk(
chunk, llm, allowed_nodes, allowed_rels, verbose
)
# convert knowledge graph into base nodes & base relationships
base_nodes = [map_to_base_node(node) for node in chunk_kg.nodes]
base_relationships = [map_to_base_relationship(rel) for rel in chunk_kg.rels]
# make chunk node
chunk_node = BaseNode(
id=file_store[file_name]["chunks"][i],
type="ObsidianNoteChunk",
properties={
"file_name": file_name,
"chunk_number": i,
"embeddings": collection.get(
ids=file_store[file_name]["chunks"][i], include=["embeddings"]
)["embeddings"][0],
},
)
# add relationship between note node and chunk node
note_to_chunk_relationship = BaseRelationship(
source=note_node, target=chunk_node, type="contains_chunk"
)
# add relationships between chunk nodes and GPT-generated nodes
chunk_to_node_relationships = []
for node in base_nodes:
chunk_to_node_relationships.append(
BaseRelationship(source=chunk_node, target=node, type="references_node")
)
# collect all nodes & relationships
all_base_nodes += base_nodes + [chunk_node]
all_base_relationships += (
base_relationships
+ chunk_to_node_relationships
+ [note_to_chunk_relationship]
)
# assemble nodes & relationships into GraphDocument
graph_document = GraphDocument(
nodes=all_base_nodes, relationships=all_base_relationships, source=doc
)
return graph_document
# later, graph.add_graph_documents([graph_document])
def get_all_node_names() -> list[str]:
"""Returns a list of all the names of the nodes in the graph"""
names: list[dict] = get_graph_connector().query(
"""
MATCH (n)
WHERE n.name IS NOT NULL
RETURN n.name"""
)
return [list(d.values())[0] for d in names]
def get_relevant_nodes_from_question(
llm: ChatOpenAI,
node_name_list: list[str],
question: str,
verbose: bool = False,
) -> NodeNameList:
"""
Uses LLM to shorten & sort a list of node names by how relevant they are to a
user question. This *does* appear to use the LLM's previous knowledge.
"""
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"""# Prompt for GPT-4:
Question:
"{question}"
List of Node Names from the Knowledge Graph:
{names}
# Task for GPT-4:
Analyze the provided list of names from the knowledge graph in the context of the question. Identify and list the names that are most relevant to the question, ordering them from the most important to less important. Do not include names that are not very important. Consider only the content of the question and do not use prior knowledge.
""",
),
("human", "Tip: Make sure to answer in the correct format"),
]
)
chain = create_structured_output_chain(NodeNameList, llm, prompt, verbose=verbose)
return chain.run(question=question, names=", ".join(node_name_list))
def get_chunk_ids_by_node_names(node_names: list[str]) -> list[str]:
"""Given a list of node names, returns the ids of the chunks that reference them.
May contain duplicates.
"""
if len(node_names) == 0:
return []
# This query will collect ids even if there are duplicates of the named node
ids: list[dict] = get_graph_connector().query(
f"""
MATCH (n)
WHERE n.name IN [{",".join([f'"{name}"' for name in node_names])}]
OPTIONAL MATCH (n)-[r]-(related:ObsidianNoteChunk)"""
+ """ RETURN collect({id: related.id}) as relatedNodes
"""
)
return [list(d.values())[0] for d in ids[0]["relatedNodes"]]
def get_non_housekeeping_relationships_from_node_name(
node_name: str,
allowed_names: list[str] | None = None,
) -> list[tuple[dict, str, dict]]:
"""
Given a node name, will return the relationships between that node and the other
non-housekeeping nodes in the graph.
If a list of names is provided in allowed_names, will only return the relationships
that occur between the primary node and any of the allowed nodes.
"""
allowed_node_syntax = ""
# Block of Cypher for modifying the results to blank out non-allowed nodes
if allowed_names:
allowed_node_syntax += "WHEN NOT related.name IN ["
allowed_node_syntax += ",".join([f'"{name}"' for name in allowed_names])
allowed_node_syntax += "]\nTHEN {label: 'Unrelated'}"
query_results: list[dict] = get_graph_connector().query(
f"""
MATCH (n)
WHERE n.name = '{node_name}'"""
+ """
OPTIONAL MATCH (n)-[r]-(related)
RETURN n,
collect(r) as relationships,
collect(
CASE
WHEN 'ObsidianNoteChunk' IN labels(related)
THEN {label: 'ObsidianNoteChunk'}"""
+ allowed_node_syntax
+ """
ELSE related
END
) as relatedNodes
"""
)
results: list[tuple[dict, str, dict]] = []
# could be more than one node with the same name
for node in query_results:
for relationship in node["relationships"]:
# second item is edge type
# len checks are to make sure the node didn't get filtered out
if (
relationship[1] != "REFERENCES_NODE"
and len(relationship[0]) != 0
and len(relationship[2]) != 0
):
results.append(relationship)
return results
def get_interrelationships_between_nodes(
node_names: list[str],
) -> list[tuple[dict, str, dict]]:
"""Given a list of node names, will return the relationships between them."""
node_str = ",".join([f'"{node}"' for node in node_names])
query_results: list[dict] = get_graph_connector().query(
f"""
UNWIND [{node_str}] AS nodeName1
UNWIND [{node_str}] AS nodeName2
MATCH (n1)-[r]->(n2)
WHERE n1.name = nodeName1 AND n2.name = nodeName2
RETURN n1, r, n2
"""
)
results: list[tuple[dict, str, dict]] = []
# one row per relationship
for row in query_results:
results.append(row["r"])
return results
def summarize_relationship(
llm: ChatOpenAI,
relationship: tuple[dict, str, dict],
verbose: bool = False,
) -> str:
"""Uses LLM to summarize the relationship between two nodes."""
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"""# Prompt for GPT-4:
Relationship between two nodes:
"{relationship}"
# Task for GPT-4:
This is a relationship between two nodes in a Neo4j graph. Please use this information to give a summary of this relationship in a succinct paragraph that does not mention anything about a graph or nodes.
""",
),
]
)
chain = create_structured_output_chain(str, llm, prompt, verbose=verbose)
return chain.run(relationship=relationship)
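# Illustrative usage sketch (assumes a running Neo4j instance at NEO4J_URL and valid
# OpenAI credentials; "example_note.md" is a placeholder note name):
# graph = get_graph_connector()
# graph_doc = create_graph_document_from_note("example_note.md", GPT4_TURBO)
# graph.add_graph_documents([graph_doc])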
| [
"- **Allowed Node Labels:**",
"# Prompt for GPT-4:\nRelationship between two nodes:\n\"{relationship}\"\n\n# Task for GPT-4:\nThis is a relationship between two nodes in a Neo4j graph. Please use this information to give a summary of this relationship in a succinct paragraph that does not mention anything about a graph or nodes.\n",
"[('system', '# Prompt for GPT-4:\\nRelationship between two nodes:\\n\"{relationship}\"\\n\\n# Task for GPT-4:\\nThis is a relationship between two nodes in a Neo4j graph. Please use this information to give a summary of this relationship in a succinct paragraph that does not mention anything about a graph or nodes.\\n')]",
"human",
"- **Allowed Relationship Types**:",
"Use the given format to extract information from the following input: {input}",
"Tip: Make sure to answer in the correct format",
"# Prompt for GPT-4:\nQuestion:\n\"{question}\"\n\nList of Node Names from the Knowledge Graph:\n{names}\n\n# Task for GPT-4:\nAnalyze the provided list of names from the knowledge graph in the context of the question. Identify and list the names that are most relevant to the question, ordering them from the most important to less important. Do not include names that are not very important. Consider only the content of the question and do not use prior knowledge.\n",
", ",
"[('system', '# Prompt for GPT-4:\\nQuestion:\\n\"{question}\"\\n\\nList of Node Names from the Knowledge Graph:\\n{names}\\n\\n# Task for GPT-4:\\nAnalyze the provided list of names from the knowledge graph in the context of the question. Identify and list the names that are most relevant to the question, ordering them from the most important to less important. Do not include names that are not very important. Consider only the content of the question and do not use prior knowledge.\\n'), ('human', 'Tip: Make sure to answer in the correct format')]"
] |
2024-01-10 | malcolmk181/athena | python~ask_questions.py | """
ask_questions.py
Functions for answering questions based on the embeddings and knowledge graph created.
"""
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage
from tqdm import tqdm
import graph_handling
import embedding_handling
def get_answer_from_sources(
question: str,
relationship_summaries: list[str],
document_chunks: list[str],
llm: ChatOpenAI = graph_handling.GPT4_TURBO,
) -> str:
if len(relationship_summaries) == 0 and len(document_chunks) == 0:
return "Sorry, but I couldn't find enough information to answer your question."
formatted_summaries = "\n\n".join(relationship_summaries)
formatted_chunks = "\n\n".join(document_chunks)
messages = [
SystemMessage(
content=f"""You are a helpful assistant that responds to user queries given the following context. You are given both a series of facts from a knowledge graph, and raw chunks of documents. Combine both sources to provide a truthful answer.
Here is a collection of facts pulled from a knowledge graph:
{formatted_summaries}
Here is a collection of relevant chunks of notes, pulled from a collection of the user's documents:
{formatted_chunks}
Use all of the above resources to answer the user's question in as much detail as the context can provide by itself. If the provided facts or document chunks do not provide enough information to answer the question, then say "Sorry, but I could not find enough information to answer the question."
"""
),
HumanMessage(content=question),
]
return llm(messages).content
def answer_question(
question: str,
llm: ChatOpenAI = graph_handling.GPT4_TURBO,
) -> str:
"""
Answers a question based on the embeddings and knowledge graph created from the
Obsidian vault.
This pulls in information from both the embeddings and the knowledge graph:
For the embeddings:
- Uses GPT to convert the question into a declarative statement
- Creates embeddings from the declarative statement
- Does a search of the vector store and grabs the most relevant document chunk IDs
by cosine similarity
For the knowledge graph:
- Uses GPT to identify which nodes in the graph are most relevant to the question
- Collects the relationships between these nodes in the graph and has GPT summarize
these relationships
- Collects the IDs of the document chunks that reference the most relevant nodes
Together:
- Combines the collected document chunk IDs from the embeddings and knowledge graph,
and queries the vector store to collect the associated document chunks
- Sends GPT a prompt containing the relationship summaries and the retrieved
document chunks, alongside the user's original question.
"""
result = ""
with tqdm(total=7) as pbar:
print(
"Converting user query into declarative statement, creating embeddings, and searching the vector store for similar document chunks."
)
vector_search_result = embedding_handling.user_query_to_chromadb_query_result(
question
)
pbar.update(1)
print("Collecting most relevant document chunks by embedding similarity.")
chunk_ids_from_embeddings = embedding_handling.chroma_query_result_to_chunk_ids(
vector_search_result
)
pbar.update(1)
print("Identifying most relevant nodes in knowledge graph.")
node_names = graph_handling.get_relevant_nodes_from_question(
graph_handling.GPT4, graph_handling.get_all_node_names(), question
).names
pbar.update(1)
print("Collecting relevant document chunks by identified KG nodes.")
chunk_ids_from_graph = graph_handling.get_chunk_ids_by_node_names(node_names)
pbar.update(1)
# Combine the chunk IDs
combined_chunk_ids = list(set(chunk_ids_from_embeddings + chunk_ids_from_graph))
# Grab vector store connection
collection = embedding_handling.get_vector_store_collection()
# Grab document chunks by IDs
docs = collection.get(ids=combined_chunk_ids, include=["documents"])[
"documents"
]
print("Collecting relationships from knowledge graph between identified nodes.")
node_relationships = graph_handling.get_interrelationships_between_nodes(
node_names
)
pbar.update(1)
print("Summarizing knowledge graph relationships.")
node_relationship_summaries = list(
map(
lambda r: graph_handling.summarize_relationship(
graph_handling.GPT4_TURBO, r
),
node_relationships,
)
)
pbar.update(1)
print(
"Providing relationships and document chunks to GPT to answer the question."
)
result = get_answer_from_sources(
question, node_relationship_summaries, docs, llm
)
pbar.update(1)
return result
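# Illustrative usage sketch (assumes the vault has already been embedded and loaded
# into the knowledge graph; the question text is only an example):
# print(answer_question("What are the main themes across my notes?"))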
| [
"You are a helpful assistant that responds to user queries given the following context. You are given both a series of facts from a knowledge graph, and raw chunks of documents. Combine both sources to provide a truthful answer.\n \n Here is a collection of facts pulled from a knowledge graph:\n\n PLACEHOLDER\n\n Here is a collection of relevant chunks of notes, pulled from a collection of the user's documents:\n\n PLACEHOLDER\n\n Use all of the above resources to answer the user's question in as much detail as the context can provide by itself. If the provided facts or document chunks do not provide enough information to answer the question, then say \"Sorry, but I could not find enough information to answer the question.\"\n "
] |
2024-01-10 | dmarx/langchain-hub | ci_scripts~file-check.py | from pathlib import Path
from langchain.prompts import load_prompt
BASE_FOLDER = Path("prompts")
folders = BASE_FOLDER.glob("**")
def check_files(files):
file_names = [f.name for f in files]
if "README.md" not in file_names:
raise ValueError(f"Expected to find a README.md file, but found {files}")
other_files = [file for file in files if file.name != "README.md"]
for other_file in other_files:
if other_file.suffix in (".json", ".yaml"):
load_prompt(other_file)
# TODO: testing for python files
def check_all_folders():
for folder in folders:
folder_path = Path(folder)
files = [x for x in folder_path.iterdir() if x.is_file()]
if len(files) > 0:
try:
check_files(files)
except Exception as e:
raise ValueError(f"Found error with {folder}: {e}")
if __name__ == "__main__":
check_all_folders()
| [] |
2024-01-10 | circlestarzero/Openaibot | test~nlp_server_test.py | # -*- coding: utf-8 -*-
# @Time : 12/7/22 10:14 PM
# @FileName: nlp.py
# @Software: PyCharm
# @Github :sudoskys
# NO USE
from prompt_server import create_item
from openai_kira.Chat import Chatbot
import nltk
nltk.download('punkt')
nltk.download('stopwords')
| [] |
2024-01-10 | circlestarzero/Openaibot | utils~Frequency.py | # -*- coding: utf-8 -*-
# @Time : 1/5/23 10:05 AM
# @FileName: Frequency.py
# @Software: PyCharm
# @Github :sudoskys
import random
import time
from openai_kira import Chat
from openai_kira.utils.chat import Utils
from utils.Data import User_Message, Service_Data, RedisConfig, DataWorker
service = Service_Data.get_key()
redis_conf = service["redis"]
redis_config = RedisConfig(**redis_conf)
# Utility data type
Trigger = DataWorker(host=redis_config.host,
port=redis_config.port,
db=redis_config.db,
password=redis_config.password,
prefix="Open_Ai_bot_trigger_")
class CheckSeq(object):
def __init__(self):
self._help_keywords = ["怎么",
"How",
"今天",
"吗?",
"什么",
"知道",
"无聊",
"啊?",
"What",
"what",
"who",
"how",
"Who",
"Why",
"why",
"Where",
"谁能",
"呢",
"吗",
"How to",
"how to",
"如何做",
"帮我",
"帮助我",
"请给我",
"给出建议",
"给建议",
"给我",
"给我一些",
"请教",
"介绍",
"如何",
"帮朋友",
"需要什么",
"注意什么",
"草",
"呀",
"怎么办"
]
def help(self, text):
has = False
for item in self._help_keywords:
if item in text:
has = True
return has
class Vitality(object):
def __init__(self, group_id: int):
self.group_id = str(group_id)
self.time_interval = 60 * 10
_oid = f"-{abs(group_id)}"
self.receiver = Chat.Chatbot(
api_key="1",
conversation_id=int(_oid),
token_limit=1500,
)
def __tid(self):
return self.group_id + str(time.strftime("%Y%m%d%H%M", time.localtime()))
def _grow_request_vitality(self):
_tid = self.__tid()
_time_matrix = Trigger.getKey(_tid)
if _time_matrix:
if not isinstance(_time_matrix, list):
matrix = []
matrix = _time_matrix
else:
matrix = []
matrix.append(time.time())
Trigger.setKey(_tid, matrix, exN=60 * 5)
def _get_chat_vitality(self):
_tid = self.__tid()
_time_matrix = Trigger.getKey(_tid)
if not isinstance(_time_matrix, list):
return len([])
if _time_matrix:
return len(_time_matrix)
else:
return len([])
def trigger(self, Message: User_Message, config):
"""
        Track the group message context to feed the "catch" feature
:param Message:
:param config:
:return:
"""
_text = Message.text
_name = Message.from_user.name
self._grow_request_vitality()
if len(_text) < 3:
return False
self.receiver.record_message(ask=f"{_name}:{_text}", reply=".:.")
@staticmethod
def isHighestSentiment(text, cache):
now = Utils.sentiment(text).get("score")
for item in cache:
_score = Utils.sentiment(item).get("score")
if _score > now:
return False
return True
def check(self, Message: User_Message):
_text = Message.text
_min = random.randint(10, 100)
if len(_text) < 5:
return False
        # Check the frequency lock and return early if it is set
if Trigger.getKey(self.group_id):
return False
        # Frequency counter
_frequency = self._get_chat_vitality()
        # Return early
if _frequency < 5:
return False
        # Qualification check, context stage
status = False
        # Random draw
_lucky = random.randint(1, 100)
if _lucky > 80:
status = True
        # Final content check
_check = CheckSeq()
if _check.help(_text):
status = True
if status:
status = False
_score = Utils.sentiment(_text).get("score")
if isinstance(_score, float):
if _score > 1.8 or _score < -2:
status = True
        # Final check
if status:
Trigger.setKey(self.group_id, "True", exN=60 * _min)
return status
"""
        # Compute the initial state
message_cache = self.receiver.read_memory(plain_text=True, sign=True)
message_cache: list
message_cache = [item for item in message_cache if item]
if len(message_cache) < 20:
return False
_cache = message_cache[:20]
if self.isHighestSentiment(text=_text, cache=_cache):
"""
| [] |
2024-01-10 | circlestarzero/Openaibot | App~EventServer.py | # -*- coding: utf-8 -*-
# @Time : 1/5/23 6:58 PM
# @FileName: LumiServer.py
# @Software: PyCharm
# @Github :sudoskys
# app.py
import json
import pathlib
from typing import Optional
from fastapi import FastAPI
import openai_kira
from fastapi.responses import Response
from pydantic import BaseModel
from App.Event import ContentDfa, TTSSupportCheck
from utils.Data import Service_Data, Api_keys, DefaultData, DictUpdate
from loguru import logger
_service = Service_Data.get_key()
_redis_conf = _service["redis"]
_tts_conf = _service["tts"]
_plugin_table = _service["plugin"]
global _csonfig
app = FastAPI()
# IO
def load_csonfig():
global _csonfig
now_table = DefaultData.defaultConfig()
if pathlib.Path("./Config/config.json").exists():
with open("./Config/config.json", encoding="utf-8") as f:
_csonfig = json.load(f)
else:
_csonfig = {}
DictUpdate.dict_update(now_table, _csonfig)
_csonfig = now_table
return _csonfig
load_csonfig()
openai_kira.setting.redisSetting = openai_kira.setting.RedisConfig(**_redis_conf)
openai_kira.setting.openaiApiKey = Api_keys.get_key("./Config/api_keys.json")["OPENAI_API_KEY"]
class Prompt(BaseModel):
cid: int
start_sequ: str = "Human" # 你的名字
restart_sequ: str = "Neko" # Ai 的名字
prompt: str
role: str = "" # Ai 的自我认同
character: list = None # Ai 的性格
head: str = "" # 对话的场景定位
model: str = "text-davinci-003" # 模型
class Filter(BaseModel):
prompt: str
moderation: bool = True
class Reply(BaseModel):
status: bool
data: Optional[bytes] = None
response: Optional[dict] = None
class FilterReply(BaseModel):
dfa: str
flagged: list
@app.post("/filter")
async def filter_str(check: Filter):
    # Content moderation
_harm_result = []
if check.moderation:
try:
_Moderation_rep = await openai_kira.Moderations().create(input=check.prompt)
_moderation_result = _Moderation_rep["results"][0]
_harm_result = [key for key, value in _moderation_result["categories"].items() if value == True]
except Exception as e:
logger.error(f"Moderation:{check.prompt}-{e}")
return FilterReply(dfa=ContentDfa.filter_all(check.prompt), flagged=_harm_result)
@app.post("/getreply")
async def get_reply(req: Prompt):
receiver = openai_kira.Chat.Chatbot(
conversation_id=req.cid,
call_func=Api_keys.pop_api_key,
token_limit=3751,
start_sequ=req.start_sequ,
restart_sequ=req.restart_sequ,
)
try:
response = await receiver.get_chat_response(model=req.model,
prompt=str(req.prompt),
max_tokens=int(_csonfig["token_limit"]),
role=req.role,
web_enhance_server=_plugin_table,
optimizer=None,
no_penalty=not _csonfig["auto_adjust"],
character=req.character,
head=req.head,
)
_got = Reply(status=True, response=response)
except Exception as e:
logger.error(e)
return Reply(status=False)
else:
return _got
@app.get("/getvoice")
async def get_voice(text: str, cid: int):
try:
_req = await TTSSupportCheck(text=text, user_id=cid)
except Exception as e:
logger.error(e)
return Response(status_code=417)
else:
status = False
if _req:
status = True
if status:
import base64
httpRes = Response(content=_req, media_type='audio/ogg')
httpRes.headers['X-Bot-Reply'] = str(base64.b64encode(text.encode('utf-8')), 'utf-8')
return httpRes
return Response(status_code=417)
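# Illustrative request sketch (assumes the app is served with uvicorn on localhost:8000;
# adjust host and port to your deployment):
# curl -X POST http://127.0.0.1:8000/getreply \
#      -H "Content-Type: application/json" \
#      -d '{"cid": 123, "prompt": "Hello"}'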
| [] |
2024-01-10 | circlestarzero/Openaibot | test~vits_nlp_test.py | # -*- coding: utf-8 -*-
# @Time : 12/20/22 10:19 PM
# @FileName: vits_nlp.py
# @Software: PyCharm
# @Github :sudoskys
import time
from openai_kira.utils.chat import Cut
res = Cut.chinese_sentence_cut(
"これから日本...大家好,我是可莉,我建议大家不要有其它的营养,所以不能只看它的热量就作为应急食品来使用。")
print(res)
from fatlangdetect import detect
t1 = time.time()
result = detect(text="你好", low_memory=True)
print(result)
result = detect(text="你好你好,これから日本", low_memory=True)
print(result)
result = detect(text="怎麼不給爺嘿嘿呢", low_memory=True)
print(result)
t2 = time.time()
print(t2 - t1)
| [] |
2024-01-10 | circlestarzero/Openaibot | App~Event.py | # -*- coding: utf-8 -*-
# @Time : 9/22/22 11:04 PM
# @FileName: Event.py
# @Software: PyCharm
# @Github :sudoskys
import asyncio
# Events, fully isolated
import json
import pathlib
import random
import re
import time
# from io import BytesIO
from typing import Union
from loguru import logger
import openai_kira
from openai_kira.Chat import Optimizer
from openai_kira.utils.chat import Cut
# from App.chatGPT import PrivateChat
from utils.Chat import Utils, Usage, rqParser, GroupManger, UserManger, Header, Style
from utils.Data import DictUpdate, DefaultData, Api_keys, Service_Data, User_Message, PublicReturn, ProxyConfig
from utils.Setting import ProfileReturn
from utils.TTS import TTS_Clint, TTS_REQ
from utils.Detect import DFA, Censor, get_start_name
#
# fast text langdetect
_service = Service_Data.get_key()
REDIS_CONF = _service["redis"]
TTS_CONF = _service["tts"]
PLUGIN_TABLE = _service["plugin"]
PROXY_CONF = ProxyConfig(**_service["proxy"])
HARM_TYPE = _service["moderation_type"]
HARM_TYPE.extend([
'violence',
'violence/graphic'
])
HARM_TYPE = list(set(HARM_TYPE))
# Proxy
if PROXY_CONF.status:
openai_kira.setting.proxyUrl = PROXY_CONF.url,
openai_kira.setting.redisSetting = openai_kira.setting.RedisConfig(**REDIS_CONF)
urlForm = {
"Danger.form": [
"aHR0cHM6Ly9yYXcuZ2l0aHVidXNlcmNvbnRlbnQuY29tL2Z3d2RuL3NlbnNpdGl2ZS1zdG9wLXdvcmRzL21hc3Rlci8lRTYlOTQlQkYlRTYlQjIlQkIlRTclQjElQkIudHh0",
"aHR0cHM6Ly9yYXcuZ2l0aHVidXNlcmNvbnRlbnQuY29tL1RlbGVjaGFCb3QvQW50aVNwYW0vbWFpbi9EYW5nZXIudHh0",
"aHR0cHM6Ly9yYXcuZ2l0aHVidXNlcmNvbnRlbnQuY29tL2FkbGVyZWQvRGFuZ2Vyb3VzU3BhbVdvcmRzL21hc3Rlci9EYW5nZXJvdXNTcGFtV29yZHMvR2VuZXJhbF9TcGFtV29yZHNfVjEuMC4xX0NOLm1pbi50eHQ=",
"aHR0cHM6Ly9yYXcuZ2l0aHVidXNlcmNvbnRlbnQuY29tL0phaW1pbjEzMDQvc2Vuc2l0aXZlLXdvcmQtZGV0ZWN0b3IvbWFpbi9zYW1wbGVfZmlsZXMvc2FtcGxlX2Jhbm5lZF93b3Jkcy50eHQ=",
]
}
def initCensor():
if PROXY_CONF.status:
proxies = {
'all://': PROXY_CONF.url,
} # 'http://127.0.0.1:7890' # url
return Censor.initWords(url=urlForm, home_dir="./Data/", proxy=proxies)
else:
return Censor.initWords(url=urlForm, home_dir="./Data/")
if not pathlib.Path("./Data/Danger.form").exists():
initCensor()
# Content filter
ContentDfa = DFA(path="./Data/Danger.form")
global _csonfig
# IO
def load_csonfig():
global _csonfig
now_table = DefaultData.defaultConfig()
if pathlib.Path("./Config/config.json").exists():
with open("./Config/config.json", encoding="utf-8") as f:
_csonfig = json.load(f)
else:
_csonfig = {}
DictUpdate.dict_update(now_table, _csonfig)
_csonfig = now_table
return _csonfig
def save_csonfig(pLock=None):
if pLock:
pLock.acquire()
with open("./Config/config.json", "w+", encoding="utf8") as f:
json.dump(_csonfig, f, indent=4, ensure_ascii=False)
if pLock:
pLock.release()
async def TTSSupportCheck(text, user_id, limit: bool = True):
global TTS_CONF
"""
    Process the message text, build the TTS request, and return a byte stream or None. Part of the Event module.
:return:
"""
if not TTS_CONF["status"]:
return
if TTS_CONF['type'] == 'none':
return
try:
from fatlangdetect import detect
lang_type = detect(text=text.replace("\n", "").replace("\r", ""), low_memory=True).get("lang").upper()
except Exception as e:
from langdetect import detect
lang_type = detect(text=text.replace("\n", "").replace("\r", ""))[0][0].upper()
if TTS_CONF["type"] == "vits":
_vits_config = TTS_CONF["vits"]
if lang_type not in ["ZH", "JA"]:
return
if len(text) > _vits_config["limit"] and limit:
return
cn_res = Cut.chinese_sentence_cut(text)
cn = {i: f"[{lang_type}]" for i in cn_res}
        # Synthesize
_spell = [f"{cn[x]}{x}{cn[x]}" for x in cn.keys()]
_new_text = "".join(_spell)
_new_text = "[LENGTH]1.4[LENGTH]" + _new_text
        # Receive the data
result, e = await TTS_Clint.request_vits_server(url=_vits_config["api"],
params=TTS_REQ(task_id=user_id,
text=_new_text,
model_name=_vits_config["model_name"],
speaker_id=_vits_config["speaker_id"],
audio_type="ogg"
))
if not result:
logger.error(f"TTS:{user_id} --type:vits --content: {text}:{len(text)} --{e}")
return
logger.info(f"TTS:{user_id} --type:vits --content: {text}:{len(text)}")
        # Return the byte stream
return result
# USE AZURE
elif TTS_CONF["type"] == "azure":
_azure_config = TTS_CONF["azure"]
_new_text = text
_speaker = _azure_config["speaker"].get(lang_type)
if len(text) > _azure_config["limit"]:
return
if not _speaker:
logger.info(f"TTS:{user_id} --type:azure --content: {text}:{len(text)} --this type lang not supported")
return
result, e = await TTS_Clint.request_azure_server(key=_azure_config["key"],
location=_azure_config["location"],
text=_new_text,
speaker=_speaker
)
if not result:
logger.error(f"TTS:{user_id} --type:azure --content: {text}:{len(text)} --{e}")
return
logger.info(f"TTS:{user_id} --type:azure --content: {text}:{len(text)}")
        # Return the byte stream
return result
else:
        # Nothing available
return
async def Forget(user_id: int, chat_id: int):
"""
    Reset the message flow
:param chat_id:
:param user_id:
:return:
"""
from openai_kira.utils.data import MsgFlow
_cid = DefaultData.composing_uid(user_id=user_id, chat_id=chat_id)
return MsgFlow(uid=_cid).forget()
class Reply(object):
"""
    Group reply handling
"""
@staticmethod
async def load_response(user,
group,
key: Union[str, list],
prompt: str = "Say this is a test",
method: str = "chat",
start_name: str = "Ai:",
restart_name: str = "Human:"
) -> str:
"""
        Make the request
        :param start_name: what the bot calls itself
        :param restart_name: what the bot calls the requester
:param user:
:param group:
:param key:
:param prompt:
:param method:
:return:
"""
load_csonfig()
        # Load global variables
if not key:
logger.error("SETTING:API key missing")
raise Exception("API key missing")
openai_kira.setting.openaiApiKey = key
        # Flood attack defense
if Utils.WaitFlood(user=user, group=group):
return "TOO FAST"
        # Length limit
if Utils.tokenizer(str(prompt)) > _csonfig['input_limit']:
return "TOO LONG"
        # Usage check
_UsageManger = Usage(uid=user)
_Usage = _UsageManger.isOutUsage()
if _Usage["status"]:
return f"小时额度或者单人总额度用完,请申请重置或等待\n{_Usage['use']}"
        # Content moderation
try:
_harm = False
_Moderation_rep = await openai_kira.Moderations().create(input=prompt)
_moderation_result = _Moderation_rep["results"][0]
_harm_result = [key for key, value in _moderation_result["categories"].items() if value == True]
for item in _harm_result:
if item in HARM_TYPE:
_harm = item
except Exception as e:
logger.error(f"Moderation:{prompt}:{e}")
_harm = False
if _harm:
_info = DefaultData.getRefuseAnswer()
await asyncio.sleep(random.randint(3, 6))
return f"{_info}\nYour Content violates Openai policy:{_harm}..."
        # Content filtering
prompt = ContentDfa.filter_all(prompt)
        # Make the request
try:
from openai_kira import Chat
            # Compute the unique message bucket ID
_cid = DefaultData.composing_uid(user_id=user, chat_id=group)
            # Use a per-user account bucket
if len(start_name) > 12:
start_name = start_name[-10:]
if len(restart_name) > 12:
restart_name = restart_name[-10:]
# 分发类型
if method == "write":
# OPENAI
response = await openai_kira.Completion(call_func=Api_keys.pop_api_key).create(
model="text-davinci-003",
prompt=str(prompt),
temperature=0.2,
frequency_penalty=1,
max_tokens=int(_csonfig["token_limit"])
)
elif method == "catch":
# 群组公用桶 ID
_oid = f"-{abs(group)}"
receiver = Chat.Chatbot(
conversation_id=int(_oid),
call_func=Api_keys.pop_api_key,
token_limit=1000,
start_sequ=start_name,
restart_sequ=restart_name,
)
response = await receiver.get_chat_response(model="text-davinci-003",
prompt=str(prompt),
optimizer=Optimizer.MatrixPoint,
role="......",
no_penalty=True,
max_tokens=100,
web_enhance_server=PLUGIN_TABLE
)
elif method == "chat":
receiver = Chat.Chatbot(
conversation_id=int(_cid),
call_func=Api_keys.pop_api_key,
token_limit=3751,
start_sequ=start_name,
restart_sequ=restart_name,
)
_head = None
if _csonfig.get("allow_change_head"):
_head = Header(uid=user).get()
_head = ContentDfa.filter_all(_head)
_style = {}
if _csonfig.get("allow_change_style"):
_style = Style(uid=user).get()
response = await receiver.get_chat_response(model="text-davinci-003",
prompt=str(prompt),
max_tokens=int(_csonfig["token_limit"]),
optimizer=Optimizer.SinglePoint,
role=_head,
web_enhance_server=PLUGIN_TABLE,
logit_bias=_style,
no_penalty=not _csonfig["auto_adjust"]
)
else:
return "NO SUPPORT METHOD"
# print(response)
_deal_rq = rqParser.get_response_text(response)
_deal = _deal_rq[0]
_usage = rqParser.get_response_usage(response)
_time = int(time.time() * 1000)
logger.success(f"CHAT:{user}:{group} --time: {_time} --prompt: {prompt} --req: {_deal} ")
except Exception as e:
logger.error(f"RUN:Api Error:{e}")
_usage = 0
_deal = f"OH no,api Outline or crash? \n {prompt}"
        # Update quota
        _AnalysisUsage = _UsageManger.renewUsage(usage=_usage)
        # Update statistics
        DefaultData().setAnalysis(usage={f"{user}": _AnalysisUsage.total_usage})
        # Humanize the reply
_deal = ContentDfa.filter_all(_deal)
_deal = Utils.Humanization(_deal)
return _deal
async def WhiteUserCheck(user_id: int, WHITE: str = "") -> PublicReturn:
"""
:param user_id: user id
    :param WHITE: whitelist notice text
    :return: TRUE,msg -> on the whitelist
"""
#
if _csonfig["whiteUserSwitch"]:
        # Not on the whitelist!
        if UserManger(user_id).read("white"):
            return PublicReturn(status=True, trace="WhiteUserCheck")
        msg = f"{user_id}:Checked the settings and found that you are not whitelisted!...{WHITE}"
if UserManger(user_id).read("block"):
msg = f"{user_id}:Blocked!...{WHITE}"
return PublicReturn(status=False,
trace="WhiteUserCheck",
msg=msg)
else:
return PublicReturn(status=True, trace="WhiteUserCheck")
async def WhiteGroupCheck(group_id: int, WHITE: str = "") -> PublicReturn:
"""
:param group_id: group id
:param WHITE:
    :return: TRUE,msg -> on the whitelist
"""
#
if _csonfig["whiteGroupSwitch"]:
        # Not on the whitelist!
        if GroupManger(group_id).read("white"):
            return PublicReturn(status=True, trace="WhiteUserCheck")
        msg = f"{group_id}:Checked the settings and found that this group is not whitelisted!...{WHITE}"
if GroupManger(group_id).read("block"):
msg = f"{group_id}:Blocked!...{WHITE}"
return PublicReturn(status=False,
trace="WhiteUserCheck",
msg=msg)
else:
return PublicReturn(status=True, trace="WhiteUserCheck")
async def RemindSet(user_id, text) -> PublicReturn:
"""
:param user_id:
:param text:
    :return: True means the setting succeeded
"""
_text = text
_user_id = user_id
_remind_r = _text.split(" ", 1)
if len(_remind_r) < 2:
return PublicReturn(status=False, msg=f"", trace="Remind")
_remind = _remind_r[1]
if Utils.tokenizer(_remind) > 333:
return PublicReturn(status=True, msg=f"过长:{_remind}", trace="Remind")
_remind = ContentDfa.filter_all(_remind)
if _csonfig.get("allow_change_head"):
# _remind = _remind.replace("你是", "ME*扮演")
_remind = _remind.replace("你", "ME*")
_remind = _remind.replace("我", "YOU*")
_remind = _remind.replace("YOU*", "你")
_remind = _remind.replace("ME*", "我")
Header(uid=_user_id).set(_remind)
return PublicReturn(status=True, msg=f"设定:{_remind}\nNo reply this msg", trace="Remind")
Header(uid=_user_id).set({})
return PublicReturn(status=True, msg=f"I refuse Remind Command", trace="Remind")
async def StyleSet(user_id, text) -> PublicReturn:
"""
:param user_id:
:param text:
    :return: True means the setting succeeded
"""
_text = text
_user_id = user_id
_style_r = _text.split(" ", 1)
if len(_style_r) < 2:
return PublicReturn(status=False, msg=f"", trace="StyleSet")
_style = _style_r[1]
if Utils.tokenizer(_style) > 800:
return PublicReturn(status=True, msg=f"过长:{_style}", trace="StyleSet")
_style_token_list = re.split("[,,]", _style)
_token = {}
if _csonfig.get("allow_change_style"):
for item in _style_token_list:
item = str(item)
_weight = round(item.count("(") + item.count("{") + 1 - item.count("[") * 1.5)
item = item.replace("(", "").replace("{", "").replace("[", "").replace(")", "").replace("}", "").replace(
"]", "")
_weight = _weight if _weight <= 20 else 2
_weight = _weight if _weight >= -80 else 0
_encode_token = openai_kira.utils.chat.gpt_tokenizer.encode(item)
_love = {str(token): _weight for token in _encode_token}
_child_token = {}
for token, weight in _love.items():
token = str(token)
if token in _token.keys():
__weight = _token.get(token) + _weight
else:
__weight = _weight
_child_token[token] = __weight
_token.update(_child_token)
Style(uid=_user_id).set(_token)
return PublicReturn(status=True, msg=f"Style:{_style}\nNo reply this msg", trace="StyleSet")
Style(uid=_user_id).set(_token)
return PublicReturn(status=True, msg=f"I refuse StyleSet Command", trace="StyleSet")
async def PromptPreprocess(text, types: str = "group") -> PublicReturn:
"""
    Message preprocessing: command recognition and the interaction layer with the config
    :param text:
    :param types:
    :return: TRUE,msg -> continue execution
    """
    load_csonfig()
    # Get the prompt
_raw_prompt = text
_prompt_types = "unknown"
_prompt = ""
    # Non-overlapping command branches
# Chat
if _raw_prompt.startswith("/chat"):
_prompt_r = _raw_prompt.split(" ", 1)
if len(_prompt_r) > 1:
_prompt = _prompt_r[1]
_prompt_types = "chat"
    # Catch
if _raw_prompt.startswith("/catch"):
_prompt_r = _raw_prompt.split(" ", 1)
if len(_prompt_r) > 1:
_prompt = _prompt_r[1]
_prompt_types = "catch"
# Write
if _raw_prompt.startswith("/write"):
_prompt_r = _raw_prompt.split(" ", 1)
if len(_prompt_r) > 1:
_prompt = _prompt_r[1]
_prompt_types = "write"
    # Handle the empty case; possibly redundant
    if len(_prompt) < 1:
        _prompt_types = "unknown"
    # Validate the result
    if _prompt_types == "unknown":
        # Do not execute
return PublicReturn(status=False, msg=types, data=[_prompt, _prompt_types], trace="PromptPreprocess")
return PublicReturn(status=True, msg=types, data=[_prompt, _prompt_types], trace="PromptPreprocess")
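# A hedged, illustrative alternative to the repeated prefix parsing above: a single
# table-driven matcher. The name `_match_command_sketch` is an assumption for
# illustration only; nothing in this module calls it.
def _match_command_sketch(raw_prompt: str):
    # Map command prefixes to prompt types; return (prompt, prompt_type), defaulting to "unknown".
    for prefix, prompt_type in (("/chat", "chat"), ("/catch", "catch"), ("/write", "write")):
        if raw_prompt.startswith(prefix):
            parts = raw_prompt.split(" ", 1)
            if len(parts) > 1 and len(parts[1]) >= 1:
                return parts[1], prompt_type
    return "", "unknown"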
async def Group(Message: User_Message, bot_profile: ProfileReturn, config) -> PublicReturn:
"""
    Dispatch decisions based on text features
    :param bot_profile:
    :param Message:
    :param config:
    :return: True means reply to the user
"""
load_csonfig()
_text = Message.text
_user_id = Message.from_user.id
_chat_id = Message.from_chat.id
_user_name = Message.from_user.name
_bot_name = bot_profile.bot_name
    # Bot status
    if not _csonfig.get("statu"):
        return PublicReturn(status=True, msg="BOT:Under Maintenance", trace="Statu")
    # Whitelist check
_white_user_check = await WhiteGroupCheck(_chat_id, config.WHITE)
_white_user_check: PublicReturn
if not _white_user_check.status:
return PublicReturn(status=True,
trace="WhiteGroupCheck",
msg=_white_user_check.msg)
    # Linear decision flow
if _text.startswith("/remind"):
_remind_set = await RemindSet(user_id=_user_id, text=_text)
_remind_set: PublicReturn
return PublicReturn(status=True,
trace="Remind",
msg=_remind_set.msg)
if _text.startswith("/style"):
_style_set = await StyleSet(user_id=_user_id, text=_text)
_style_set: PublicReturn
return PublicReturn(status=True,
trace="Style",
msg=_style_set.msg)
if _text.startswith("/forgetme"):
await Forget(user_id=_user_id, chat_id=_chat_id)
return PublicReturn(status=True, msg=f"Down,Miss you", trace="ForgetMe")
if _text.startswith("/voice"):
_user_manger = UserManger(_user_id)
_set = True
if _user_manger.read("voice"):
_set = False
_user_manger.save({"voice": _set})
return PublicReturn(status=True, msg=f"TTS:{_set}", trace="VoiceSet")
_prompt_preprocess = await PromptPreprocess(text=_text, types="group")
_prompt_preprocess: PublicReturn
if not _prompt_preprocess.status:
        # Preprocessing failed: no trigger condition matched, so do not reply
return PublicReturn(status=False, msg=f"No Match Type", trace="PromptPreprocess")
_prompt = _prompt_preprocess.data[0]
_reply_type = _prompt_preprocess.data[1]
try:
_name = f"{_user_name}"
_req = await Reply.load_response(user=_user_id,
group=_chat_id,
key=Api_keys.get_key("./Config/api_keys.json")["OPENAI_API_KEY"],
prompt=_prompt,
restart_name=_name,
start_name=get_start_name(prompt=_prompt, bot_name=_bot_name),
method=_reply_type
)
# message_type = "text"
_info = []
        # Voice message
_voice = UserManger(_user_id).read("voice")
voice_data = None
if _voice:
voice_data = await TTSSupportCheck(text=_req, user_id=_user_id)
if not voice_data and _voice:
_info.append("TTS Unavailable")
# message_type = "voice" if _voice and voice_data else message_type
# f"{_req}\n{config.INTRO}\n{''.join(_info)}"
_log = '\n'.join(_info)
return PublicReturn(status=True, msg=f"OK", trace="Reply", voice=voice_data, reply=f"{_req}\n{_log}")
except Exception as e:
logger.error(e)
return PublicReturn(status=True, msg=f"OK", trace="Error", reply="Error Occur~Maybe Api request rate limit~nya")
async def Friends(Message: User_Message, bot_profile: ProfileReturn, config) -> PublicReturn:
"""
    Dispatch decisions based on text features
    :param bot_profile:
    :param Message:
    :param config:
    :return: True means reply to the user
"""
load_csonfig()
_text = Message.text
_user_id = Message.from_user.id
_chat_id = Message.from_chat.id
_user_name = Message.from_user.name
_bot_name = bot_profile.bot_name
    # Bot status
    if not _csonfig.get("statu"):
        return PublicReturn(status=True, msg="BOT:Under Maintenance", trace="Statu")
    # Whitelist check
_white_user_check = await WhiteUserCheck(_user_id, config.WHITE)
_white_user_check: PublicReturn
if not _white_user_check.status:
return PublicReturn(status=True,
trace="WhiteGroupCheck",
msg=_white_user_check.msg)
    # Linear decision flow
if _text.startswith("/remind"):
_remind_set = await RemindSet(user_id=_user_id, text=_text)
_remind_set: PublicReturn
return PublicReturn(status=True,
trace="Remind",
msg=_remind_set.msg)
if _text.startswith("/style"):
_style_set = await StyleSet(user_id=_user_id, text=_text)
_style_set: PublicReturn
return PublicReturn(status=True,
trace="Style",
msg=_style_set.msg)
if _text.startswith("/forgetme"):
await Forget(user_id=_user_id, chat_id=_chat_id)
return PublicReturn(status=True, msg=f"Down,Miss you", trace="ForgetMe")
if _text.startswith("/voice"):
_user_manger = UserManger(_user_id)
_set = True
if _user_manger.read("voice"):
_set = False
_user_manger.save({"voice": _set})
return PublicReturn(status=True, msg=f"TTS:{_set}", trace="Voice")
_prompt_preprocess = await PromptPreprocess(text=_text, types="private")
_prompt_preprocess: PublicReturn
if not _prompt_preprocess.status:
        # Preprocessing failed: no trigger condition matched, so do not reply
return PublicReturn(status=False, msg=f"No Match Type", trace="PromptPreprocess")
_prompt = _prompt_preprocess.data[0]
_reply_type = _prompt_preprocess.data[1]
try:
_name = f"{_user_name}"
_req = await Reply.load_response(user=_user_id,
group=_chat_id,
key=Api_keys.get_key("./Config/api_keys.json")["OPENAI_API_KEY"],
prompt=_prompt,
restart_name=_name,
start_name=get_start_name(prompt=_prompt, bot_name=_bot_name),
method=_reply_type
)
message_type = "text"
_info = []
        # Voice message
_voice = UserManger(_user_id).read("voice")
voice_data = None
if _voice:
voice_data = await TTSSupportCheck(text=_req, user_id=_user_id)
if not voice_data and _voice:
_info.append("TTS Unavailable")
message_type = "voice" if _voice and voice_data else message_type
# f"{_req}\n{config.INTRO}\n{''.join(_info)}"
_data = {"type": message_type, "msg": "".join(_info), "text": _req, "voice": voice_data}
return PublicReturn(status=True,
msg=f"OK",
trace="Reply",
reply=_req + "\n".join(_info),
voice=voice_data
)
except Exception as e:
logger.error(e)
return PublicReturn(status=True, msg=f"Error Occur~Maybe Api request rate limit~nya",
trace="Error",
reply="Error Occur~Maybe Api request rate limit~nya")
async def Trigger(Message: User_Message, config) -> PublicReturn:
"""
:param Message: group id
:param config:
    :return: TRUE,msg -> trigger is enabled for this group
"""
group_id = Message.from_chat.id
if config.trigger:
if GroupManger(group_id).read("trigger"):
return PublicReturn(status=True, trace="TriggerCheck")
return PublicReturn(status=False, trace="No trigger")
async def GroupAdminCommand(Message: User_Message, config, pLock):
load_csonfig()
_reply = []
group_id = Message.from_chat.id
try:
command = Message.text
if command.startswith("/trigger"):
_group_manger = GroupManger(int(group_id))
_set = True
if _group_manger.read("trigger"):
_set = False
_group_manger.save({"trigger": _set})
_ev = f"Group Admin:GroupTrigger {_set}"
_reply.append(_ev)
logger.info(_ev)
#
except Exception as e:
logger.error(e)
return _reply
async def MasterCommand(user_id: int, Message: User_Message, config, pLock=None):
load_csonfig()
_reply = []
if user_id in config.master:
try:
command = Message.text
# SET
if command.startswith("/set_user_cold"):
                # User cooldown time
_len = Utils.extract_arg(command)[0]
_len_ = "".join(list(filter(str.isdigit, _len)))
if _len_:
_csonfig["usercold_time"] = int(_len_)
_reply.append(f"user cooltime:{_len_}")
save_csonfig(pLock)
logger.info(f"SETTING:reset user cold time limit to{_len_}")
if command.startswith("/set_group_cold"):
                # Group cooldown time
_len = Utils.extract_arg(command)[0]
_len_ = "".join(list(filter(str.isdigit, _len)))
if _len_:
_csonfig["groupcold_time"] = int(_len_)
_reply.append(f"group cooltime:{_len_}")
save_csonfig(pLock)
logger.info(f"SETTING:reset group cold time limit to{_len_}")
if command.startswith("/set_per_user_limit"):
_len = Utils.extract_arg(command)[0]
_len_ = "".join(list(filter(str.isdigit, _len)))
if _len_:
_csonfig["per_user_limit"] = int(_len_)
_reply.append(f"set_hour_limit:{_len_}")
save_csonfig(pLock)
logger.info(f"SETTING:reset per_user_limit to{_len_}")
if command.startswith("/set_per_hour_limit"):
                # Set the user's hourly usage limit
_len = Utils.extract_arg(command)[0]
_len_ = "".join(list(filter(str.isdigit, _len)))
if _len_:
_csonfig["hour_limit"] = int(_len_)
_reply.append(f"hour_limit:{_len_}")
save_csonfig(pLock)
logger.info(f"SETTING:reset hour_limit to{_len_}")
if command.startswith("/promote_user_limit"):
# 设定用户小时用量
_len = Utils.extract_arg(command)
if len(_len) != 2:
return
__user_id = int("".join(list(filter(str.isdigit, _len[0]))))
__limit = int("".join(list(filter(str.isdigit, _len[1]))))
if __user_id > 0 and __limit > 0:
UserManger(__user_id).save({"usage": __limit})
_reply.append(f"user_limit:{__limit}")
logger.info(f"SETTING:promote user_limit to{__limit}")
if command.startswith("/reset_user_usage"):
                # Reset the user's total usage data
_len = Utils.extract_arg(command)
for i in _len:
_len_ = "".join(list(filter(str.isdigit, i)))
if _len_:
Usage(uid=_len_).resetTotalUsage()
logger.info(f"SETTING:resetTotalUsage {_len_} limit to 0")
_reply.append(f"hour_limit:{_len}")
if command.startswith("/set_token_limit"):
                # Maximum number of tokens to return
_len = Utils.extract_arg(command)[0]
_len_ = "".join(list(filter(str.isdigit, _len)))
if _len_:
_csonfig["token_limit"] = int(_len_)
_reply.append(f"tokenlimit:{_len_}")
save_csonfig(pLock)
logger.info(f"SETTING:reset tokenlimit limit to{_len_}")
if command.startswith("/set_input_limit"):
                # Input character limit
_len = Utils.extract_arg(command)[0]
_len_ = "".join(list(filter(str.isdigit, _len)))
if _len_:
_csonfig["input_limit"] = int(_len_)
_reply.append(f"input limit:{_len_}")
save_csonfig(pLock)
logger.info(f"SETTING:reset input limit to{_len_}")
if "/add_block_group" in command:
                # Add groups to the blocklist
_len = Utils.extract_arg(command)
for i in _len:
_len_ = "".join(list(filter(str.isdigit, i)))
if _len_:
_ev = f"SETTING:add block group {_len_}"
GroupManger(int(_len_)).save({"block": True})
_reply.append(_ev)
logger.info(_ev)
if "/del_block_group" in command:
                # Remove groups from the blocklist
_len = Utils.extract_arg(command)
for i in _len:
_len_ = "".join(list(filter(str.isdigit, i)))
if _len_:
_ev = f"SETTING:del block group {_len_}"
GroupManger(int(_len_)).save({"block": False})
_reply.append(_ev)
logger.info(_ev)
if "/add_block_user" in command:
_len = Utils.extract_arg(command)
for i in _len:
_len_ = "".join(list(filter(str.isdigit, i)))
if _len_:
_ev = f"SETTING:add_block_userp {_len_}"
UserManger(int(_len_)).save({"block": True})
_reply.append(_ev)
logger.info(_ev)
if "/del_block_user" in command:
_len = Utils.extract_arg(command)
for i in _len:
_len_ = "".join(list(filter(str.isdigit, i)))
if _len_:
_ev = f"SETTING:del_block_user {_len_}"
UserManger(int(_len_)).save({"block": False})
_reply.append(_ev)
logger.info(_ev)
# whiteGroup
if "/add_white_group" in command:
_len = Utils.extract_arg(command)
for i in _len:
_len_ = "".join(list(filter(str.isdigit, i)))
if _len_:
_ev = f"SETTING:add_white_group {_len_}"
GroupManger(int(_len_)).save({"white": True})
_reply.append(_ev)
logger.info(_ev)
if "/del_white_group" in command:
_len = Utils.extract_arg(command)
for i in _len:
_len_ = "".join(list(filter(str.isdigit, i)))
if _len_:
_ev = f"SETTING:del_white_group {_len_}"
GroupManger(int(_len_)).save({"white": False})
_reply.append(_ev)
logger.info(_ev)
# whiteUser
if "/add_white_user" in command:
_len = Utils.extract_arg(command)
for i in _len:
_len_ = "".join(list(filter(str.isdigit, i)))
if _len_:
_ev = f"SETTING:add_white_user {_len_}"
UserManger(int(_len_)).save({"white": True})
_reply.append(_ev)
logger.info(_ev)
if "/del_white_user" in command:
_len = Utils.extract_arg(command)
for i in _len:
_len_ = "".join(list(filter(str.isdigit, i)))
if _len_:
_ev = f"SETTING:del_white_user {_len_}"
UserManger(int(_len_)).save({"white": False})
_reply.append(_ev)
logger.info(_ev)
# UPDATE
if command.startswith("/update_detect"):
keys, _error = initCensor()
if _error:
error = '\n'.join(_error)
errors = f"Error:\n{error}"
else:
                    # Reload the Danger wordlist
ContentDfa.change_words(path="./Data/Danger.form")
errors = "Success"
_reply.append(f"{'|'.join(keys)}\n\n{errors}")
# USER White
if command.startswith("/open_user_white_mode"):
_csonfig["whiteUserSwitch"] = True
_reply.append("SETTING:whiteUserSwitch ON")
save_csonfig(pLock)
logger.info("SETTING:whiteUser ON")
if command.startswith("/close_user_white_mode"):
_csonfig["whiteUserSwitch"] = False
_reply.append("SETTING:whiteUserSwitch OFF")
save_csonfig(pLock)
logger.info("SETTING:whiteUser OFF")
# GROUP White
if command.startswith("/open_group_white_mode"):
_csonfig["whiteGroupSwitch"] = True
_reply.append("ON:whiteGroup")
save_csonfig(pLock)
logger.info("SETTING:whiteGroup ON")
if command.startswith("/close_group_white_mode"):
_csonfig["whiteGroupSwitch"] = False
_reply.append("SETTING:whiteGroup OFF")
save_csonfig(pLock)
logger.info("SETTING:whiteGroup OFF")
if command.startswith("/see_api_key"):
keys = Api_keys.get_key("./Config/api_keys.json")
                # Mask sensitive parts of the keys
_key = []
for i in keys["OPENAI_API_KEY"]:
_key.append(DefaultData.mask_middle(i, 10))
_info = '\n'.join(_key)
_reply.append(f"Now Have \n{_info}")
if "/add_api_key" in command:
_parser = Utils.extract_arg(command)
if _parser:
Api_keys.add_key(key=str(_parser[0]).strip())
logger.info("SETTING:ADD API KEY")
_reply.append("SETTING:ADD API KEY")
if "/del_api_key" in command:
_parser = Utils.extract_arg(command)
if _parser:
Api_keys.pop_key(key=str(_parser[0]).strip())
logger.info("SETTING:DEL API KEY")
_reply.append("SETTING:DEL API KEY")
if "/change_style" in command:
if _csonfig["allow_change_style"]:
_allow_change_style = False
else:
_allow_change_style = True
_csonfig["allow_change_style"] = _allow_change_style
_reply.append(f"SETTING:allow_change_style {_allow_change_style}")
save_csonfig(pLock)
logger.info(f"SETTING:allow_change_style {_allow_change_style}")
if "/change_head" in command:
if _csonfig["allow_change_head"]:
_allow_change_head = False
else:
_allow_change_head = True
_csonfig["allow_change_head"] = _allow_change_head
_reply.append(f"SETTING:allow_change_head {_allow_change_head}")
save_csonfig(pLock)
logger.info(f"SETTING:allow_change_head {_allow_change_head}")
if "/auto_adjust" in command:
if _csonfig["auto_adjust"]:
_adjust = False
else:
_adjust = True
_csonfig["auto_adjust"] = _adjust
_reply.append(f"SETTING:auto_adjust {_adjust}")
save_csonfig(pLock)
logger.info(f"SETTING:auto_adjust {_adjust}")
if command.startswith("/open"):
_csonfig["statu"] = True
_reply.append("SETTING:BOT ON")
save_csonfig(pLock)
logger.info("SETTING:BOT ON")
if command.startswith("/close"):
_csonfig["statu"] = False
_reply.append("SETTING:BOT OFF")
save_csonfig(pLock)
logger.info("SETTING:BOT OFF")
except Exception as e:
logger.error(e)
return _reply
async def Start(_):
return f"Ping,Use /chat start a new chat loop"
async def About(config):
return f"{config.ABOUT}"
async def Help(_):
return """
    Use /chat + sentence to start a message flow; just reply to keep talking. Messages older than 48 hours cannot be replied to.
    Use /write + sentence for freeform continuation.
    Use /remind to set a scene header that is never trimmed from the context.
    Use /forgetme to forget the past and reset the history.
    Use /voice to toggle TTS support where available.
    Use /trigger so an Admin can enable proactive-reply mode.
    Use /style to customize vocabulary style (weaker effect for Chinese): (boost), [suppress].
"""
| [
"chat",
"catch",
" ",
"write",
"unknown"
] |
2024-01-10 | Yagami360/ai-product-dev-tips | nlp_processing~10~run_llm_finetune.py | import time
import json
import io
import openai
from langchain.adapters import openai as openai_adapter
from langchain.load import load
from langsmith import schemas
# def convert_messages(example: schemas.Example) -> dict:
# messages = load.load(example.inputs)['messages']
# message_chunk = load.load(example.outputs)['generations'][0]['message']
# return {"messages": messages + [message_chunk]}
finetuning_messages = openai_adapter.convert_messages_for_finetuning(messages)
my_file = io.BytesIO()
for group in finetuning_messages:
if any(["function_call" in message for message in group]):
continue
my_file.write((json.dumps({"messages": group}) + "\n").encode('utf-8'))
my_file.seek(0)
training_file = openai.File.create(
file=my_file,
purpose='fine-tune'
)
# Wait while the file is processed
status = openai.File.retrieve(training_file.id).status
start_time = time.time()
while status != "processed":
print(f"Status=[{status}]... {time.time() - start_time:.2f}s", end="\r", flush=True)
time.sleep(5)
status = openai.File.retrieve(training_file.id).status
print(f"File {training_file.id} ready after {time.time() - start_time:.2f} seconds.")
| [] |
2024-01-10 | Yagami360/ai-product-dev-tips | nlp_processing~6~run_langchain_1.py | import argparse
from langchain import PromptTemplate
from langchain.llms import OpenAI
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--openai_api_key', type=str, default="dummuy")
parser.add_argument('--model_name', type=str, default="text-davinci-003")
parser.add_argument('--template', type=str, default="{keyword1}と{keyword2}について教えてください")
parser.add_argument('--keyword1', type=str, default="LLM")
parser.add_argument('--keyword2', type=str, default="ChatGPT")
parser.add_argument('--save_propmt_filepath', type=str, default="out_data/prompt-template-1.json")
args = parser.parse_args()
    # Create a PromptTemplate object (without validation)
    # prompt = PromptTemplate.from_template(
    #     template=args.template # e.g. "{keyword1}と{keyword2}について教えてください"
    # )
    # Create a PromptTemplate object (with validation)
prompt = PromptTemplate(
        template=args.template, # e.g. "{keyword1}と{keyword2}について教えてください"
input_variables=["keyword1", "keyword2"],
)
    # Save the prompt template as JSON locally
prompt.save(args.save_propmt_filepath)
    # Generate the prompt text
    # e.g. "{keyword1}と{keyword2}について教えてください" -> "{LLM}と{ChatGPT}について教えてください"
prompt_text = prompt.format(keyword1=args.keyword1, keyword2=args.keyword2)
print("prompt_text: ", prompt_text)
    # Define the model
llm = OpenAI(
openai_api_key=args.openai_api_key,
model_name=args.model_name,
        temperature=0.9, # Higher values even out token probabilities and tend to produce more varied text; lower values favor high-probability tokens and produce more consistent text.
)
print("llm: ", llm)
    # Run LLM inference
try:
response = llm(prompt=prompt_text)
print(f"response: {response}")
except Exception as e:
print(f"Excception was occurred | {e}")
exit(1)
exit(0)
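    # Hedged sketch (not executed here): the JSON saved above can be restored with
    # LangChain's prompt loader, assuming the same file path.
    #   from langchain.prompts import load_prompt
    #   restored_prompt = load_prompt(args.save_propmt_filepath)
    #   print(restored_prompt.format(keyword1="LLM", keyword2="ChatGPT"))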
| [] |
2024-01-10 | Yagami360/ai-product-dev-tips | nlp_processing~8~run_langchain_1.py | import argparse
from langchain.memory import ChatMessageHistory
from langchain.schema import messages_to_dict
from langchain.schema import messages_from_dict
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--human_prompt', type=str, default="Hello World!")
parser.add_argument('--ai_prompt', type=str, default="Hello! How can I assist you today?")
args = parser.parse_args()
    # Add HumanMessages and AIMessages to a ChatMessageHistory object (which manages the chat history)
history = ChatMessageHistory()
history.add_user_message(args.human_prompt)
history.add_ai_message(args.ai_prompt)
print(f'history.messages={history.messages}')
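    # Minimal sketch: the messages_to_dict / messages_from_dict helpers imported above can
    # round-trip the history through plain dicts (e.g. for JSON persistence).
    history_dicts = messages_to_dict(history.messages)
    print(f'history_dicts={history_dicts}')
    restored_messages = messages_from_dict(history_dicts)
    print(f'restored_messages={restored_messages}')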
    # Clear the history
history.clear()
print(f'history.messages={history.messages}')
exit(0)
| [] |
2024-01-10 | Yagami360/ai-product-dev-tips | nlp_processing~9~run_langsmith_1.py | import os
import argparse
from langsmith import Client
from langchain.chat_models import ChatOpenAI
from langchain.smith import RunEvalConfig, run_on_dataset
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--project_name', type=str, default="eval")
parser.add_argument('--langchain_api_key', type=str, default="dummy")
parser.add_argument('--openai_api_key', type=str, default="dummy")
parser.add_argument('--dataset_name', type=str, default="Rap Battle Dataset")
args = parser.parse_args()
os.environ["LANGCHAIN_PROJECT"] = args.project_name
os.environ["LANGCHAIN_API_KEY"] = args.langchain_api_key
os.environ["OPENAI_API_KEY"] = args.openai_api_key
#--------------------------
    # Create the LangSmith dataset
#--------------------------
client = Client()
try:
dataset = client.create_dataset(
dataset_name=args.dataset_name, description="Rap battle prompts.",
)
example_inputs = [
"a rap battle between Atticus Finch and Cicero",
"a rap battle between Barbie and Oppenheimer",
"a Pythonic rap battle between two swallows: one European and one African",
"a rap battle between Aubrey Plaza and Stephen Colbert",
]
for input_prompt in example_inputs:
            # Add an example to the dataset
            # In this case the LLM generates an output for each given input and that output is evaluated, so only the input text is stored in the dataset
try:
client.create_example(
inputs={"question": input_prompt},
outputs=None,
dataset_id=dataset.id,
)
except Exception as e:
print(f"Excception was occurred | {e}")
# exit(1)
except Exception as e:
print(f"Excception was occurred | {e}")
# exit(1)
#--------------------------
    # Define the LLM model
#--------------------------
llm = ChatOpenAI(temperature=0.9)
print("llm: ", llm)
#--------------------------
    # Run LangSmith evaluation
#--------------------------
    # Configure the evaluation criteria
eval_config = RunEvalConfig(
evaluators=[
# You can specify an evaluator by name/enum.
# In this case, the default criterion is "helpfulness"
"criteria",
# Or you can configure the evaluator
            # harmfulness
            RunEvalConfig.Criteria("harmfulness"),
            # misogyny
            RunEvalConfig.Criteria("misogyny"),
            # "Are the lyrics cliche? Respond Y if they are, N if they're entirely unique."
RunEvalConfig.Criteria(
{"cliche": "Are the lyrics cliche? Respond Y if they are, N if they're entirely unique."}
)
]
)
    # Run evaluation on the dataset created above
try:
resp_eval = run_on_dataset(
dataset_name=args.dataset_name,
llm_or_chain_factory=llm,
evaluation=eval_config,
client=client,
verbose=True,
project_name=args.project_name,
)
print("resp_eval: ", resp_eval)
except Exception as e:
print(f"Excception was occurred | {e}")
exit(1)
exit(0)
| [] |
2024-01-10 | Yagami360/ai-product-dev-tips | nlp_processing~3~run_langchain.py | import os
import argparse
# LangChain Data connection
from langchain.document_loaders import TextLoader # LangChain Data connection: Document Loaders
from langchain.text_splitter import CharacterTextSplitter # LangChain Data connection: Text Splitters
from langchain.embeddings.openai import OpenAIEmbeddings # LangChain Data connection: Text embedding models
from langchain.vectorstores import Chroma # LangChain Data connection: VectorStores
from langchain.chains import RetrievalQA # LangChain Data connection: Retrievers
# LangChain Model I/O
from langchain.llms import OpenAI # LangChain Model I/O: Language models
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--openai_api_key', type=str, default="dummuy")
parser.add_argument('--text_path', type=str, default="in_data/kenran.txt")
parser.add_argument('--emb_model_name', type=str, default="text-embedding-ada-002")
parser.add_argument('--model_name', type=str, default="text-davinci-003")
parser.add_argument('--prompt', type=str, default="ヤガミについて教えてください")
args = parser.parse_args()
    # Set the OpenAI API key
os.environ["OPENAI_API_KEY"] = args.openai_api_key
    # Load the text data using LangChain Data connection Document Loaders
document_loader = TextLoader(args.text_path)
documents = document_loader.load()
    # Split the text using LangChain Data connection Text Splitters
    text_splitter = CharacterTextSplitter(
        # separator = "\n", # separator
        chunk_size=700, # number of characters per chunk
        chunk_overlap=0
)
split_documents = text_splitter.split_documents(documents)
print(f'split_documents={split_documents}')
    # Define the embedding model using LangChain Data connection Text embedding models
    # By default, the OpenAI-recommended "text-embedding-ada-002" model is used
emb_model = OpenAIEmbeddings(
model=args.emb_model_name,
)
print(f'emb_model={emb_model}')
emb_result_0 = emb_model.embed_query(split_documents[0].page_content)
print(f'emb_result_0[0:10]={emb_result_0[0:10]}')
    # Embed the split texts with the embedding model and store the vectors in a feature database (VectorDB)
    # Uses LangChain Data connection VectorStores
feature_db = Chroma.from_documents(split_documents, emb_model)
print(f'feature_db={feature_db}')
    # Create a retriever from the feature database (VectorDB): a component that searches the split external texts and fetches the passages most similar to the user's input
retriever = feature_db.as_retriever(
search_kwargs={"k": 1}, # k=1 個の分割文章を検索&取得
)
print(f'retriever={retriever}')
    # Use the retriever to search the split external texts for the passages most similar to the user's input
print(f"retriever.get_relevant_documents(query=args.prompt)={retriever.get_relevant_documents(query=args.prompt)}")
    # Define the LLM model
llm = OpenAI(
model_name=args.model_name,
        temperature=0.9, # Higher values even out token probabilities and tend to produce more varied text; lower values favor high-probability tokens and produce more consistent text.
)
print(f'llm={llm}')
    # Build a RetrievalQA chain (a chain used for question answering) with LangChain Data connection Retrievers
    # Chain: a mechanism for running multiple prompt steps
qa_chain = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever
)
print(f'qa_chain={qa_chain}')
    # Run LLM inference (QA: question answering)
try:
answer = qa_chain.run(args.prompt)
print(f'answer={answer}')
except Exception as e:
print(f"Excception was occurred | {e}")
exit(1)
exit(0)
| [] |
2024-01-10 | Yagami360/ai-product-dev-tips | nlp_processing~8~run_langchain_2.py | import argparse
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationChain
from langchain.llms import OpenAI
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--openai_api_key', type=str, default="dummuy")
parser.add_argument('--model_name', type=str, default="text-davinci-003")
parser.add_argument('--human_prompt', type=str, default="Hello World!")
parser.add_argument('--ai_prompt', type=str, default="Hello! How can I assist you today?")
args = parser.parse_args()
    # Initialize the memory
    memory = ConversationBufferMemory()
    # Define the model
llm = OpenAI(
openai_api_key=args.openai_api_key,
model_name=args.model_name,
temperature=0.9, # 大きい値では出現確率が均一化され、より多様な文章が生成される傾向がある。低い値では出現確率の高い単語が優先され、より一定の傾向を持った文章が生成される傾向がある。
)
print("llm: ", llm)
    # Initialize the chain (pass in the LLM and the memory object)
conversation = ConversationChain(
llm=llm,
memory=memory
)
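    # Note (hedged): ConversationBufferMemory keeps the full transcript, so long chats can
    # eventually exceed the model's context window. A windowed variant would look like the
    # sketch below (k=3 is an assumed value); it is not used in this script.
    #   from langchain.memory import ConversationBufferWindowMemory
    #   memory = ConversationBufferWindowMemory(k=3)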
    # Run LLM inference
    try:
        # Start the conversation
user_input=input("You: ")
while True:
response = conversation.predict(input=user_input)
print(f"AI: {response}")
user_input = input("You: ")
if user_input == "exit":
break
except Exception as e:
print(f"Excception was occurred | {e}")
exit(1)
exit(0)
| [] |
2024-01-10 | Yagami360/ai-product-dev-tips | nlp_processing~6~run_langchain_3.py | import argparse
from langchain.prompts import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.chat_models import ChatOpenAI
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--openai_api_key', type=str, default="dummuy")
parser.add_argument('--model_name', type=str, default="gpt-3.5-turbo")
args = parser.parse_args()
# ---------------------------------
# SystemMessagePromptTemplate オブジェクト作成
# ---------------------------------
system_template="あなたは、質問者からの質問を{language}で回答するAIです。"
system_message_prompt = SystemMessagePromptTemplate.from_template(system_template)
# ---------------------------------
# HumanMessagePromptTemplate オブジェクト作成
# ---------------------------------
human_template="質問者:{question}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
# ---------------------------------
# ChatPromptTemplate オブジェクト作成
# ---------------------------------
chat_prompt = ChatPromptTemplate.from_messages(
[system_message_prompt, human_message_prompt]
)
# ---------------------------------
# ChatPromptTemplate オブジェクトからプロンプト文生成
# ---------------------------------
prompt_message_list = chat_prompt.format_prompt(language="アニメ口調", question="VTuber について教えて").to_messages()
print(prompt_message_list)
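    # For reference, to_messages() returns message objects roughly of the form:
    #   [SystemMessage(content="あなたは、質問者からの質問をアニメ口調で回答するAIです。"),
    #    HumanMessage(content="質問者:VTuber について教えて")]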
# ---------------------------------
# Chat 用モデル定義
# ---------------------------------
chat_llm = ChatOpenAI(
openai_api_key=args.openai_api_key,
model_name=args.model_name,
temperature=0.9, # 大きい値では出現確率が均一化され、より多様な文章が生成される傾向がある。低い値では出現確率の高い単語が優先され、より一定の傾向を持った文章が生成される傾向がある。
)
print("chat_llm: ", chat_llm)
# ---------------------------------
# Chat 用LLM推論実行
# ---------------------------------
try:
response = chat_llm(prompt_message_list)
print(f"response: {response}")
except Exception as e:
print(f"Excception was occurred | {e}")
exit(1)
exit(0)
| [
"質問者:{question}",
"VTuber について教えて",
"[PLACEHOLDER, PLACEHOLDER]",
"あなたは、質問者からの質問を{language}で回答するAIです。"
] |
2024-01-10 | Yagami360/ai-product-dev-tips | nlp_processing~7~run_langchain_1.py | import os
import argparse
from langchain.llms import OpenAI
from langchain.agents import Tool
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--openai_api_key', type=str, default="dummuy")
parser.add_argument('--model_name', type=str, default="text-davinci-003")
parser.add_argument('--serp_api_key', type=str, default="dummuy")
args = parser.parse_args()
os.environ["SERPAPI_API_KEY"] = args.serp_api_key
# ---------------------------------
# モデル定義
# ---------------------------------
llm = OpenAI(
openai_api_key=args.openai_api_key,
model_name=args.model_name,
temperature=0.9, # 大きい値では出現確率が均一化され、より多様な文章が生成される傾向がある。低い値では出現確率の高い単語が優先され、より一定の傾向を持った文章が生成される傾向がある。
)
print("llm: ", llm)
# ---------------------------------
# LangChain Agents の Tools 定義
# Tools : Agent が外部とやり取りをするために呼び出す外部関数や外部ツール
# ---------------------------------
tools = load_tools(
[
"serpapi", # serpapi : Google 検索結果を取得する外部 API ツール
"llm-math", # llm-math : 算術計算をする LangChain ツール
],
llm=llm
)
# Tool(name='Search', description='A search engine. Useful for when you need to answer questions about current events. Input should be a search query.' ... )
# Tool(name='Calculator', description='Useful for when you need to answer questions about math.' ... )
print("tools: ", tools)
# ---------------------------------
# Agent オブジェクト作成
# ---------------------------------
agent = initialize_agent(
tools,
llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, # AgentType.ZERO_SHOT_REACT_DESCRIPTION : Tools オブジェクトの `description` フィールドなどから、どのツールを用いるかを決める Agent
verbose=True
)
# ---------------------------------
# Agent 実行(LLM 推論で最適な外部ツール実行)
# ---------------------------------
try:
response = agent.run("""
今日の広島市の最高気温を教えて。
そして、最高気温を2乗した結果を教えて。
""")
print(f"response: {response}")
except Exception as e:
print(f"Excception was occurred | {e}")
exit(1)
exit(0)
| [] |
2024-01-10 | Yagami360/ai-product-dev-tips | ml_ops~102~run_open_ai_api.py | import argparse
import openai
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--openai_api_key', type=str, default="dummuy")
parser.add_argument('--model', type=str, default="gpt-3.5-turbo")
parser.add_argument('--content', type=str, default="今日は天気が良いですね")
args = parser.parse_args()
    # Set the API key
openai.api_key = args.openai_api_key
    # List the available models
print("available models: ", openai.Model.list())
    # Call the OpenAI API
try:
response = openai.ChatCompletion.create(
model=args.model,
messages=[
{"role": "user", "content": args.content},
],
            temperature=0.7, # number or null Optional Defaults to 1 / Higher values even out token probabilities and tend to produce more varied text; lower values favor high-probability tokens and produce more consistent text.
            top_p=1, # number or null Optional Defaults to 1 / An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both.
            n=1, # integer Optional Defaults to 1 / Number of answers; specifying 3 returns three completions.
            stream=False, # boolean or null Optional Defaults to false / If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message.
            stop=None, # string / array / null Optional Defaults to null / Strings at which token generation stops
            max_tokens=100, # integer Optional Defaults to inf / Maximum number of tokens in the generated response
            presence_penalty=0, #
            frequency_penalty=0, # Number between -2.0 and 2.0. With low values, words and phrases already present in the text are reinforced and diversity may drop; with high values, the output repeats the same words and phrases less often and is more varied.
            # logit_bias={96096:20}, # specified as {token_id: value} / logit_bias can be set per token to adjust its generation probability; tokens with positive values become more likely and tokens with negative values less likely.
)
print(f"response: {response}")
print(f"content: {response['choices'][0]['message']['content'].strip()}")
except Exception as e:
print(f"Excception was occurred | {e}")
exit(1)
exit(0)
| [] |
2024-01-10 | Yagami360/ai-product-dev-tips | nlp_processing~6~run_langchain_2.py | import argparse
from langchain import PromptTemplate
from langchain import FewShotPromptTemplate
from langchain.llms import OpenAI
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--openai_api_key', type=str, default="dummuy")
parser.add_argument('--model_name', type=str, default="text-davinci-003")
parser.add_argument('--save_propmt_filepath', type=str, default="out_data/prompt-template-2.json")
args = parser.parse_args()
# ---------------------------------
# PromptTemplate オブジェクト作成
# ---------------------------------
template = """
英語: {keyword1}
日本語: {keyword2}\n
"""
prompt = PromptTemplate(
template=template,
input_variables=["keyword1", "keyword2"],
)
# ---------------------------------
# FewShotPromptTemplate オブジェクト作成
# ---------------------------------
# Few-shot learning(いくつかの正解例を与えた後に、質問文を投げる形式)の正解例
# PromptTemplate の {...} 部分の値を定義することで正解例を与える
fewshot_examples = [
{
"keyword1": "cat",
"keyword2": "猫",
},
{
"keyword1": "dog",
"keyword2": "犬",
},
]
fewshot_prompt = FewShotPromptTemplate(
examples=fewshot_examples, # Few-shot learning での正解例
example_prompt=prompt, # PromptTemplate オブジェクト
prefix="英語を日本語に翻訳してください", # プロンプト(質問文)
suffix="英語 : {input}\n", # Few-shot learning での正解例における接頭語(この例では "英語 : ")
input_variables=["input"], # suffix の {...} の変数名
example_separator="\n\n",
)
# プロンプトテンプレートの json データをローカル環境に保存
fewshot_prompt.save(args.save_propmt_filepath)
# ---------------------------------
# FewShotPromptTemplate オブジェクトからプロンプト文生成
# ---------------------------------
prompt_text = fewshot_prompt.format(input="cheese")
print("prompt_text: ", prompt_text)
# ---------------------------------
# モデル定義
# ---------------------------------
llm = OpenAI(
openai_api_key=args.openai_api_key,
model_name=args.model_name,
temperature=0.9, # 大きい値では出現確率が均一化され、より多様な文章が生成される傾向がある。低い値では出現確率の高い単語が優先され、より一定の傾向を持った文章が生成される傾向がある。
)
print("llm: ", llm)
# ---------------------------------
# LLM推論実行
# ---------------------------------
try:
response = llm(prompt=prompt_text)
print(f"response: {response}")
except Exception as e:
print(f"Excception was occurred | {e}")
exit(1)
exit(0)
| [
"\n\n",
"\n 英語: {keyword1}\n 日本語: {keyword2}\n\n ",
"英語を日本語に翻訳してください",
"input",
"英語 : {input}\n"
] |
2024-01-10 | Yagami360/ai-product-dev-tips | nlp_processing~11~run_llm_feedback.py | import os
from langchain import chat_models, prompts, callbacks
from langchain.schema import output_parser
from langsmith import Client
chain = (
prompts.ChatPromptTemplate.from_messages(
[
# 'human', 'ai', or 'system'.
("system", "You are a conversational bot."),
("human", "Hello world!"),
]
)
| chat_models.ChatOpenAI()
| output_parser.StrOutputParser()
)
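# collect_runs() captures the runs traced inside the "with" block below, so the id of this
# chain invocation can be attached to user feedback via client.create_feedback().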
with callbacks.collect_runs() as cb:
for tok in chain.stream({"input": "Hi, I'm Clara"}):
print(tok, end="", flush=True)
    run_id = cb.traced_runs[0].id
client = Client()
# ... User copies the generated response
client.create_feedback(run_id, "did_copy", score=True)
# ... User clicks a thumbs up button
client.create_feedback(run_id, "thumbs_up", score=True)
| [] |
2024-01-10 | Yagami360/ai-product-dev-tips | nlp_processing~2~run_langchain.py | import argparse
from langchain.llms import OpenAI
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--openai_api_key', type=str, default="dummuy")
parser.add_argument('--model_name', type=str, default="text-davinci-003")
parser.add_argument('--prompt', type=str, default="今日は天気が良いですね")
args = parser.parse_args()
# モデル定義
llm = OpenAI(
openai_api_key=args.openai_api_key,
model_name=args.model_name,
temperature=0.9, # 大きい値では出現確率が均一化され、より多様な文章が生成される傾向がある。低い値では出現確率の高い単語が優先され、より一定の傾向を持った文章が生成される傾向がある。
)
print("llm: ", llm)
# LLM推論実行
try:
response = llm(prompt=args.prompt)
print(f"response: {response}")
except Exception as e:
print(f"Excception was occurred | {e}")
exit(1)
exit(0)
| [] |
2024-01-10 | Yagami360/ai-product-dev-tips | nlp_processing~5~run_llm_call.py | import os
import argparse
from langchain.chat_models import ChatOpenAI
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--project_name', type=str, default="default")
parser.add_argument('--langchain_api_key', type=str, default="dummy")
parser.add_argument('--openai_api_key', type=str, default="dummy")
parser.add_argument('--prompt', type=str, default="Hello, world!")
args = parser.parse_args()
os.environ["LANGCHAIN_PROJECT"] = args.project_name
os.environ["LANGCHAIN_API_KEY"] = args.langchain_api_key
os.environ["OPENAI_API_KEY"] = args.openai_api_key
# モデル定義
llm = ChatOpenAI(temperature=0.9)
print("llm: ", llm)
# LLM推論実行
try:
result = llm.predict(args.prompt)
print(f'result={result}')
except Exception as e:
print(f"Excception was occurred | {e}")
exit(1)
exit(0)
| [] |
2024-01-10 | minhngt62/nlp-vabsa | vabsa~ml~vectorizers.py | from sklearn.feature_extraction.text import TfidfVectorizer
import gensim
from gensim.corpora import Dictionary
from gensim.models import CoherenceModel
import os
import numpy as np
class DenseTfidfVectorizer(TfidfVectorizer):
def transform(self, raw_documents):
X = super().transform(raw_documents)
return X.toarray()
def fit_transform(self, raw_documents, y=None):
X = super().fit_transform(raw_documents, y=y)
return X.toarray()
class LDA:
def __init__(
self,
ckpts_dir="checkpoints",
num_topics=48,
alpha=0.1,
eta=0.1,
iterations=1000
):
self.ckpts_dir = ckpts_dir
self.params = {'num_topics':num_topics, 'alpha':alpha, 'eta':eta, 'iterations':iterations}
self.model = None
def fit(self, X, y=None):
self.X = X
self.dictionary = Dictionary(text.split() for text in X)
self.train_corpus = [self.dictionary.doc2bow(text.split()) for text in X] # (word_idx, freq_count)
self.model = gensim.models.LdaMulticore(id2word=self.dictionary, minimum_probability=0.000, **self.params)
self.model.update(self.train_corpus)
return self
def save(self, model_name='test'):
os.makedirs(os.path.join(self.ckpts_dir, model_name), exist_ok=True)
self.model.save(os.path.join(self.ckpts_dir, model_name, model_name))
def load(self, model_name='test'):
self.model = gensim.models.LdaMulticore.load(os.path.join(self.ckpts_dir, model_name, model_name))
self.dictionary = self.model.id2word
def predict(self, document:str):
document = document.split()
document = self.dictionary.doc2bow(document)
topics = self.model.get_document_topics(document)
result = []
for topic in topics:
result.append(topic[1])
return np.array(result)
def score(self, *args, **kwargs):
score_fn = CoherenceModel(model=self.model, texts=[text.split() for text in self.X], dictionary=self.dictionary, coherence='c_v')
return score_fn.get_coherence()
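    # Hedged usage sketch (illustrative only, not executed):
    #   lda = LDA(num_topics=10)
    #   lda.fit(["tomato onion soup", "grilled chicken rice"])
    #   topic_distribution = lda.predict("spicy tomato soup")  # np.array of topic probabilities
    #   coherence = lda.score()  # c_v topic coherence on the training texts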
def get_params(self, deep=False):
return self.params
def set_params(self, **parameters):
self.params = parameters
return self | [] |
2024-01-10 | Nivix047/kb_generator | retrieve.py | import os
import dotenv
import openai
import pinecone
# Load environment variables from .env file
dotenv.load_dotenv(dotenv_path=".env")
# Function to retrieve text based on a query using Pinecone
def retrieve(query, index):
try:
# Create text embeddings for the query
res = openai.Embedding.create(input=[query], engine="text-embedding-ada-002")
xq = res['data'][0]['embedding']
# Query the Pinecone index with the embeddings
res = index.query(xq, top_k=1, include_metadata=True)
if res['matches']:
# If matches found, prepare a prompt with context
context = res['matches'][0]['metadata']['text']
prompt = f"Answer the question based on the context below.\n\ncontext:\n{context}\n\nQuestion: {query}\n\nAnswer:"
return prompt
else:
# Return a message if no relevant information is found
return "No relevant information found in the index for the given query."
except Exception as e:
print(f"An error occurred during retrieval: {e}")
return None
# Function to complete a given prompt using GPT-3.5-Turbo-Instruct
def complete(prompt):
try:
if prompt:
# Generate a completion based on the prompt
response = openai.Completion.create(engine='gpt-3.5-turbo-instruct', prompt=prompt, max_tokens=400)
return response['choices'][0]['text'].strip()
else:
# Return a message if the prompt is missing
return "No completion available due to missing prompt."
except Exception as e:
print(f"An error occurred during completion generation: {e}")
return None
# Main function to execute the script
def main():
try:
# Initialize OpenAI and Pinecone with API keys
openai.api_key = os.getenv("OPENAI_API_SECRET")
if not openai.api_key:
raise ValueError("OpenAI API key not found. Please check your .env file.")
pinecone_api_key = os.getenv("PINECONE_API_KEY")
if not pinecone_api_key:
raise ValueError("Pinecone API key not found. Please check your .env file.")
pinecone.init(api_key=pinecone_api_key, environment="us-west1-gcp-free")
# Retrieve Pinecone index name from environment variables
index_name = os.getenv("PINECONE_INDEX_NAME")
if not index_name:
raise ValueError("Pinecone index name not found in environment variables.")
# Load the Pinecone index
index = pinecone.Index(index_name=index_name)
# Example usage of the retrieve and complete functions
query = "What do $gt and $gte do in MongoDB?"
query_with_context = retrieve(query, index)
completion = complete(query_with_context)
if completion:
print(completion)
else:
print("No completion was generated.")
except ValueError as ve:
print(ve)
except Exception as e:
print(f"An unexpected error occurred: {e}")
# Execute the main function
if __name__ == "__main__":
main()
| [
"Answer the question based on the context below.\n\ncontext:\nPLACEHOLDER\n\nQuestion: What do $gt and $gte do in MongoDB?\n\nAnswer:"
] |
2024-01-10 | mbchang/data-driven-characters | data_driven_characters~memory~retrieval.py | from typing import Any, List, Dict
from langchain.memory import VectorStoreRetrieverMemory
from langchain.schema import Document
class ConversationVectorStoreRetrieverMemory(VectorStoreRetrieverMemory):
input_prefix = "Human"
output_prefix = "AI"
blacklist = [] # keys to ignore
def _form_documents(
self, inputs: Dict[str, Any], outputs: Dict[str, str]
) -> List[Document]:
"""Format context from this conversation to buffer."""
# Each document should only include the current turn, not the chat history
filtered_inputs = {
k: v
for k, v in inputs.items()
if k != self.memory_key and k not in self.blacklist
}
texts = []
for k, v in list(filtered_inputs.items()) + list(outputs.items()):
if k == "input":
k = self.input_prefix
elif k == "response":
k = self.output_prefix
texts.append(f"{k}: {v}")
page_content = "\n".join(texts)
return [Document(page_content=page_content)]
| [] |
2024-01-10 | mbchang/data-driven-characters | data_driven_characters~chatbots~summary_retrieval.py | import faiss
from tqdm import tqdm
from langchain.chains import ConversationChain
from langchain.chat_models import ChatOpenAI
from langchain.docstore import InMemoryDocstore
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.memory import (
ConversationBufferMemory,
CombinedMemory,
)
from langchain.prompts import PromptTemplate
from langchain.vectorstores import FAISS
from data_driven_characters.memory import ConversationVectorStoreRetrieverMemory
class SummaryRetrievalChatBot:
def __init__(self, character_definition, documents):
self.character_definition = character_definition
self.documents = documents
self.num_context_memories = 12
self.chat_history_key = "chat_history"
self.context_key = "context"
self.input_key = "input"
self.chain = self.create_chain(character_definition)
def create_chain(self, character_definition):
conv_memory = ConversationBufferMemory(
memory_key=self.chat_history_key, input_key=self.input_key
)
context_memory = ConversationVectorStoreRetrieverMemory(
retriever=FAISS(
OpenAIEmbeddings().embed_query,
faiss.IndexFlatL2(1536), # Dimensions of the OpenAIEmbeddings
InMemoryDocstore({}),
{},
).as_retriever(search_kwargs=dict(k=self.num_context_memories)),
memory_key=self.context_key,
output_prefix=character_definition.name,
blacklist=[self.chat_history_key],
)
# add the documents to the context memory
for i, summary in tqdm(enumerate(self.documents)):
context_memory.save_context(inputs={}, outputs={f"[{i}]": summary})
# Combined
memory = CombinedMemory(memories=[conv_memory, context_memory])
prompt = PromptTemplate.from_template(
f"""Your name is {character_definition.name}.
Here is how you describe yourself:
---
{character_definition.long_description}
---
You will have a conversation with a Human, and you will engage in a dialogue with them.
You will exaggerate your personality, interests, desires, emotions, and other traits.
You will stay in character as {character_definition.name} throughout the conversation, even if the Human asks you questions that you don't know the answer to.
You will not break character as {character_definition.name}.
You are {character_definition.name} in the following story snippets, which describe events in your life.
---
{{{self.context_key}}}
---
Current conversation:
---
{character_definition.name}: {character_definition.greeting}
{{{self.chat_history_key}}}
---
Human: {{{self.input_key}}}
{character_definition.name}:"""
)
GPT3 = ChatOpenAI(model_name="gpt-3.5-turbo")
chatbot = ConversationChain(
llm=GPT3, verbose=True, memory=memory, prompt=prompt
)
return chatbot
def greet(self):
return self.character_definition.greeting
def step(self, input):
return self.chain.run(input=input)
| [] |
2024-01-10 | mbchang/data-driven-characters | data_driven_characters~character.py | from dataclasses import dataclass, asdict
import json
import os
from langchain.chat_models import ChatOpenAI
from langchain import PromptTemplate, LLMChain
from data_driven_characters.chains import FitCharLimit, define_description_chain
from data_driven_characters.constants import VERBOSE
from data_driven_characters.utils import (
order_of_magnitude,
apply_file_naming_convention,
)
@dataclass
class Character:
name: str
short_description: str
long_description: str
greeting: str
def generate_character_ai_description(name, corpus_summaries, char_limit):
"""Generate a character description with a certain number of characters."""
lower_limit = char_limit - 10 ** (order_of_magnitude(char_limit))
description_chain = define_description_chain()
GPT4 = ChatOpenAI(model_name="gpt-4")
char_limit_chain = FitCharLimit(
chain=description_chain,
character_range=(lower_limit, char_limit),
llm=GPT4,
verbose=VERBOSE,
)
description = char_limit_chain.run(
corpus_summaries="\n\n".join(corpus_summaries),
description=f"{lower_limit}-character description", # specify a fewer characters than the limit
name=name,
)
return description
def generate_greeting(name, short_description, long_description):
"""Generate a greeting for a character."""
greeting_template = """Here are a short and long description for a character named {name}:
Short description:
---
{short_description}
---
Long description:
---
{long_description}
---
Generate a greeting that {name} would say to someone they just met, without quotations.
This greeting should reflect their personality.
"""
GPT3 = ChatOpenAI(model_name="gpt-3.5-turbo")
greeting = LLMChain(
llm=GPT3, prompt=PromptTemplate.from_template(greeting_template)
).run(
name=name,
short_description=short_description,
long_description=long_description,
)
# strip quotations
greeting = greeting.replace('"', "")
return greeting
def generate_character_definition(name, corpus_summaries):
"""Generate a Character.ai definition."""
short_description = generate_character_ai_description(
name=name, corpus_summaries=corpus_summaries, char_limit=50
)
long_description = generate_character_ai_description(
name=name, corpus_summaries=corpus_summaries, char_limit=500
)
greeting = generate_greeting(name, short_description, long_description)
# populate the dataclass
character_definition = Character(
name=name,
short_description=short_description,
long_description=long_description,
greeting=greeting,
)
return character_definition
def get_character_definition(name, corpus_summaries, cache_dir, force_refresh=False):
"""Get a Character.ai definition from a cache or generate it."""
cache_path = f"{cache_dir}/{apply_file_naming_convention(name)}.json"
if not os.path.exists(cache_path) or force_refresh:
character_definition = generate_character_definition(name, corpus_summaries)
with open(cache_path, "w") as f:
json.dump(asdict(character_definition), f)
else:
with open(cache_path, "r") as f:
character_definition = Character(**json.load(f))
return character_definition
| [
"Here are a short and long description for a character named {name}:\n\nShort description:\n---\n{short_description}\n---\n\nLong description:\n---\n{long_description}\n---\n\nGenerate a greeting that {name} would say to someone they just met, without quotations.\nThis greeting should reflect their personality.\n"
] |
2024-01-10 | mbchang/data-driven-characters | data_driven_characters~corpus.py | import json
import os
from langchain import PromptTemplate, LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.chains.summarize import load_summarize_chain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from data_driven_characters.constants import VERBOSE
def generate_docs(corpus, chunk_size, chunk_overlap):
"""Generate docs from a corpus."""
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
docs = text_splitter.create_documents([corpus])
return docs
def load_docs(corpus_path, chunk_size, chunk_overlap):
"""Load the corpus and split it into chunks."""
with open(corpus_path) as f:
corpus = f.read()
docs = generate_docs(corpus, chunk_size, chunk_overlap)
return docs
def generate_corpus_summaries(docs, summary_type="map_reduce"):
"""Generate summaries of the story."""
GPT3 = ChatOpenAI(model_name="gpt-3.5-turbo")
chain = load_summarize_chain(
GPT3, chain_type=summary_type, return_intermediate_steps=True, verbose=True
)
summary = chain({"input_documents": docs}, return_only_outputs=True)
intermediate_summaries = summary["intermediate_steps"]
return intermediate_summaries
def get_corpus_summaries(docs, summary_type, cache_dir, force_refresh=False):
"""Load the corpus summaries from cache or generate them."""
if not os.path.exists(cache_dir) or force_refresh:
os.makedirs(cache_dir, exist_ok=True)
if VERBOSE:
print("Summaries do not exist. Generating summaries.")
intermediate_summaries = generate_corpus_summaries(docs, summary_type)
for i, intermediate_summary in enumerate(intermediate_summaries):
with open(os.path.join(cache_dir, f"summary_{i}.txt"), "w") as f:
f.write(intermediate_summary)
else:
if VERBOSE:
print("Summaries already exist. Loading summaries.")
intermediate_summaries = []
for i in range(len(os.listdir(cache_dir))):
with open(os.path.join(cache_dir, f"summary_{i}.txt")) as f:
intermediate_summaries.append(f.read())
return intermediate_summaries
def generate_characters(corpus_summaries, num_characters):
"""Get a list of characters from a list of summaries."""
GPT4 = ChatOpenAI(model_name="gpt-4")
characters_prompt_template = """Consider the following corpus.
---
{corpus_summaries}
---
Give a line-separated list of all the characters, ordered by importance, without punctuation.
"""
characters = LLMChain(
llm=GPT4, prompt=PromptTemplate.from_template(characters_prompt_template)
).run(corpus_summaries="\n\n".join(corpus_summaries))
    # split the line-separated response and keep only the first num_characters entries
    # (note: no punctuation stripping is actually performed here)
    return characters.split("\n")[:num_characters]
def get_characters(corpus_summaries, num_characters, cache_dir, force_refresh=False):
cache_file = os.path.join(cache_dir, "characters.json")
if not os.path.exists(cache_file) or force_refresh:
characters = generate_characters(corpus_summaries, num_characters)
with open(cache_file, "w") as f:
json.dump(characters, f)
else:
with open(cache_file, "r") as f:
characters = json.load(f)
return characters
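# Illustrative end-to-end sketch (not part of the original module): the corpus path
# and cache directories are placeholders, and an OpenAI API key is assumed to be
# configured in the environment.
if __name__ == "__main__":
    docs = load_docs("data/story.txt", chunk_size=2048, chunk_overlap=64)
    summaries = get_corpus_summaries(docs, summary_type="map_reduce",
                                     cache_dir="output/summaries")
    main_characters = get_characters(summaries, num_characters=3, cache_dir="output")
    print(main_characters)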
| [
"Consider the following corpus.\n ---\n {corpus_summaries}\n ---\n Give a line-separated list of all the characters, ordered by importance, without punctuation.\n "
] |
2024-01-10 | D3Mlab/Recipe-MPR | baselines~monolithic~FewShot_Baselines.py | from helper import *
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel, AutoTokenizer
from transformers import OpenAIGPTTokenizer, OpenAIGPTLMHeadModel, OPTForCausalLM
from sklearn.utils import shuffle
from torch import optim
from tqdm import tqdm
import time
import json
import csv
import random
import numpy as np
import pandas as pd
from helper import get_model_and_tokenizer
def few_shot_prompt(q, add_answer=False):
text = "input: " + q["query"] + " \n"
text += "output: "
if add_answer:
text += q['options'][q['answer']] + " \n"
return text
def get_fewshot_NLL_score(model, tokenizer, condition, text, filler=' so I recommend ', normalize=False):
text = condition + filler + text
encodings = tokenizer(text, return_tensors="pt") # input to model
condition = tokenizer(condition, return_tensors="pt")
stride = condition.input_ids.size(1)
    # nlls / begin_loc / end_loc / trg_len are leftovers from a sliding-window
    # perplexity computation; only the full-sequence loss below is actually used
    nlls = []
    begin_loc = 0
    end_loc = stride
    trg_len = encodings.input_ids.size(1) - stride
input_ids = encodings.input_ids.to('cuda')
target_ids = input_ids.clone()
with torch.no_grad():
outputs = model(input_ids, labels=target_ids)
neg_log_likelihood = outputs[0]
logits = outputs[1].cpu()
pred = np.argmax(logits, axis=-1)[0]
if normalize:
with torch.no_grad():
c_input_ids = condition.input_ids.to('cuda')
outputs = model(c_input_ids, labels=c_input_ids)
c_neg_log_likelihood = outputs[0]
return (-1 * neg_log_likelihood) - (-1 * c_neg_log_likelihood)
else:
return -1 * neg_log_likelihood
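# Minimal sketch of exercising the scorer above on its own (not part of the original
# file); it assumes a CUDA device is available and that GPT-2 weights can be downloaded.
def _demo_nll_score():
    model = GPT2LMHeadModel.from_pretrained("gpt2").to("cuda")
    tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
    query = "input: a quick vegetarian dinner \noutput: "
    option = "Stir-fried tofu with mixed vegetables"
    # higher (less negative) values mean the option is more likely given the query
    return get_fewshot_NLL_score(model, tokenizer, query, option, filler="", normalize=True)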
def fewshot(dataset, model, tokenizer, normalize, prompt):
predictions = []
output_message = ""
# loop through each query
for sample in dataset:
# count += 1
# if count % 50 == 0:
# print('--> ', count)
# for key in sample['query_type']:
# if sample['query_type'][key] == 1:
# type_count[key] += 1
# output_message += str(count) + ' Query: ' + sample["query"] + ' \n'
scores = []
q_text = sample["query"]
p = few_shot_prompt(sample,False)
q_text = prompt + p
for key in sample["options"]:
score = get_fewshot_NLL_score(model, tokenizer, q_text, sample["options"][key], normalize=normalize, filler='')
assert not torch.isnan(score), 'score is nan'
scores.append([key,score])
# if key == sample["answer"]:
# output_message += 'Answer: ' + str(score) + ' ' + sample["options"][key] + ' \n'
# else:
# output_message += str(score) + ' ' + sample["options"][key] + ' \n'
def takeSecond(elem):
return elem[1]
# sort list with key
scores.sort(key=takeSecond, reverse=True)
predicted_id = scores[0][0]
predictions.append(sample["options"][predicted_id])
return predictions
def FS_pred(train_splits, test_splits, model_name, prompt_size=5):
# results_file = "FewShot_" + name + "_" + str(prompt_size) + ".csv"
model_class, tokenizer_class = get_model_and_tokenizer(model_name)
model = model_class.from_pretrained(model_name).to('cuda')
tokenizer = tokenizer_class.from_pretrained(model_name)
all_preds = []
prompt = ''
# prompt_size = 5
# generate prompt sample
for index in range(prompt_size):
p = few_shot_prompt(train_splits[index], True)
prompt += p
# print("Prompt:")
# print(prompt)
predictions = fewshot(test_splits, model, tokenizer, normalize=True, prompt=prompt)
return predictions | [] |
2024-01-10 | D3Mlab/Recipe-MPR | baselines~aspects~Aspect_GPT3_Text_Baseline.py | from helper import *
import openai
import time
from tqdm import tqdm
import random
import numpy as np
import re
from string import whitespace
from sklearn.utils import shuffle
import requests
from requests.exceptions import ConnectionError
openai.api_key = 'API_KEY'
def create_prompt(prompt_prefix, aspect, options_list):
query = "Aspect: " + aspect + " \n\n"
options = "Options: \n"
for i in range(len(options_list)):
options += str(i) + ". " + options_list[i] + "\n"
# ask for specific score format in text response
prompt_suffix = '''
Please answer in a format that looks like this:
Option 0 Score: _
Option 1 Score: _
Option 2 Score: _
Option 3 Score: _
Option 4 Score: _ '''
full_prompt = prompt_prefix + query + options + prompt_suffix
return full_prompt
def create_fs_prefix(prompt_prefix, train_data, prompt_size):
sample_data = random.sample(train_data, prompt_size)
prompt_text = prompt_prefix
for s in sample_data:
for a in list(s["correctness_explanation"].keys()):
query = "Aspect: " + a + " \n\n"
options = "Options: \n"
ans_ind = 0
options_list = [val for val in s['options'].values()]
# shuffle order of options in prompt
random.shuffle(options_list)
prompt_suffix = '\n'
for i in range(len(options_list)):
options += str(i) + ". " + options_list[i] + "\n"
# assign correct option a score of 1 and all other options a score of 0
if options_list[i] == s['options'][s['answer']]:
ans_ind = i
prompt_suffix += 'Option ' + str(i) + ' Score: ' + str(1) + '\n'
else:
prompt_suffix += 'Option ' + str(i) + ' Score: ' + str(0) + '\n'
full_prompt = query + options + prompt_suffix + '\n'
prompt_text += full_prompt
return prompt_text
# gets response from API
def get_response(prompt):
tries = 5
while True:
tries -= 1
try:
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=0,
max_tokens=60
)
break
except ConnectionError as err:
if tries == 0:
raise err
else:
time.sleep(5)
#print(response)
return response.choices[0].text
def parse_score_text(query, responses):
    all_scores = []
    invalid = 0  # counts aspect responses that could not be parsed into five scores
# scores for each aspect
for text in responses:
split_text = text.splitlines()
split_text = [x for x in split_text if x not in whitespace]
aspect_scores = []
option_score_dict = {}
# scores for each option
for span in split_text:
            option_num = re.search(r'Option (\d+)', span)
            option_score = re.search(r'Score: (\d+)', span)
            if option_num is None or option_score is None:
print("Invalid option num or score")
continue
option_num = int(option_num.group(1))
option_score = float(option_score.group(1))
option_score_dict.update({option_num:option_score})
# check if the number of scores corresponds to the number of options
if len(option_score_dict) != 5:
print("Invalid scores. Query: {}".format(query))
print(text)
print(option_score_dict)
invalid += 1
continue
# get list of scores for each option in order
for i in range(5):
aspect_scores.append(option_score_dict[i])
all_scores.append(aspect_scores)
return all_scores
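# Illustrative check of the parser above (not part of the original file); the fake
# response mimics the score format requested in the prompt suffix.
def _demo_parse_score_text():
    fake_response = (
        "\nOption 0 Score: 1\nOption 1 Score: 0\nOption 2 Score: 0\n"
        "Option 3 Score: 0\nOption 4 Score: 0"
    )
    # expected result: [[1.0, 0.0, 0.0, 0.0, 0.0]]
    return parse_score_text("example aspect", [fake_response])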
def aspect_gpt3_pred(train_data, test_data, agg_fcn, prompt_size=5, fewshot=False):
# prompt instruction
prefix = "Given the preference aspect and five options, generate a list of scores for how well each option satisfies the query: \n\n"
if fewshot:
prefix = create_fs_prefix(prefix, train_data, prompt_size)
predictions = []
N = len(test_data)
for i in tqdm(range(0, N, 1)):
# limit API requests per minute
if (i + 1) % 10 == 0:
time.sleep(60)
sample = test_data[i]
options_list = [val for val in sample['options'].values()]
query = sample['query']
correct_answer = sample['options'][sample['answer']]
aspects = sample['correctness_explanation'].keys()
responses = []
for a in aspects:
prompt = create_prompt(prefix, a, options_list)
# get API response
response = get_response(prompt)
responses.append(response)
# parse text scores into float scores
scores = parse_score_text(query, responses)
agg_scores = aggregate(scores, agg_fcn)
agg_scores, options_shuffled = shuffle(agg_scores, options_list, random_state=0)
args = np.argsort(agg_scores)
predicted = options_shuffled[args[-1]]
predictions.append(predicted)
return predictions | [
"\n",
"PLACEHOLDERPLACEHOLDERPLACEHOLDER\n",
"Option PLACEHOLDER Score: 1\n",
"Option PLACEHOLDER Score: 0\n",
"PLACEHOLDERPLACEHOLDERPLACEHOLDERPLACEHOLDER",
"\nPlease answer in a format that looks like this:\nOption 0 Score: _\nOption 1 Score: _\nOption 2 Score: _\nOption 3 Score: _\nOption 4 Score: _ "
] |
2024-01-10 | D3Mlab/Recipe-MPR | baselines~aspects~Aspect_FewShot_Baselines.py | from helper import *
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel, AutoTokenizer
from transformers import OpenAIGPTTokenizer, OpenAIGPTLMHeadModel, OPTForCausalLM
from sklearn.utils import shuffle
from torch import optim
from tqdm import tqdm
import time
import json
import csv
import random
import numpy as np
import pandas as pd
from helper import get_model_and_tokenizer
# for generating set prompts from ground-truth aspects
def few_shot_prompt(q, add_answer=False):
text = ""
for key in list(q['correctness_explanation'].keys()):
text += "input: " + str(key) + ' \n'
text += "output: "
if add_answer:
text += q['options'][q['answer']] + " \n"
return text
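# Illustration only (not in the original file): for a sample whose ground-truth
# aspects are e.g. {"vegetarian": ..., "quick": ...}, the prompt above looks like
#   input: vegetarian
#   input: quick
#   output: <answer option text when add_answer=True>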
def get_fewshot_NLL_score(model, tokenizer, condition, text, filler=' so I recommend ', normalize=False):
text = condition + filler + text
encodings = tokenizer(text, return_tensors="pt")
condition = tokenizer(condition, return_tensors="pt")
stride = condition.input_ids.size(1)
nlls = []
begin_loc = 0
end_loc = stride
trg_len = encodings.input_ids.size(1) - stride
input_ids = encodings.input_ids.to('cuda')
target_ids = input_ids.clone()
with torch.no_grad():
outputs = model(input_ids, labels=target_ids)
neg_log_likelihood = outputs[0]
if normalize:
with torch.no_grad():
c_input_ids = condition.input_ids.to('cuda')
outputs = model(c_input_ids, labels=c_input_ids)
c_neg_log_likelihood = outputs[0]
return (-1 * neg_log_likelihood) - (-1 * c_neg_log_likelihood)
else:
return -1 * neg_log_likelihood
def aspect_FS_pred(train_data, test_dataset, model_name, agg_fcn, normalize=True, prompt_size=5):
model_class, tokenizer_class = get_model_and_tokenizer(model_name)
model = model_class.from_pretrained(model_name).to('cuda')
tokenizer = tokenizer_class.from_pretrained(model_name)
output_message = ""
predictions = []
# loop through each query
for sample in test_dataset:
# count += 1
# if count % 50 == 0:
# print('--> ',count)
# for key in sample['query_type']:
# if sample['query_type'][key] == 1:
# type_count[key] += 1
prompt = ''
# prompt_size = 5
# generate prompt sample
for index in range(prompt_size):
p = few_shot_prompt(train_data[index],True)
prompt += p
# output_message += str(count) + ' Query: ' + sample["query"] + ' \n'
q_text = sample["query"]
aspects = sample["correctness_explanation"].keys()
options_list = [val for val in sample["options"].values()]
all_scores = []
for a in aspects:
p ="input: " + a + "\n"
q_text = prompt + p
scores = []
for key in sample["options"]:
score = get_fewshot_NLL_score(model, tokenizer, q_text, sample["options"][key], normalize=normalize, filler='')
assert not torch.isnan(score), 'score is nan'
scores.append(float(score))
# if key == sample["answer"]:
# output_message += 'Answer: ' + str(score) + ' ' + sample["options"][key] + ' \n'
# else:
# output_message += str(score) + ' ' + sample["options"][key] + ' \n'
all_scores.append(scores)
agg_scores = aggregate(all_scores, agg_fcn)
agg_scores, options_list = shuffle(agg_scores, options_list, random_state=0)
        args = np.argsort(agg_scores)
        predicted_option = options_list[args[-1]]  # option text with the highest aggregated score
        predictions.append(predicted_option)
return predictions | [] |
2024-01-10 | D3Mlab/Recipe-MPR | baselines~monolithic~GPT3_Text_Baseline.py | import openai
import time
from tqdm import tqdm
import random
import requests
from requests.exceptions import ConnectionError
openai.api_key = 'API_KEY'
def create_prompt(prompt_prefix, query, options_list):
query = "Query: " + query + " \n\n"
options = "Options: \n"
for i in range(len(options_list)):
options += str(i) + ". " + options_list[i] + "\n"
prompt_suffix = "\nOption: "
full_prompt = prompt_prefix + query + options + prompt_suffix
return full_prompt
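# Small illustration of the assembled zero-shot prompt (not part of the original
# file); the query and option texts are invented.
def _demo_create_prompt():
    prefix = "Given the recipe query and five options, choose the option that best satisfies the query: \n\n"
    option_texts = ["Lentil soup", "Beef stew", "Pad thai", "Greek salad", "Mushroom risotto"]
    return create_prompt(prefix, "something warm and vegetarian", option_texts)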
def create_fs_prefix(prompt_prefix, train_data, prompt_size):
# generates sample text
sample_data = random.sample(train_data, prompt_size)
prompt_text = prompt_prefix
for s in sample_data:
query = "Query: " + s["query"] + " \n\n"
options = "Options: \n"
options_list = [val for val in s['options'].values()]
# shuffle order of options in prompt
random.shuffle(options_list)
for i in range(len(options_list)):
options += str(i) + ". " + options_list[i] + "\n"
prompt_suffix = "\nOption: " + s['options'][s['answer']] + "\n\n"
full_prompt = query+options+prompt_suffix
prompt_text += full_prompt
return prompt_text
# gets response from API
def get_response(prompt):
tries = 5
while True:
tries -= 1
try:
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=0,
max_tokens=60
)
break
except ConnectionError as err:
if tries == 0:
raise err
else:
time.sleep(5)
#print(response)
return response.choices[0].text
def gpt3_pred(train_data, test_data, prompt_size=5, fewshot=False):
# prompt instruction
prefix = "Given the recipe query and five options, choose the option that best satisfies the query: \n\n"
if fewshot:
prefix = create_fs_prefix(prefix, train_data, prompt_size)
predictions = []
N = len(test_data)
for i in tqdm(range(0, N, 1)):
# limit API requests per minute
if (i + 1) % 10 == 0:
time.sleep(60)
sample = test_data[i]
options_list = [val for val in sample['options'].values()]
query = sample['query']
correct_answer = sample['options'][sample['answer']]
prompt = create_prompt(prefix, query, options_list)
# get API response
answer = get_response(prompt)
# if generated text response has correct answer description
if correct_answer in answer:
predictions.append(correct_answer)
else:
predictions.append(answer)
return predictions | [
"\n\n",
"\nOption: ",
"PLACEHOLDERPLACEHOLDERPLACEHOLDER",
"options",
"answer",
"PLACEHOLDERPLACEHOLDERPLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | ByChelsea/VAND-APRIL-GAN | open_clip~factory.py | import json
import logging
import os
import pathlib
import re
import numpy as np
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Union
import torch
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
from .model import CLIP, CustomTextCLIP, convert_weights_to_lp, convert_to_custom_text_state_dict,\
resize_pos_embed, get_cast_dtype
from .coca_model import CoCa
from .loss import ClipLoss, DistillClipLoss, CoCaLoss
from .openai import load_openai_model
from .pretrained import is_pretrained_cfg, get_pretrained_cfg, download_pretrained, list_pretrained_tags_by_model, download_pretrained_from_hf
from .transform import image_transform, AugmentationCfg
from .tokenizer import HFTokenizer, tokenize
HF_HUB_PREFIX = 'hf-hub:'
_MODEL_CONFIG_PATHS = [Path(__file__).parent / "model_configs/"]
_MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
def _rescan_model_configs():
global _MODEL_CONFIGS
config_ext = ('.json',)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f'*{ext}'))
for cf in config_files:
with open(cf, 'r') as f:
model_cfg = json.load(f)
if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')):
_MODEL_CONFIGS[cf.stem] = model_cfg
_MODEL_CONFIGS = {k: v for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))}
_rescan_model_configs() # initial populate of model config registry
def list_models():
""" enumerate available model architectures based on config files """
return list(_MODEL_CONFIGS.keys())
def add_model_config(path):
""" add model config path or file and update registry """
if not isinstance(path, Path):
path = Path(path)
_MODEL_CONFIG_PATHS.append(path)
_rescan_model_configs()
def get_model_config(model_name):
if model_name in _MODEL_CONFIGS:
return deepcopy(_MODEL_CONFIGS[model_name])
else:
return None
def get_tokenizer(model_name):
if model_name.startswith(HF_HUB_PREFIX):
tokenizer = HFTokenizer(model_name[len(HF_HUB_PREFIX):])
else:
config = get_model_config(model_name)
tokenizer = HFTokenizer(
config['text_cfg']['hf_tokenizer_name']) if 'hf_tokenizer_name' in config['text_cfg'] else tokenize
return tokenizer
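# Brief sketch (not part of the original file): for non-HF models this returns the
# default `tokenize` callable, which maps a list of strings to a tensor of token ids.
def _demo_get_tokenizer():
    tokenizer = get_tokenizer("ViT-B-16")
    return tokenizer(["a photo of a cat", "a photo of a dog"])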
def load_state_dict(checkpoint_path: str, map_location='cpu'):
checkpoint = torch.load(checkpoint_path, map_location=map_location)
if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
if next(iter(state_dict.items()))[0].startswith('module'):
state_dict = {k[7:]: v for k, v in state_dict.items()}
return state_dict
def load_checkpoint(model, checkpoint_path, strict=True):
state_dict = load_state_dict(checkpoint_path)
# detect old format and make compatible with new format
if 'positional_embedding' in state_dict and not hasattr(model, 'positional_embedding'):
state_dict = convert_to_custom_text_state_dict(state_dict)
resize_pos_embed(state_dict, model)
incompatible_keys = model.load_state_dict(state_dict, strict=strict)
return incompatible_keys
def create_model(
model_name: str,
img_size: int,
pretrained: Optional[str] = None,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_text: bool = False,
force_patch_dropout: Optional[float] = None,
force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
pretrained_image: bool = False,
pretrained_hf: bool = True,
cache_dir: Optional[str] = None,
output_dict: Optional[bool] = None,
require_pretrained: bool = False,
):
has_hf_hub_prefix = model_name.startswith(HF_HUB_PREFIX)
if has_hf_hub_prefix:
model_id = model_name[len(HF_HUB_PREFIX):]
checkpoint_path = download_pretrained_from_hf(model_id, cache_dir=cache_dir)
config_path = download_pretrained_from_hf(model_id, filename='open_clip_config.json', cache_dir=cache_dir)
with open(config_path, 'r', encoding='utf-8') as f:
config = json.load(f)
pretrained_cfg = config['preprocess_cfg']
model_cfg = config['model_cfg']
else:
model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names
checkpoint_path = None
pretrained_cfg = {}
model_cfg = None
if isinstance(device, str):
device = torch.device(device)
if pretrained and pretrained.lower() == 'openai':
logging.info(f'Loading pretrained {model_name} from OpenAI.')
model_cfg = model_cfg or get_model_config(model_name)
if model_cfg['vision_cfg']['image_size'] != img_size:
model_cfg['vision_cfg']['image_size'] = img_size
cast_dtype = get_cast_dtype(precision)
model_pre = load_openai_model(
model_name,
precision=precision,
device=device,
jit=jit,
cache_dir=cache_dir,
)
state_dict = model_pre.state_dict()
# to always output dict even if it is clip
if output_dict and hasattr(model_pre, "output_dict"):
model_pre.output_dict = True
model = CLIP(**model_cfg, cast_dtype=cast_dtype)
### for resnet
if not hasattr(model.visual, 'grid_size'):
model.visual.grid_size = int(np.sqrt(model.visual.attnpool.positional_embedding.shape[0] - 1))
resize_pos_embed(state_dict, model)
incompatible_keys = model.load_state_dict(state_dict, strict=True)
model.to(device=device)
if precision in ("fp16", "bf16"):
convert_weights_to_lp(model, dtype=torch.bfloat16 if precision == 'bf16' else torch.float16)
# set image / mean metadata from pretrained_cfg if available, or use default
model.visual.image_mean = pretrained_cfg.get('mean', None) or OPENAI_DATASET_MEAN
model.visual.image_std = pretrained_cfg.get('std', None) or OPENAI_DATASET_STD
# to always output dict even if it is clip
if output_dict and hasattr(model, "output_dict"):
model.output_dict = True
if jit:
model = torch.jit.script(model)
else:
model = load_openai_model(
model_name,
precision=precision,
device=device,
jit=jit,
cache_dir=cache_dir,
)
# to always output dict even if it is clip
if output_dict and hasattr(model, "output_dict"):
model.output_dict = True
else:
        model_cfg = model_cfg or get_model_config(model_name)
        if model_cfg is not None:
            logging.info(f'Loaded {model_name} model config.')
        else:
            logging.error(f'Model config for {model_name} not found; available models {list_models()}.')
            raise RuntimeError(f'Model config for {model_name} not found.')
        # override the configured image size only after confirming a config was found
        model_cfg['vision_cfg']['image_size'] = img_size
if force_quick_gelu:
# override for use of QuickGELU on non-OpenAI transformer models
model_cfg["quick_gelu"] = True
if force_patch_dropout is not None:
# override the default patch dropout value
model_cfg["vision_cfg"]["patch_dropout"] = force_patch_dropout
if force_image_size is not None:
# override model config's image size
model_cfg["vision_cfg"]["image_size"] = force_image_size
if pretrained_image:
if 'timm_model_name' in model_cfg.get('vision_cfg', {}):
# pretrained weight loading for timm models set via vision_cfg
model_cfg['vision_cfg']['timm_model_pretrained'] = True
else:
assert False, 'pretrained image towers currently only supported for timm models'
cast_dtype = get_cast_dtype(precision)
is_hf_model = 'hf_model_name' in model_cfg.get('text_cfg', {})
custom_text = model_cfg.pop('custom_text', False) or force_custom_text or is_hf_model
if custom_text:
if is_hf_model:
model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf
if "coca" in model_name:
model = CoCa(**model_cfg, cast_dtype=cast_dtype)
else:
model = CustomTextCLIP(**model_cfg, cast_dtype=cast_dtype)
else:
model = CLIP(**model_cfg, cast_dtype=cast_dtype)
pretrained_loaded = False
if pretrained:
checkpoint_path = ''
pretrained_cfg = get_pretrained_cfg(model_name, pretrained)
if pretrained_cfg:
checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir)
elif os.path.exists(pretrained):
checkpoint_path = pretrained
if checkpoint_path:
logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
load_checkpoint(model, checkpoint_path)
else:
            error_str = (
                f'Pretrained weights ({pretrained}) not found for model {model_name}. '
                f'Available pretrained tags ({list_pretrained_tags_by_model(model_name)}).')
logging.warning(error_str)
raise RuntimeError(error_str)
pretrained_loaded = True
elif has_hf_hub_prefix:
logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
load_checkpoint(model, checkpoint_path)
pretrained_loaded = True
if require_pretrained and not pretrained_loaded:
# callers of create_model_from_pretrained always expect pretrained weights
raise RuntimeError(
f'Pretrained weights were required for (model: {model_name}, pretrained: {pretrained}) but not loaded.')
model.to(device=device)
if precision in ("fp16", "bf16"):
convert_weights_to_lp(model, dtype=torch.bfloat16 if precision == 'bf16' else torch.float16)
# set image / mean metadata from pretrained_cfg if available, or use default
model.visual.image_mean = pretrained_cfg.get('mean', None) or OPENAI_DATASET_MEAN
model.visual.image_std = pretrained_cfg.get('std', None) or OPENAI_DATASET_STD
# to always output dict even if it is clip
if output_dict and hasattr(model, "output_dict"):
model.output_dict = True
if jit:
model = torch.jit.script(model)
return model
def create_loss(args):
if args.distill:
return DistillClipLoss(
local_loss=args.local_loss,
gather_with_grad=args.gather_with_grad,
cache_labels=True,
rank=args.rank,
world_size=args.world_size,
use_horovod=args.horovod,
)
elif "coca" in args.model.lower():
return CoCaLoss(
caption_loss_weight=args.coca_caption_loss_weight,
clip_loss_weight=args.coca_contrastive_loss_weight,
local_loss=args.local_loss,
gather_with_grad=args.gather_with_grad,
cache_labels=True,
rank=args.rank,
world_size=args.world_size,
use_horovod=args.horovod,
)
return ClipLoss(
local_loss=args.local_loss,
gather_with_grad=args.gather_with_grad,
cache_labels=True,
rank=args.rank,
world_size=args.world_size,
use_horovod=args.horovod,
)
def create_model_and_transforms(
model_name: str,
img_size: int,
pretrained: Optional[str] = None,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_text: bool = False,
force_patch_dropout: Optional[float] = None,
force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
pretrained_image: bool = False,
pretrained_hf: bool = True,
image_mean: Optional[Tuple[float, ...]] = None,
image_std: Optional[Tuple[float, ...]] = None,
aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,
cache_dir: Optional[str] = None,
output_dict: Optional[bool] = None,
):
model = create_model(
model_name,
img_size,
pretrained,
precision=precision,
device=device,
jit=jit,
force_quick_gelu=force_quick_gelu,
force_custom_text=force_custom_text,
force_patch_dropout=force_patch_dropout,
force_image_size=force_image_size,
pretrained_image=pretrained_image,
pretrained_hf=pretrained_hf,
cache_dir=cache_dir,
output_dict=output_dict,
)
image_mean = image_mean or getattr(model.visual, 'image_mean', None)
image_std = image_std or getattr(model.visual, 'image_std', None)
preprocess_train = image_transform(
model.visual.image_size,
is_train=True,
mean=image_mean,
std=image_std,
aug_cfg=aug_cfg,
)
preprocess_val = image_transform(
model.visual.image_size,
is_train=False,
mean=image_mean,
std=image_std,
)
return model, preprocess_train, preprocess_val
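# Illustrative usage sketch (not part of the original file): the model name, image
# size, and pretrained tag are examples and require the corresponding weights to be
# downloadable; an img_size differing from the checkpoint triggers positional-embedding
# interpolation inside load_checkpoint.
def _demo_create_model_and_transforms():
    model, preprocess_train, preprocess_val = create_model_and_transforms(
        "ViT-B-16",
        img_size=240,
        pretrained="laion400m_e32",
        device="cpu",
    )
    return model, preprocess_val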
def create_model_from_pretrained(
        model_name: str,
        img_size: int,  # added so the call below matches the updated create_model signature
        pretrained: Optional[str] = None,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_text: bool = False,
force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
return_transform: bool = True,
image_mean: Optional[Tuple[float, ...]] = None,
image_std: Optional[Tuple[float, ...]] = None,
cache_dir: Optional[str] = None,
):
model = create_model(
        model_name,
        img_size,
        pretrained,
precision=precision,
device=device,
jit=jit,
force_quick_gelu=force_quick_gelu,
force_custom_text=force_custom_text,
force_image_size=force_image_size,
cache_dir=cache_dir,
require_pretrained=True,
)
if not return_transform:
return model
image_mean = image_mean or getattr(model.visual, 'image_mean', None)
image_std = image_std or getattr(model.visual, 'image_std', None)
preprocess = image_transform(
model.visual.image_size,
is_train=False,
mean=image_mean,
std=image_std,
)
return model, preprocess
| [] |