date_collected (stringclasses, 1 value) | repo_name (string, 6–116 chars) | file_name (string, 2–220 chars) | file_contents (string, 13–357k chars) | prompts (sequence)
---|---|---|---|---
2024-01-10 | nneven/momo-ai | chains~task_chain.py | import config
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
)
class TaskChain:
def __init__(self, syllabus: str, llm: ChatOpenAI = ChatOpenAI(model='gpt-3.5-turbo', temperature=0.5)):
self.syllabus = syllabus
self.llm = llm
human_message_prompt = HumanMessagePromptTemplate(
prompt = PromptTemplate(
input_variables=["syllabus", "question"],
template="Given the following syllabus information: {syllabus}, please answer the following question about the class: {question}"
)
)
self.prompt = ChatPromptTemplate.from_messages([human_message_prompt])
self.chain = LLMChain(llm=self.llm, prompt=self.prompt)
def run(self, question: str):
return self.chain.run({"syllabus": self.syllabus, "question": question})
| [
"Given the following syllabus information: {syllabus}, please answer the following question about the class: {question}",
"question"
] |
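A minimal usage sketch for the TaskChain class in the file above. The syllabus text, the question, and the presence of a valid OPENAI_API_KEY for ChatOpenAI are illustrative assumptions, not part of the original repo:

# Hypothetical usage of TaskChain from chains~task_chain.py above.
syllabus_text = "CS101: Intro to Programming. Grading: 40% projects, 60% exams."  # placeholder syllabus
chain = TaskChain(syllabus=syllabus_text)  # uses the default ChatOpenAI(model='gpt-3.5-turbo', temperature=0.5)
print(chain.run("What fraction of the grade comes from exams?"))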
2024-01-10 | aakimura/ghost-copywriter | generate.py | import os
import openai
# Environment variables
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
PRODUCT_MODEL = os.environ.get('PRODUCT_MODEL')
HEADLINE_MODEL = os.environ.get('HEADLINE_MODEL')
SUBHEADLINE_MODEL = os.environ.get('SUBHEADLINE_MODEL')
# Classes
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def create_completion(model, prompt, temperature=0.5,
top_p=1, frequency_penalty=0,
presence_penalty=0, stop='.\n\n'):
"""
Generate a completion with a new prompt using a pre-trained model
Parameters
----------
model: [string] ID of your pre-trained model.
prompt: [string] Text block containing your prompt. Remember to use the
same parameters as the ones you used to train your model.
Finish your prompt with `\n\n###\n\n`.
temperature: [float] Control how creative the model should be. 1 = most
creative, 0 = more 'robotic' or strict.
Returns
-------
JSON with the completion's parameters.
"""
completion = openai.Completion.create(
model=model,
prompt=prompt,
temperature=temperature,
top_p=top_p,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty,
stop=stop,
max_tokens=60)
return completion
def create_prompt(company='', features='', product='', headline=''):
"""
Format a prompt from the provided parameters.
Parameters
----------
company: [string] Name of the company.
features: [string] List of features of the target product.
product: [string] Description of the product.
headline: [string] Website's headline.
Returns
-------
prompt: [string] Formatted prompt ready to send to OpenAI's API. The fields
provided determine which kind of prompt is built (tracked internally as
prompt_type):
0 = company name and features; used to get a product description.
1 = company name, features, and product description; used to get a headline.
2 = company name, features, product description and headline; used to get a
supporting headline.
"""
prompt_type = ''
prompt_args = []
# Company and features
if len(company) == 0:
raise ValueError('You have to provide a company name')
elif len(features) == 0:
raise ValueError("You have to provide the product's features")
else:
prompt_type = 0
prompt_args.append(
"company: {}\nfeatures: {}".format(company, features))
# Product description
if len(product) != 0:
prompt_type = 1
prompt_args.append("product: {}".format(product))
if len(headline) != 0:
prompt_type = 2
prompt_args.append("headline: {}".format(headline))
prompt_args.append("\n###\n\n")
return "\n".join(prompt_args)
def create_copy(company_name, features, product=None):
"""
Create the product description (if not provided), then the headline and subheadline.
"""
# Product description creation
if not product:
print("Creating product description...", end=' ', flush=True)
product_prompt = create_prompt(company=company_name, features=features)
product = create_completion(PRODUCT_MODEL, product_prompt)
product = product['choices'][0]['text']
print("Done.")
# Headline creation
print("Creating headline...", end=' ', flush=True)
headline_prompt = create_prompt(company=company_name, features=features,
product=product)
headline = create_completion(HEADLINE_MODEL, headline_prompt)
headline = headline['choices'][0]['text']
print("Done.")
# Subheadline creation
print("Creating subheadline...", end=' ', flush=True)
subheadline_prompt = create_prompt(company=company_name, features=features,
product=product, headline=headline)
subheadline = create_completion(SUBHEADLINE_MODEL, subheadline_prompt)
subheadline = subheadline['choices'][0]['text']
print("Done.")
return product, headline, subheadline
if __name__ == '__main__':
os.system('cls' if os.name == 'nt' else 'clear')
openai.api_key = OPENAI_API_KEY
print("\n","-"*50,"Input","-"*50)
company = input("What's your company name? ")
features = input("What are your product's features? ")
print("\n")
# Generate copy
product, headline, subheadline = create_copy(company, features)
# Results
print("\n","-"*50,"Results","-"*50)
print(f"{bcolors.HEADER}Company name:{bcolors.ENDC} {company}")
print(f"{bcolors.HEADER}Features:{bcolors.ENDC} {features}")
print(f"{bcolors.HEADER}Product description:{bcolors.ENDC} {product}")
print(f"{bcolors.HEADER}Headline:{bcolors.ENDC} {headline}")
print(f"{bcolors.HEADER}Subheadline:{bcolors.ENDC} {subheadline}") | [
"0",
"[]",
"1",
"2"
] |
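For reference, a hedged sketch of the three prompt shapes that create_prompt in the file above assembles. The company, features, product, and headline values are placeholders, and the comments show the string the function would return, not model output:

# prompt_type 0: company + features -> used to request a product description
create_prompt(company="Acme", features="fast, affordable")
# returns "company: Acme\nfeatures: fast, affordable\n\n###\n\n"
# prompt_type 1: adds the product description -> used to request a headline
create_prompt(company="Acme", features="fast, affordable", product="A budget delivery drone")
# prompt_type 2: adds the headline -> used to request a supporting subheadline
create_prompt(company="Acme", features="fast, affordable",
              product="A budget delivery drone", headline="Deliver more for less")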
2024-01-10 | opendilab-llm-safety/agenta | examples~test_apps~MultiChoiceParamTestApp~mytest.py | import agenta as ag
from langchain.llms import OpenAI
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
default_prompt = "What is a good name for a company that makes {product}?"
ag.init()
ag.config.default(
prompt_template=ag.TextParam(default_prompt),
model=ag.MultipleChoiceParam(1, [1, 2]),
)
@ag.entrypoint
def completion(
product: str,
) -> str:
llm = OpenAI(model=ag.config.model)
prompt = PromptTemplate(
input_variables=["product"],
template=ag.config.prompt_template,
)
chain = LLMChain(llm=llm, prompt=prompt)
output = chain.run(product=product)
return output
| [
"What is a good name for a company that makes {product}?"
] |
2024-01-10 | DataDog/py-trace-test | ddtrace~contrib~langchain~patch.py | import os
import sys
from typing import Any
from typing import Dict
from typing import Optional
from typing import TYPE_CHECKING
import langchain
from langchain.callbacks.openai_info import get_openai_token_cost_for_model
import wrapt
from ddtrace import config
from ddtrace.constants import ERROR_TYPE
from ddtrace.contrib._trace_utils_llm import BaseLLMIntegration
from ddtrace.contrib.langchain.constants import API_KEY
from ddtrace.contrib.langchain.constants import COMPLETION_TOKENS
from ddtrace.contrib.langchain.constants import MODEL
from ddtrace.contrib.langchain.constants import PROMPT_TOKENS
from ddtrace.contrib.langchain.constants import PROVIDER
from ddtrace.contrib.langchain.constants import TOTAL_COST
from ddtrace.contrib.langchain.constants import TYPE
from ddtrace.contrib.langchain.constants import text_embedding_models
from ddtrace.contrib.langchain.constants import vectorstores
from ddtrace.contrib.trace_utils import unwrap
from ddtrace.contrib.trace_utils import with_traced_module
from ddtrace.contrib.trace_utils import wrap
from ddtrace.internal.agent import get_stats_url
from ddtrace.internal.logger import get_logger
from ddtrace.internal.utils import ArgumentError
from ddtrace.internal.utils import get_argument_value
from ddtrace.internal.utils.formats import asbool
from ddtrace.internal.utils.formats import deep_getattr
from ddtrace.pin import Pin
if TYPE_CHECKING:
from ddtrace import Span
log = get_logger(__name__)
def get_version():
# type: () -> str
return getattr(langchain, "__version__", "")
config._add(
"langchain",
{
"logs_enabled": asbool(os.getenv("DD_LANGCHAIN_LOGS_ENABLED", False)),
"metrics_enabled": asbool(os.getenv("DD_LANGCHAIN_METRICS_ENABLED", True)),
"span_prompt_completion_sample_rate": float(os.getenv("DD_LANGCHAIN_SPAN_PROMPT_COMPLETION_SAMPLE_RATE", 1.0)),
"log_prompt_completion_sample_rate": float(os.getenv("DD_LANGCHAIN_LOG_PROMPT_COMPLETION_SAMPLE_RATE", 0.1)),
"span_char_limit": int(os.getenv("DD_LANGCHAIN_SPAN_CHAR_LIMIT", 128)),
"_api_key": os.getenv("DD_API_KEY"),
},
)
class _LangChainIntegration(BaseLLMIntegration):
_integration_name = "langchain"
def __init__(self, config, stats_url, site, api_key):
super().__init__(config, stats_url, site, api_key)
def _set_base_span_tags(self, span, interface_type="", provider=None, model=None, api_key=None):
# type: (Span, str, Optional[str], Optional[str], Optional[str]) -> None
"""Set base level tags that should be present on all LangChain spans (if they are not None)."""
span.set_tag_str(TYPE, interface_type)
if provider is not None:
span.set_tag_str(PROVIDER, provider)
if model is not None:
span.set_tag_str(MODEL, model)
if api_key is not None:
if len(api_key) >= 4:
span.set_tag_str(API_KEY, "...%s" % str(api_key[-4:]))
else:
span.set_tag_str(API_KEY, api_key)
@classmethod
def _logs_tags(cls, span):
# type: (Span) -> str
api_key = span.get_tag(API_KEY) or ""
tags = "env:%s,version:%s,%s:%s,%s:%s,%s:%s,%s:%s" % ( # noqa: E501
(config.env or ""),
(config.version or ""),
PROVIDER,
(span.get_tag(PROVIDER) or ""),
MODEL,
(span.get_tag(MODEL) or ""),
TYPE,
(span.get_tag(TYPE) or ""),
API_KEY,
api_key,
)
return tags
@classmethod
def _metrics_tags(cls, span):
# type: (Span) -> list
provider = span.get_tag(PROVIDER) or ""
api_key = span.get_tag(API_KEY) or ""
tags = [
"version:%s" % (config.version or ""),
"env:%s" % (config.env or ""),
"service:%s" % (span.service or ""),
"%s:%s" % (PROVIDER, provider),
"%s:%s" % (MODEL, span.get_tag(MODEL) or ""),
"%s:%s" % (TYPE, span.get_tag(TYPE) or ""),
"%s:%s" % (API_KEY, api_key),
"error:%d" % span.error,
]
err_type = span.get_tag(ERROR_TYPE)
if err_type:
tags.append("%s:%s" % (ERROR_TYPE, err_type))
return tags
def record_usage(self, span, usage):
# type: (Span, Dict[str, Any]) -> None
if not usage or self._config.metrics_enabled is False:
return
for token_type in ("prompt", "completion", "total"):
num_tokens = usage.get("token_usage", {}).get(token_type + "_tokens")
if not num_tokens:
continue
self.metric(span, "dist", "tokens.%s" % token_type, num_tokens)
total_cost = span.get_metric(TOTAL_COST)
if total_cost:
self.metric(span, "incr", "tokens.total_cost", total_cost)
def _extract_model_name(instance):
# type: (langchain.llms.BaseLLM) -> Optional[str]
"""Extract model name or ID from llm instance."""
for attr in ("model", "model_name", "model_id", "model_key", "repo_id"):
if hasattr(instance, attr):
return getattr(instance, attr)
return None
def _format_api_key(api_key):
# type: (str) -> str
"""Obfuscate a given LLM provider API key by returning the last four characters."""
if not api_key or len(api_key) < 4:
return ""
return "...%s" % api_key[-4:]
def _extract_api_key(instance):
# type: (Any) -> str
"""
Extract and format LLM-provider API key from instance.
Note that langchain's LLM/ChatModel/Embeddings interfaces do not have a
standard attribute name for storing the provider-specific API key, so make a
best effort here by checking for attributes that end with `api_key/api_token`.
"""
api_key_attrs = [a for a in dir(instance) if a.endswith(("api_token", "api_key"))]
if api_key_attrs and hasattr(instance, str(api_key_attrs[0])):
api_key = getattr(instance, api_key_attrs[0], None)
if api_key:
return _format_api_key(api_key)
return ""
def _tag_openai_token_usage(span, llm_output, propagated_cost=0, propagate=False):
# type: (Span, Dict[str, Any], int, bool) -> None
"""
Extract token usage from llm_output, tag on span.
Calculate the total cost for each LLM/chat_model, then propagate those values up the trace so that
the root span will store the total token_usage/cost of all of its descendants.
"""
for token_type in ("prompt", "completion", "total"):
current_metric_value = span.get_metric("langchain.tokens.%s_tokens" % token_type) or 0
metric_value = llm_output["token_usage"].get("%s_tokens" % token_type, 0)
span.set_metric("langchain.tokens.%s_tokens" % token_type, current_metric_value + metric_value)
total_cost = span.get_metric(TOTAL_COST) or 0
if not propagate:
try:
completion_cost = get_openai_token_cost_for_model(
span.get_tag(MODEL),
span.get_metric(COMPLETION_TOKENS),
is_completion=True,
)
prompt_cost = get_openai_token_cost_for_model(span.get_tag(MODEL), span.get_metric(PROMPT_TOKENS))
total_cost = completion_cost + prompt_cost
except ValueError:
# If not in langchain's openai model catalog, the above helpers will raise a ValueError.
log.debug("Cannot calculate token/cost as the model is not in LangChain's OpenAI model catalog.")
span.set_metric(TOTAL_COST, propagated_cost + total_cost)
if span._parent is not None:
_tag_openai_token_usage(span._parent, llm_output, propagated_cost=propagated_cost + total_cost, propagate=True)
@with_traced_module
def traced_llm_generate(langchain, pin, func, instance, args, kwargs):
llm_provider = instance._llm_type
prompts = get_argument_value(args, kwargs, 0, "prompts")
integration = langchain._datadog_integration
model = _extract_model_name(instance)
span = integration.trace(
pin,
"%s.%s" % (instance.__module__, instance.__class__.__name__),
interface_type="llm",
provider=llm_provider,
model=model,
api_key=_extract_api_key(instance),
)
completions = None
try:
if integration.is_pc_sampled_span(span):
for idx, prompt in enumerate(prompts):
span.set_tag_str("langchain.request.prompts.%d" % idx, integration.trunc(str(prompt)))
for param, val in getattr(instance, "_identifying_params", {}).items():
if isinstance(val, dict):
for k, v in val.items():
span.set_tag_str("langchain.request.%s.parameters.%s.%s" % (llm_provider, param, k), str(v))
else:
span.set_tag_str("langchain.request.%s.parameters.%s" % (llm_provider, param), str(val))
completions = func(*args, **kwargs)
if isinstance(instance, langchain.llms.OpenAI):
_tag_openai_token_usage(span, completions.llm_output)
integration.record_usage(span, completions.llm_output)
for idx, completion in enumerate(completions.generations):
if integration.is_pc_sampled_span(span):
span.set_tag_str("langchain.response.completions.%d.text" % idx, integration.trunc(completion[0].text))
if completion and completion[0].generation_info is not None:
span.set_tag_str(
"langchain.response.completions.%d.finish_reason" % idx,
str(completion[0].generation_info.get("finish_reason")),
)
span.set_tag_str(
"langchain.response.completions.%d.logprobs" % idx,
str(completion[0].generation_info.get("logprobs")),
)
except Exception:
span.set_exc_info(*sys.exc_info())
integration.metric(span, "incr", "request.error", 1)
raise
finally:
span.finish()
integration.metric(span, "dist", "request.duration", span.duration_ns)
if integration.is_pc_sampled_log(span):
if completions is None:
log_completions = []
else:
log_completions = [
[{"text": completion.text} for completion in completions] for completions in completions.generations
]
integration.log(
span,
"info" if span.error == 0 else "error",
"sampled %s.%s" % (instance.__module__, instance.__class__.__name__),
attrs={
"prompts": prompts,
"choices": log_completions,
},
)
return completions
@with_traced_module
async def traced_llm_agenerate(langchain, pin, func, instance, args, kwargs):
llm_provider = instance._llm_type
prompts = get_argument_value(args, kwargs, 0, "prompts")
integration = langchain._datadog_integration
model = _extract_model_name(instance)
span = integration.trace(
pin,
"%s.%s" % (instance.__module__, instance.__class__.__name__),
interface_type="llm",
provider=llm_provider,
model=model,
api_key=_extract_api_key(instance),
)
completions = None
try:
if integration.is_pc_sampled_span(span):
for idx, prompt in enumerate(prompts):
span.set_tag_str("langchain.request.prompts.%d" % idx, integration.trunc(str(prompt)))
for param, val in getattr(instance, "_identifying_params", {}).items():
if isinstance(val, dict):
for k, v in val.items():
span.set_tag_str("langchain.request.%s.parameters.%s.%s" % (llm_provider, param, k), str(v))
else:
span.set_tag_str("langchain.request.%s.parameters.%s" % (llm_provider, param), str(val))
completions = await func(*args, **kwargs)
if isinstance(instance, langchain.llms.OpenAI):
_tag_openai_token_usage(span, completions.llm_output)
integration.record_usage(span, completions.llm_output)
for idx, completion in enumerate(completions.generations):
if integration.is_pc_sampled_span(span):
span.set_tag_str("langchain.response.completions.%d.text" % idx, integration.trunc(completion[0].text))
if completion and completion[0].generation_info is not None:
span.set_tag_str(
"langchain.response.completions.%d.finish_reason" % idx,
str(completion[0].generation_info.get("finish_reason")),
)
span.set_tag_str(
"langchain.response.completions.%d.logprobs" % idx,
str(completion[0].generation_info.get("logprobs")),
)
except Exception:
span.set_exc_info(*sys.exc_info())
integration.metric(span, "incr", "request.error", 1)
raise
finally:
span.finish()
integration.metric(span, "dist", "request.duration", span.duration_ns)
if integration.is_pc_sampled_log(span):
if completions is None:
log_completions = []
else:
log_completions = [
[{"text": completion.text} for completion in completions] for completions in completions.generations
]
integration.log(
span,
"info" if span.error == 0 else "error",
"sampled %s.%s" % (instance.__module__, instance.__class__.__name__),
attrs={
"prompts": prompts,
"choices": log_completions,
},
)
return completions
@with_traced_module
def traced_chat_model_generate(langchain, pin, func, instance, args, kwargs):
llm_provider = instance._llm_type.split("-")[0]
chat_messages = get_argument_value(args, kwargs, 0, "messages")
integration = langchain._datadog_integration
span = integration.trace(
pin,
"%s.%s" % (instance.__module__, instance.__class__.__name__),
interface_type="chat_model",
provider=llm_provider,
model=_extract_model_name(instance),
api_key=_extract_api_key(instance),
)
chat_completions = None
try:
for message_set_idx, message_set in enumerate(chat_messages):
for message_idx, message in enumerate(message_set):
if integration.is_pc_sampled_span(span):
span.set_tag_str(
"langchain.request.messages.%d.%d.content" % (message_set_idx, message_idx),
integration.trunc(message.content),
)
span.set_tag_str(
"langchain.request.messages.%d.%d.message_type" % (message_set_idx, message_idx),
message.__class__.__name__,
)
for param, val in getattr(instance, "_identifying_params", {}).items():
if isinstance(val, dict):
for k, v in val.items():
span.set_tag_str("langchain.request.%s.parameters.%s.%s" % (llm_provider, param, k), str(v))
else:
span.set_tag_str("langchain.request.%s.parameters.%s" % (llm_provider, param), str(val))
chat_completions = func(*args, **kwargs)
if isinstance(instance, langchain.chat_models.ChatOpenAI):
_tag_openai_token_usage(span, chat_completions.llm_output)
integration.record_usage(span, chat_completions.llm_output)
for message_set_idx, message_set in enumerate(chat_completions.generations):
for idx, chat_completion in enumerate(message_set):
if integration.is_pc_sampled_span(span):
span.set_tag_str(
"langchain.response.completions.%d.%d.content" % (message_set_idx, idx),
integration.trunc(chat_completion.text),
)
span.set_tag_str(
"langchain.response.completions.%d.%d.message_type" % (message_set_idx, idx),
chat_completion.message.__class__.__name__,
)
except Exception:
span.set_exc_info(*sys.exc_info())
integration.metric(span, "incr", "request.error", 1)
raise
finally:
span.finish()
integration.metric(span, "dist", "request.duration", span.duration_ns)
if integration.is_pc_sampled_log(span):
if chat_completions is None:
log_chat_completions = []
else:
log_chat_completions = [
[
{
"content": message.text,
"message_type": message.message.__class__.__name__,
}
for message in messages
]
for messages in chat_completions.generations
]
integration.log(
span,
"info" if span.error == 0 else "error",
"sampled %s.%s" % (instance.__module__, instance.__class__.__name__),
attrs={
"messages": [
[
{
"content": message.content,
"message_type": message.__class__.__name__,
}
for message in messages
]
for messages in chat_messages
],
"choices": log_chat_completions,
},
)
return chat_completions
@with_traced_module
async def traced_chat_model_agenerate(langchain, pin, func, instance, args, kwargs):
llm_provider = instance._llm_type.split("-")[0]
chat_messages = get_argument_value(args, kwargs, 0, "messages")
integration = langchain._datadog_integration
span = integration.trace(
pin,
"%s.%s" % (instance.__module__, instance.__class__.__name__),
interface_type="chat_model",
provider=llm_provider,
model=_extract_model_name(instance),
api_key=_extract_api_key(instance),
)
chat_completions = None
try:
for message_set_idx, message_set in enumerate(chat_messages):
for message_idx, message in enumerate(message_set):
if integration.is_pc_sampled_span(span):
span.set_tag_str(
"langchain.request.messages.%d.%d.content" % (message_set_idx, message_idx),
integration.trunc(message.content),
)
span.set_tag_str(
"langchain.request.messages.%d.%d.message_type" % (message_set_idx, message_idx),
message.__class__.__name__,
)
for param, val in getattr(instance, "_identifying_params", {}).items():
if isinstance(val, dict):
for k, v in val.items():
span.set_tag_str("langchain.request.%s.parameters.%s.%s" % (llm_provider, param, k), str(v))
else:
span.set_tag_str("langchain.request.%s.parameters.%s" % (llm_provider, param), str(val))
chat_completions = await func(*args, **kwargs)
if isinstance(instance, langchain.chat_models.ChatOpenAI):
_tag_openai_token_usage(span, chat_completions.llm_output)
integration.record_usage(span, chat_completions.llm_output)
for message_set_idx, message_set in enumerate(chat_completions.generations):
for idx, chat_completion in enumerate(message_set):
if integration.is_pc_sampled_span(span):
span.set_tag_str(
"langchain.response.completions.%d.%d.content" % (message_set_idx, idx),
integration.trunc(chat_completion.text),
)
span.set_tag_str(
"langchain.response.completions.%d.%d.message_type" % (message_set_idx, idx),
chat_completion.message.__class__.__name__,
)
except Exception:
span.set_exc_info(*sys.exc_info())
integration.metric(span, "incr", "request.error", 1)
raise
finally:
span.finish()
integration.metric(span, "dist", "request.duration", span.duration_ns)
if integration.is_pc_sampled_log(span):
if chat_completions is None:
log_chat_completions = []
else:
log_chat_completions = [
[
{
"content": message.text,
"message_type": message.message.__class__.__name__,
}
for message in messages
]
for messages in chat_completions.generations
]
integration.log(
span,
"info" if span.error == 0 else "error",
"sampled %s.%s" % (instance.__module__, instance.__class__.__name__),
attrs={
"messages": [
[
{
"content": message.content,
"message_type": message.__class__.__name__,
}
for message in messages
]
for messages in chat_messages
],
"choices": log_chat_completions,
},
)
return chat_completions
@with_traced_module
def traced_embedding(langchain, pin, func, instance, args, kwargs):
"""
This traces both embed_query(text) and embed_documents(texts), so we need to make sure
we get the right arg/kwarg.
"""
try:
input_texts = get_argument_value(args, kwargs, 0, "texts")
except ArgumentError:
input_texts = get_argument_value(args, kwargs, 0, "text")
provider = instance.__class__.__name__.split("Embeddings")[0].lower()
integration = langchain._datadog_integration
span = integration.trace(
pin,
"%s.%s" % (instance.__module__, instance.__class__.__name__),
interface_type="embedding",
provider=provider,
model=_extract_model_name(instance),
api_key=_extract_api_key(instance),
)
try:
if isinstance(input_texts, str):
if integration.is_pc_sampled_span(span):
span.set_tag_str("langchain.request.inputs.0.text", integration.trunc(input_texts))
span.set_metric("langchain.request.input_count", 1)
else:
if integration.is_pc_sampled_span(span):
for idx, text in enumerate(input_texts):
span.set_tag_str("langchain.request.inputs.%d.text" % idx, integration.trunc(text))
span.set_metric("langchain.request.input_count", len(input_texts))
# langchain currently does not support token tracking for OpenAI embeddings:
# https://github.com/hwchase17/langchain/issues/945
embeddings = func(*args, **kwargs)
if isinstance(embeddings, list) and isinstance(embeddings[0], list):
for idx, embedding in enumerate(embeddings):
span.set_metric("langchain.response.outputs.%d.embedding_length" % idx, len(embedding))
else:
span.set_metric("langchain.response.outputs.embedding_length", len(embeddings))
except Exception:
span.set_exc_info(*sys.exc_info())
integration.metric(span, "incr", "request.error", 1)
raise
finally:
span.finish()
integration.metric(span, "dist", "request.duration", span.duration_ns)
if integration.is_pc_sampled_log(span):
integration.log(
span,
"info" if span.error == 0 else "error",
"sampled %s.%s" % (instance.__module__, instance.__class__.__name__),
attrs={"inputs": [input_texts] if isinstance(input_texts, str) else input_texts},
)
return embeddings
@with_traced_module
def traced_chain_call(langchain, pin, func, instance, args, kwargs):
integration = langchain._datadog_integration
span = integration.trace(pin, "%s.%s" % (instance.__module__, instance.__class__.__name__), interface_type="chain")
final_outputs = {}
try:
inputs = get_argument_value(args, kwargs, 0, "inputs")
if not isinstance(inputs, dict):
inputs = {instance.input_keys[0]: inputs}
if integration.is_pc_sampled_span(span):
for k, v in inputs.items():
span.set_tag_str("langchain.request.inputs.%s" % k, integration.trunc(str(v)))
template = deep_getattr(instance, "prompt.template", default="")
if template:
span.set_tag_str("langchain.request.prompt", integration.trunc(str(template)))
final_outputs = func(*args, **kwargs)
if integration.is_pc_sampled_span(span):
for k, v in final_outputs.items():
span.set_tag_str("langchain.response.outputs.%s" % k, integration.trunc(str(v)))
except Exception:
span.set_exc_info(*sys.exc_info())
integration.metric(span, "incr", "request.error", 1)
raise
finally:
span.finish()
integration.metric(span, "dist", "request.duration", span.duration_ns)
if integration.is_pc_sampled_log(span):
log_inputs = {}
log_outputs = {}
for k, v in inputs.items():
log_inputs[k] = str(v)
for k, v in final_outputs.items():
log_outputs[k] = str(v)
integration.log(
span,
"info" if span.error == 0 else "error",
"sampled %s.%s" % (instance.__module__, instance.__class__.__name__),
attrs={
"inputs": log_inputs,
"prompt": str(deep_getattr(instance, "prompt.template", default="")),
"outputs": log_outputs,
},
)
return final_outputs
@with_traced_module
async def traced_chain_acall(langchain, pin, func, instance, args, kwargs):
integration = langchain._datadog_integration
span = integration.trace(pin, "%s.%s" % (instance.__module__, instance.__class__.__name__), interface_type="chain")
final_outputs = {}
try:
inputs = get_argument_value(args, kwargs, 0, "inputs")
if not isinstance(inputs, dict):
inputs = {instance.input_keys[0]: inputs}
if integration.is_pc_sampled_span(span):
for k, v in inputs.items():
span.set_tag_str("langchain.request.inputs.%s" % k, integration.trunc(str(v)))
template = deep_getattr(instance, "prompt.template", default="")
if template:
span.set_tag_str("langchain.request.prompt", integration.trunc(str(template)))
final_outputs = await func(*args, **kwargs)
if integration.is_pc_sampled_span(span):
for k, v in final_outputs.items():
span.set_tag_str("langchain.response.outputs.%s" % k, integration.trunc(str(v)))
except Exception:
span.set_exc_info(*sys.exc_info())
integration.metric(span, "incr", "request.error", 1)
raise
finally:
span.finish()
integration.metric(span, "dist", "request.duration", span.duration_ns)
if integration.is_pc_sampled_log(span):
log_inputs = {}
log_outputs = {}
for k, v in inputs.items():
log_inputs[k] = str(v)
for k, v in final_outputs.items():
log_outputs[k] = str(v)
integration.log(
span,
"info" if span.error == 0 else "error",
"sampled %s.%s" % (instance.__module__, instance.__class__.__name__),
attrs={
"inputs": log_inputs,
"prompt": str(deep_getattr(instance, "prompt.template", default="")),
"outputs": log_outputs,
},
)
return final_outputs
@with_traced_module
def traced_similarity_search(langchain, pin, func, instance, args, kwargs):
integration = langchain._datadog_integration
query = get_argument_value(args, kwargs, 0, "query")
k = kwargs.get("k", args[1] if len(args) >= 2 else None)
provider = instance.__class__.__name__.lower()
span = integration.trace(
pin,
"%s.%s" % (instance.__module__, instance.__class__.__name__),
interface_type="similarity_search",
provider=provider,
api_key=_extract_api_key(instance),
)
documents = []
try:
if integration.is_pc_sampled_span(span):
span.set_tag_str("langchain.request.query", integration.trunc(query))
if k is not None:
span.set_tag_str("langchain.request.k", str(k))
for kwarg_key, v in kwargs.items():
span.set_tag_str("langchain.request.%s" % kwarg_key, str(v))
if isinstance(instance, langchain.vectorstores.Pinecone):
span.set_tag_str(
"langchain.request.pinecone.environment",
instance._index.configuration.server_variables.get("environment", ""),
)
span.set_tag_str(
"langchain.request.pinecone.index_name",
instance._index.configuration.server_variables.get("index_name", ""),
)
span.set_tag_str(
"langchain.request.pinecone.project_name",
instance._index.configuration.server_variables.get("project_name", ""),
)
api_key = instance._index.configuration.api_key.get("ApiKeyAuth", "")
span.set_tag_str(API_KEY, "...%s" % api_key[-4:]) # override api_key for Pinecone
documents = func(*args, **kwargs)
span.set_metric("langchain.response.document_count", len(documents))
for idx, document in enumerate(documents):
span.set_tag_str(
"langchain.response.document.%d.page_content" % idx, integration.trunc(str(document.page_content))
)
for kwarg_key, v in document.metadata.items():
span.set_tag_str(
"langchain.response.document.%d.metadata.%s" % (idx, kwarg_key), integration.trunc(str(v))
)
except Exception:
span.set_exc_info(*sys.exc_info())
integration.metric(span, "incr", "request.error", 1)
raise
finally:
span.finish()
integration.metric(span, "dist", "request.duration", span.duration_ns)
if integration.is_pc_sampled_log(span):
integration.log(
span,
"info" if span.error == 0 else "error",
"sampled %s.%s" % (instance.__module__, instance.__class__.__name__),
attrs={
"query": query,
"k": k or "",
"documents": [
{"page_content": document.page_content, "metadata": document.metadata} for document in documents
],
},
)
return documents
def patch():
if getattr(langchain, "_datadog_patch", False):
return
langchain._datadog_patch = True
# TODO: How do we test this? Can we mock out the metric/logger/sampler?
ddsite = os.getenv("DD_SITE", "datadoghq.com")
ddapikey = os.getenv("DD_API_KEY", config.langchain._api_key)
Pin().onto(langchain)
integration = _LangChainIntegration(
config=config.langchain,
stats_url=get_stats_url(),
site=ddsite,
api_key=ddapikey,
)
langchain._datadog_integration = integration
if config.langchain.logs_enabled:
if not ddapikey:
raise ValueError(
"DD_API_KEY is required for sending logs from the LangChain integration."
" The LangChain integration can be disabled by setting the ``DD_TRACE_LANGCHAIN_ENABLED``"
" environment variable to False."
)
integration.start_log_writer()
wrap("langchain", "llms.base.BaseLLM.generate", traced_llm_generate(langchain))
wrap("langchain", "llms.base.BaseLLM.agenerate", traced_llm_agenerate(langchain))
wrap("langchain", "chat_models.base.BaseChatModel.generate", traced_chat_model_generate(langchain))
wrap("langchain", "chat_models.base.BaseChatModel.agenerate", traced_chat_model_agenerate(langchain))
wrap("langchain", "chains.base.Chain.__call__", traced_chain_call(langchain))
wrap("langchain", "chains.base.Chain.acall", traced_chain_acall(langchain))
# Text embedding models override two abstract base methods instead of super calls, so we need to
# wrap each langchain-provided text embedding model.
for text_embedding_model in text_embedding_models:
if hasattr(langchain.embeddings, text_embedding_model):
# Ensure not double patched, as some Embeddings interfaces are pointers to other Embeddings.
if not isinstance(
deep_getattr(langchain.embeddings, "%s.embed_query" % text_embedding_model), wrapt.ObjectProxy
):
wrap("langchain", "embeddings.%s.embed_query" % text_embedding_model, traced_embedding(langchain))
if not isinstance(
deep_getattr(langchain.embeddings, "%s.embed_documents" % text_embedding_model), wrapt.ObjectProxy
):
wrap("langchain", "embeddings.%s.embed_documents" % text_embedding_model, traced_embedding(langchain))
# TODO: langchain >= 0.0.209 includes async embedding implementation (only for OpenAI)
# We need to do the same with Vectorstores.
for vectorstore in vectorstores:
if hasattr(langchain.vectorstores, vectorstore):
# Ensure not double patched, as some Vectorstore interfaces are pointers to other Vectorstores.
if not isinstance(
deep_getattr(langchain.vectorstores, "%s.similarity_search" % vectorstore), wrapt.ObjectProxy
):
wrap(
"langchain", "vectorstores.%s.similarity_search" % vectorstore, traced_similarity_search(langchain)
)
def unpatch():
if not getattr(langchain, "_datadog_patch", False):
return
langchain._datadog_patch = False
unwrap(langchain.llms.base.BaseLLM, "generate")
unwrap(langchain.llms.base.BaseLLM, "agenerate")
unwrap(langchain.chat_models.base.BaseChatModel, "generate")
unwrap(langchain.chat_models.base.BaseChatModel, "agenerate")
unwrap(langchain.chains.base.Chain, "__call__")
unwrap(langchain.chains.base.Chain, "acall")
for text_embedding_model in text_embedding_models:
if hasattr(langchain.embeddings, text_embedding_model):
if isinstance(
deep_getattr(langchain.embeddings, "%s.embed_query" % text_embedding_model), wrapt.ObjectProxy
):
unwrap(getattr(langchain.embeddings, text_embedding_model), "embed_query")
if isinstance(
deep_getattr(langchain.embeddings, "%s.embed_documents" % text_embedding_model), wrapt.ObjectProxy
):
unwrap(getattr(langchain.embeddings, text_embedding_model), "embed_documents")
for vectorstore in vectorstores:
if hasattr(langchain.vectorstores, vectorstore):
if isinstance(
deep_getattr(langchain.vectorstores, "%s.similarity_search" % vectorstore), wrapt.ObjectProxy
):
unwrap(getattr(langchain.vectorstores, vectorstore), "similarity_search")
delattr(langchain, "_datadog_integration")
| [
"prompt.template"
] |
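A minimal, hedged sketch of how the patch above is typically enabled from application code. It assumes the standard ddtrace.patch(...) entry point and a running Datadog agent; the environment variables simply mirror the config keys registered in config._add above:

import os
os.environ.setdefault("DD_LANGCHAIN_METRICS_ENABLED", "true")  # mirrors the defaults in config._add above
os.environ.setdefault("DD_LANGCHAIN_SPAN_PROMPT_COMPLETION_SAMPLE_RATE", "1.0")
from ddtrace import patch
patch(langchain=True)  # invokes the patch() above: wraps BaseLLM.generate/agenerate, Chain.__call__, embeddings, vectorstores
import langchain  # subsequent LLM, chat model, chain, embedding, and similarity_search calls are now traced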
2024-01-10 | juleskuehn/llm-playground | llmchat~chat~llm_utils~vertex.py | """
Functions copied from GCP examples:
https://github.com/GoogleCloudPlatform/generative-ai/blob/main/language/examples/langchain-intro/intro_langchain_palm_api.ipynb
"""
import time
from typing import List
from pydantic import BaseModel
from pgvector.django import CosineDistance
from langchain.embeddings import VertexAIEmbeddings
from langchain.chains.question_answering import load_qa_chain
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.summarize import load_summarize_chain
from langchain.chat_models import ChatVertexAI
from langchain.llms import VertexAI
from chat.models import Message, User, Chat, DocumentChunk, Document
# Utility functions for Embeddings API with rate limiting
def rate_limit(max_per_minute):
period = 60 / max_per_minute
print("Waiting")
while True:
before = time.time()
yield
after = time.time()
elapsed = after - before
sleep_time = max(0, period - elapsed)
if sleep_time > 0:
print(".", end="")
time.sleep(sleep_time)
class CustomVertexAIEmbeddings(VertexAIEmbeddings, BaseModel):
requests_per_minute: int
num_instances_per_batch: int
# Overriding embed_documents method
def embed_documents(self, texts: List[str]):
limiter = rate_limit(self.requests_per_minute)
results = []
docs = list(texts)
while docs:
# Working in batches because the API accepts maximum 5
# documents per request to get embeddings
head, docs = (
docs[: self.num_instances_per_batch],
docs[self.num_instances_per_batch :],
)
chunk = self.client.get_embeddings(head)
results.extend(chunk)
next(limiter)
return [r.values for r in results]
# Embedding
EMBEDDING_QPM = 100
EMBEDDING_NUM_BATCH = 5
gcp_embeddings = CustomVertexAIEmbeddings(
requests_per_minute=EMBEDDING_QPM,
num_instances_per_batch=EMBEDDING_NUM_BATCH,
)
text_llm = VertexAI(max_output_tokens=1024)
summarize_chain = load_summarize_chain(text_llm, chain_type="map_reduce")
CHUNK_SIZE = 2000
CHUNK_OVERLAP = 200
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=CHUNK_SIZE,
chunk_overlap=CHUNK_OVERLAP,
)
def get_docs_chunks_by_embedding(request, query, max_distance=None):
query_embedding = gcp_embeddings.embed_documents([query])[0]
user_docs = Document.objects.filter(user=request.user)
# documents_by_mean = user_docs.order_by(
# CosineDistance("mean_embedding", query_embedding)
# )[:3]
if max_distance is None:
documents_by_summary = user_docs.order_by(
CosineDistance("summary_embedding", query_embedding)
)[:3]
chunks_by_embedding = (
DocumentChunk.objects.filter(document__in=user_docs)
.order_by(CosineDistance("embedding", query_embedding))[:10]
.prefetch_related("document")
)
else:
documents_by_summary = user_docs.alias(
distance=CosineDistance("summary_embedding", query_embedding)
).filter(distance__lt=max_distance)[:3]
chunks_by_embedding = (
DocumentChunk.objects.filter(document__in=user_docs)
.alias(distance=CosineDistance("embedding", query_embedding))
.filter(distance__lt=max_distance)
.prefetch_related("document")
)[:10]
return documents_by_summary, chunks_by_embedding
def get_qa_response(query, documents, return_sources=True):
if return_sources:
chain = load_qa_with_sources_chain(text_llm, chain_type="stuff")
response = chain(
{"input_documents": documents, "question": query}, return_only_outputs=True
)
print(response)
return response["output_text"]
else:
chain = load_qa_chain(text_llm, chain_type="stuff")
response = chain.run(input_documents=documents, question=query)
return response
| [] |
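A short, hedged sketch of the generator-based throttle used by CustomVertexAIEmbeddings.embed_documents above. The batches are placeholders and the real Vertex AI client call is left out:

# rate_limit yields once per next() call and sleeps so that consecutive
# next() calls are at least 60 / max_per_minute seconds apart.
limiter = rate_limit(max_per_minute=100)
for batch in (["doc one", "doc two"], ["doc three"]):  # placeholder batches of at most 5 texts
    # embeddings = self.client.get_embeddings(batch)   # real Vertex AI call omitted in this sketch
    next(limiter)  # throttle between batches, mirroring embed_documents above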
2024-01-10 | BerriAI/reliableGPT | reliablegpt~tests~test_Model.py | import sys
sys.path.append('..')
import os
import dotenv
from dotenv import load_dotenv
load_dotenv()
import openai
from Model import Model
openai.api_key = os.getenv('OPENAI_API_KEY')
obj = Model(openai.ChatCompletion.create)
create_completion = obj.get_original_completion()
print(create_completion(model="text-davinci-003", prompt="Hello world")) | [] |
2024-01-10 | BerriAI/reliableGPT | tests~test_Individual.py | # have the router (which is reliableGPT) determine if an instantiation is calling the rate limit handler or the individual request wrapper directly
# save the original references of a model in model.py -> like a Model Card
import sys
import os
import dotenv
from dotenv import load_dotenv
load_dotenv()
sys.path.append('..') # Adds the parent directory to the system path
import openai
from reliablegpt import reliableGPT
import concurrent.futures
openai.ChatCompletion.create = reliableGPT(
openai.ChatCompletion.create,
user_email="[email protected]",
fallback_strategy=["text-davinci-003", "text-davinci-003"])
print(openai.ChatCompletion.create)
good_open_ai_api_key = os.getenv('OPENAI_API_KEY')
def test_single_call_bad_key():
openai.api_key = "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"
model = "gpt-4"
messages = [
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": "Who won the Chess championship 2022"
},
]
temperature = 0.7
error_count = 0
failure_count = 0 # Track the number of failures
try:
print("Making OpenAI Call")
response = openai.ChatCompletion.create(model=model,
messages=messages,
temperature=temperature)
print("Response: ", response)
if response and "error" in response:
error_count += 1
if response == "Sorry, the OpenAI (GPT) failed":
failure_count += 1
except Exception as e:
print("Exception occurred:", e)
error_count += 1
print(f"Error Count: {error_count}")
print(f"Fallback response count: {failure_count}")
if error_count == 0:
print("All calls executed successfully.")
else:
print("Some calls returned errors.")
test_single_call_bad_key()
def test_embedding_bad_key():
openai.Embedding.create = reliableGPT(
openai.Embedding.create,
user_email="[email protected]",
user_token='_4FTminlzIHtyWZ5Jy9UkNOoN31TirdHaqOwi-lYHfI',
send_notification=True)
openai.api_key = "bad-key"
def get_embedding(text, model="text-embedding-ada-002"):
text = text.replace("\n", " ")
print("text")
return openai.Embedding.create(input=[text],
model=model)["data"][0]["embedding"]
result = get_embedding("GM")
print(result)
test_embedding_bad_key()
def test_bad_open_ai_call():
model = "gpt-4-turbo"
openai.api_key = good_open_ai_api_key
messages = [
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": "Who are you?"
},
]
result = openai.ChatCompletion.create(model=model, messages=messages)
print(f"Result: from open ai result {result}")
return result
test_bad_open_ai_call()
def test_bad_open_ai_call_with_q():
openai.ChatCompletion.create = reliableGPT(
openai.ChatCompletion.create,
user_email="[email protected]",
fallback_strategy=["text-davinci-003", "text-davinci-003"],
queue_requests=True)
model = "gpt-4-turbo"
openai.api_key = good_open_ai_api_key
messages = [
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": "Who are you?"
},
]
result = openai.ChatCompletion.create(model=model, messages=messages)
print(f"Result: from open ai result {result}")
return result
test_bad_open_ai_call_with_q()
def test_multiple_calls():
model = "gpt-4"
openai.api_key = good_open_ai_api_key
messages = [{
"role": "system",
"content": "You are a helpful assistant."
}, {
"role": "user",
"content": "Who won the world series in 2020?" * 400
}, {
"role":
"assistant",
"content":
"The Los Angeles Dodgers won the World Series in 2020."
}, {
"role": "user",
"content": "Where was it played?"
}]
temperature = 0.7
error_count = 0
failure_count = 0 # Track the number of failures
def call_reliable_openai():
nonlocal error_count, failure_count
try:
print("Making OpenAI Call")
response = openai.ChatCompletion.create(model=model,
messages=messages,
temperature=temperature)
print(response)
if response and "error" in response:
error_count += 1
if response == "Sorry, the OpenAI API is currently down":
failure_count += 1
except Exception as e:
print("Exception occurred:", e)
error_count += 1
# Create a ThreadPoolExecutor with a maximum of 10 threads
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
# Submit the callable to the executor for each call
future_calls = [executor.submit(call_reliable_openai) for _ in range(20)]
# Wait for all the futures to complete
concurrent.futures.wait(future_calls)
print(f"Error Count: {error_count}")
print(f"Fallback response count: {failure_count}")
if error_count == 0:
print("All calls executed successfully.")
else:
print("Some calls returned errors.")
#test_multiple_calls()
| [
"Where was it played?",
"Who are you?",
"Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world 
series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won 
the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 
2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?",
"Who won the Chess championship 2022",
"You are a helpful assistant.",
"The Los Angeles Dodgers won the World Series in 2020."
] |
2024-01-10 | BerriAI/reliableGPT | reliablegpt~tests~test_AzureEndpoints.py | # have the router (which is reliableGPT) determine if an instantiation is calling the rate limit handler or the individual request wrapper directly
# save the original references of a model in model.py -> like a Model Card
import sys
import os
import dotenv
from dotenv import load_dotenv
load_dotenv()
sys.path.append('..') # Adds the parent directory to the system path
import openai
from main import reliableGPT
import concurrent.futures
## Test Azure / OpenAI Fallback
openai.api_type = "azure"
openai.api_base = os.getenv("AZURE_OPENAI_ENDPOINT")
openai.api_version = "2023-05-15"
openai.ChatCompletion.create = reliableGPT(
openai.ChatCompletion.create,
user_email="[email protected]",
azure_fallback_strategy=["chatgpt-v-2"], _test=True, verbose=True)
def simple_openai_call(prompt):
print(f"in simple openai call with question: {prompt}")
engine="chatgpt-test"
messages = [
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": prompt
},
]
result = openai.ChatCompletion.create(engine=engine, messages=messages)
print(f"Result: from open ai for {prompt}, result {result}")
return result
list_questions = [
"what do you know?",
"who is jacky robinson?",
"what do you know?",
"what do you know?",
"what do you know?",
]
#bad key
# openai.api_key = "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"
# for question in list_questions:
# response = simple_openai_call(question)
# print(response)
#good key
openai.api_key = os.getenv("AZURE_OPENAI_KEY")
for question in list_questions:
response = simple_openai_call(question)
print(response) | [
"You are a helpful assistant."
] |
2024-01-10 | BerriAI/reliableGPT | tests~test_q.py | # have the router determine if an instantiation is calling the rate limit handler or the individual request wrapper directly
# save the original references of a model in model.py -> like a Model Card
import sys
sys.path.append('..')
from dotenv import load_dotenv
load_dotenv()
import openai
from reliablegpt import reliableGPT
import os
import time
good_open_ai_api_key = os.getenv('OPENAI_API_KEY')
openai.ChatCompletion.create = reliableGPT(
openai.ChatCompletion.create,
user_email="[email protected]",
user_token="_4FTminlzIHtyWZ5Jy9UkNOoN31TirdHaqOwi-lYHfI",
queue_requests=True,
fallback_strategy=["gpt-3.5-turbo"])
def test_single_call_bad_key():
openai.api_key = "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"
model = "gpt-4"
messages = [
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": "Who won the Chess championship 2022"
},
]
temperature = 0.7
error_count = 0
failure_count = 0 # Track the number of failures
try:
print("Making OpenAI Call")
response = openai.ChatCompletion.create(model=model,
messages=messages,
temperature=temperature)
if response and "error" in response:
error_count += 1
if response == "Sorry, the OpenAI (GPT) failed":
failure_count += 1
except Exception as e:
print("Exception occurred:", e)
error_count += 1
print(f"Error Count: {error_count}")
print(f"Fallback response count: {failure_count}")
if error_count == 0:
print("All calls executed successfully.")
else:
print("Some calls returned errors.")
print(test_single_call_bad_key())
def test_embedding_bad_key():
openai.Embedding.create = reliableGPT(
openai.Embedding.create,
user_email="[email protected]",
user_token='_4FTminlzIHtyWZ5Jy9UkNOoN31TirdHaqOwi-lYHfI',
send_notification=True)
openai.api_key = "bad-key"
def get_embedding(text, model="text-embedding-ada-002"):
text = text.replace("\n", " ")
print("text")
return openai.Embedding.create(input=[text],
model=model)["data"][0]["embedding"]
result = get_embedding("GM")
test_embedding_bad_key()
list_questions = [
"What is the difference between a list and a tuple in Python?",
"How do you iterate over a dictionary in Python?",
"What is the purpose of the 'self' parameter in a class method?",
"What are lambda functions in Python?",
"How do you remove duplicates from a list in Python?",
"What is the difference between append() and extend() methods in Python lists?",
"How do you read a file in Python?", "How do you write to a file in Python?",
"What is the difference between a shallow copy and a deep copy in Python?",
"How do you convert a string to lowercase in Python?"
]
def simple_openai_call(prompt):
print(f"in simple openai call with question: {prompt}")
model = "gpt-4-turbo"
messages = [
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": prompt
},
]
result = openai.ChatCompletion.create(model=model, messages=messages)
print(f"Result: from open ai for {prompt}, result {result}")
return result
simple_openai_call("Hi im ishaan")
def test_regular_q():
openai.api_key = good_open_ai_api_key
results = {}
start_time = time.time()
for question in list_questions[:5]:
print("Making request")
print(question)
# async
result = simple_openai_call(question)
print("response")
print(result)
results[question] = result
print("\n\nDone executing\n]n")
print(results)
print(len(results))
end_time = time.time()
print("Total time:", end_time - start_time)
test_regular_q()
| [
"Who won the Chess championship 2022",
"You are a helpful assistant."
] |
2024-01-10 | BerriAI/reliableGPT | reliablegpt~tests~test_KeyManagement.py | import sys
sys.path.append('..')
import os
import traceback
import dotenv
from dotenv import load_dotenv
load_dotenv()
from KeyManagement import reliableKey
import openai
## Test: Test with valid public token (should require a local variable in .env since this is not an allowed site_url)
reliableKey.token = "MS7JjZdDxGFpsF_QAy-JBvuqiI3LdgkJgqyr5kJmsNA"
openai.api_key = reliableKey.get_key("openai", os.getenv("KEY_LOCAL_VARIABLE"))
openai.error.AuthenticationError = reliableKey.AuthenticationError
questions = ["Who do you know?", "What do you know?"]
for question in questions:
try:
chat_completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": question}])
print(chat_completion)
except:
traceback.print_exc()
continue
| [] |
2024-01-10 | BerriAI/reliableGPT | reliablegpt~tests~test_AsyncAzureEndpoint.py | import asyncio
import os
import sys
sys.path.append('..')
import dotenv
from dotenv import load_dotenv
load_dotenv()
from main import reliableGPT
import openai
openai.api_key = os.getenv("AZURE_OPENAI_KEY")
## Test Azure / OpenAI Fallback
openai.api_type = "azure"
openai.api_base = os.getenv("AZURE_OPENAI_ENDPOINT")
openai.api_version = "2023-05-15"
print(f"openai key: {openai.api_key}")
openai.ChatCompletion.acreate = reliableGPT(
openai.ChatCompletion.acreate, _test=True,
user_email="[email protected]", azure_fallback_strategy=["chatgpt-v-2"], verbose=True)
async def create_chat_completion():
chat_completion_resp = await openai.ChatCompletion.acreate(engine="chatgpt-test", model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello world"}])
print(chat_completion_resp)
async def call_create_chat_completion():
for _ in range(10):
await create_chat_completion()
# You can add any additional code or processing here
asyncio.run(call_create_chat_completion()) | [
"Hello world"
] |
2024-01-10 | BerriAI/reliableGPT | reliablegpt~IndividualRequest.py | from termcolor import colored
import requests
import copy
import posthog
import openai
from openai import ChatCompletion
import traceback
from uuid import uuid4
from waitress import serve
from flask import Flask, request
from uuid import uuid4
import traceback
from threading import active_count
import random
import time
import asyncio
import signal
from posthog import Posthog
posthog = Posthog(
project_api_key='phc_yZ30KsPzRXd3nYaET3VFDmquFKtMZwMTuFKVOei6viB',
host='https://app.posthog.com')
## for testing
class CustomError(Exception):
def __init__(self, error):
self.error = error
class IndividualRequest:
"""A brief description of the class."""
def __init__(self,
model=None,
fallback_strategy=[
'gpt-3.5-turbo', 'text-davinci-003', 'gpt-4',
'text-davinci-002'
],
azure_fallback_strategy=None,
graceful_string="Sorry, the OpenAI API is currently down",
user_email="",
user_token="",
send_notification=False,
logging_fn=None,
backup_openai_key="",
caching=False,
alerting=None,
max_threads=None,
_test=False,
verbose=False):
# Initialize instance variables
self.model = model
self.model_function = model.get_model_function()
self.verbose = verbose
self.graceful_string = graceful_string
self.fallback_strategy = fallback_strategy
self.user_email = user_email
self.user_token = user_token
self.save_request = logging_fn
self.backup_openai_key = backup_openai_key
self._test = _test
self.print_verbose(f"INIT fallback strategy {self.fallback_strategy}")
self.caching = caching
self.max_threads = max_threads
self.print_verbose(f"INIT with threads {self.max_threads} {self.caching} {max_threads}")
self.alerting = alerting
self.azure_fallback_strategy = azure_fallback_strategy
self.backup_model = None
self.set_cooldown = False
self.cooldown_start_time = time.time()
def handle_unhandled_exception(self, e):
self.print_verbose(colored("UNHANDLED EXCEPTION OCCURRED", "red"))
if self.alerting:
self.alerting.add_error(error_type="Unhandled Exception", error_description=traceback.format_exc())
def print_verbose(self, print_statement):
posthog.capture('[email protected]', 'print_verbose', {'rgpt_data': str(print_statement)[:2000]})
if self.verbose:
print(colored("Individual Request: " + str(print_statement), "blue"))
def start_cooldown(self):
self.set_cooldown = True
self.cooldown_start_time = time.time()
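  # call_model runs the wrapped synchronous OpenAI function: it honors the cooldown flag, optionally
  # writes successful chat results to the remote cache, and hands any exception to handle_exception
  # so the fallback strategy can still produce a response.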
def call_model(self, args, kwargs):
try:
if self._test: # private function for testing package
error = {"type": "RandomError"}
raise CustomError(error)
      if self.set_cooldown:
        if time.time() - self.cooldown_start_time < 900: # endpoint is still inside the 15 minute cooldown window, default to fallbacks
          error = {"type": "ErrorCooldown"}
          raise CustomError(error)
        else:
          self.set_cooldown = False # cooldown window has expired, resume calling the primary endpoint
result = self.model_function(*args, **kwargs)
      if result == None:
        self.print_verbose(f"None result!")
        error = {"type": f"OpenAI Endpoint {self.model_function} returned None"}
        raise CustomError(error) # surface the empty response so handle_exception can try the fallbacks
if "messages" in kwargs:
if "engine" in kwargs:
self.curr_azure_model = kwargs["engine"]
if self.caching:
self.print_verbose(kwargs["messages"])
input_prompt = "\n".join(message["content"]
for message in kwargs["messages"])
extracted_result = result['choices'][0]['message']['content']
self.print_verbose(f'This is extracted result {extracted_result}')
self.add_cache(
input_prompt, extracted_result
) # [TODO] turn this into a threaded call, reduce latency.
self.print_verbose(f"This is the result: {str(result)[:500]}")
return result
except Exception as e:
self.print_verbose(f"Error: {traceback.format_exc()}")
self.print_verbose("catches the error")
self.start_cooldown()
return self.handle_exception(args, kwargs, e)
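  # async_call_model mirrors call_model for coroutine-based functions such as ChatCompletion.acreate.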
async def async_call_model(self, args, kwargs):
try:
if self._test: # private function for testing package
error = {"type": "RandomError in async function"}
raise CustomError(error)
      if self.set_cooldown:
        if time.time() - self.cooldown_start_time < 900: # endpoint is still inside the 15 minute cooldown window
          error = {"type": "ErrorCooldown"}
          raise CustomError(error)
        else:
          self.set_cooldown = False # cooldown window has expired, resume calling the primary endpoint
result = await self.model_function(*args, **kwargs)
if "messages" in kwargs:
if "engine" in kwargs:
self.curr_azure_model = kwargs["engine"]
if self.caching:
self.print_verbose(kwargs["messages"])
input_prompt = "\n".join(message["content"]
for message in kwargs["messages"])
extracted_result = result['choices'][0]['message']['content']
self.print_verbose(f'This is extracted result {extracted_result}')
self.add_cache(
input_prompt, extracted_result
) # [TODO] turn this into a threaded call, reduce latency.
self.print_verbose(f"This is the result: {str(result)[:500]}")
return result
except Exception as e:
self.print_verbose("catches the error")
self.start_cooldown()
return self.handle_exception(args, kwargs, e)
## Code that handles / wraps openai calls
def __call__(self, *args, **kwargs):
try:
self.print_verbose(f"calling model function: {self.model_function}")
self.print_verbose(f"these are the kwargs: {kwargs}")
self.print_verbose(f"this is the openai api base: {openai.api_base}")
self.print_verbose(f"testing enabled: {self._test}")
try:
# this should never block running the openai call
# [TODO] make this into a threaded call to reduce impact on latency
self.save_request(
user_email=self.user_email,
graceful_string=self.graceful_string,
posthog_event='reliableGPT.request',
)
except:
self.print_verbose("ReliableGPT error occured during saving request")
self.print_verbose(f"max threads: {self.max_threads}, caching: {self.caching}")
if self.max_threads and self.caching:
self.print_verbose(f'current util: {active_count()/self.max_threads}')
thread_utilization = active_count()/self.max_threads
self.print_verbose(f"Thread utilization: {thread_utilization}")
if thread_utilization > 0.8: # over 80% utilization of threads, start returning cached responses
if "messages" in kwargs and self.caching:
self.print_verbose(kwargs["messages"])
input_prompt = "\n".join(message["content"]
for message in kwargs["messages"])
self.print_verbose(
f"queue depth is higher than the threshold, start caching")
result = self.try_cache_request(query=input_prompt)
if self.alerting:
# save_exception
self.alerting.add_error(error_type="Thread Utilization > 85%", error_description="Your thread utilization is over 85%. We've started responding with cached results, to prevent requests from dropping. Please increase capacity (allocate more threads/servers) to prevent result quality from dropping.")
if result == None: # cache miss!
pass
else:
self.print_verbose(f"returns cached result: {result}")
self.save_request(
user_email=self.user_email,
posthog_event='reliableGPT.recovered_request_cache',
graceful_string = self.graceful_string,
result=result,
posthog_metadata={
'error': 'High Thread Utilization',
'recovered_response': result,
},
errors=['High Thread Utilization'],
function_name=str(self.model_function),
kwargs=kwargs
)
return result
# Run user request
if asyncio.iscoroutinefunction(self.model_function):
return self.async_call_model(args=args, kwargs=kwargs)
else:
return self.call_model(args=args, kwargs=kwargs)
except Exception as e:
self.print_verbose(f"Error in main call function: {traceback.format_exc()}")
def add_cache(self, input_prompt, response):
try:
if self.caching:
if request:
if request.args and request.args.get("user_email"):
customer_id = request.args.get("user_email")
if request.args.get("instance_id"):
instance_id = request.args.get("instance_id")
else:
instance_id = 0000 # default instance id if none passed in
user_email = self.user_email
url = "https://reliablegpt-logging-server-7nq8.zeet-berri.zeet.app/add_cache"
querystring = {
"customer_id": customer_id,
"instance_id": instance_id,
"user_email": user_email,
"input_prompt": input_prompt,
"response": response
}
response = requests.post(url, params=querystring)
except:
pass
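  # try_cache_request looks up a previously cached response for the same prompt and re-wraps it in an
  # OpenAI-style {"choices": [...]} dict; it returns None on a cache miss.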
def try_cache_request(self, query=None):
try:
if query:
self.print_verbose("Inside the cache")
if request:
if request.args and request.args.get("user_email"):
customer_id = request.args.get("user_email")
if request.args.get("instance_id"):
instance_id = request.args.get("instance_id")
else:
instance_id = 0000 # default instance id if none passed in
user_email = self.user_email
url = "https://reliablegpt-logging-server-7nq8.zeet-berri.zeet.app/get_cache"
querystring = {
"customer_id": customer_id,
"instance_id": instance_id,
"user_email": user_email,
"input_prompt": query,
}
response = requests.get(url, params=querystring)
self.print_verbose(f"cached response: {response.json()}")
extracted_result = response.json()["response"]
results = {"choices":[{"message":{"content": extracted_result}}]}
return results
except:
traceback.print_exc()
pass
self.print_verbose(f"cache miss!")
return None
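  # fallback_request retries the call against the backup Azure deployments (if configured) and then
  # each model in fallback_strategy, returning the first non-None result.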
def fallback_request(self, args, kwargs, fallback_strategy):
try:
self.print_verbose("In fallback request")
result = None
new_kwargs = copy.deepcopy(kwargs) # Create a deep copy of kwargs
if self.backup_openai_key and len(
self.backup_openai_key
) > 0: # user passed in a backup key for the raw openai endpoint
# switch to the raw openai model instead of using azure.
new_kwargs["openai2"] = openai.__dict__.copy(
) # preserve the azure endpoint details
if "Embedding" in str(self.model_function):
fallback_strategy = ["text-embedding-ada-002"]
if self.azure_fallback_strategy: # try backup azure models
for engine in self.azure_fallback_strategy:
new_kwargs["engine"] = engine
new_kwargs["azure_fallback"] = True
self.print_verbose(f"new azure engine: {new_kwargs}")
result = self.make_LLM_request(new_kwargs)
if result != None:
return result
for model in fallback_strategy:
new_kwargs['model'] = model # Update the model
result = self.make_LLM_request(new_kwargs)
if result != None:
return result
return None
except:
self.print_verbose(traceback.format_exc())
return None
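  # make_LLM_request dispatches a single retry to the matching original OpenAI function (chat,
  # completion, or embedding). When a backup OpenAI key is supplied, it temporarily points the client
  # at the public OpenAI endpoint and restores the Azure settings afterwards.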
def make_LLM_request(self, new_kwargs):
embedding_model = self.model.get_original_embeddings()
chat_model = self.model.get_original_chat()
completion_model = self.model.get_original_completion()
try:
self.print_verbose(f"{new_kwargs.keys()}")
if "azure_fallback" in new_kwargs:
new_kwargs_except_azure_fallback_flag = {
k: v
for k, v in new_kwargs.items() if k != "azure_fallback"
}
return chat_model(**new_kwargs_except_azure_fallback_flag)
if "openai2" in new_kwargs:
openai.api_type = "openai"
openai.api_base = "https://api.openai.com/v1"
openai.api_version = None
openai.api_key = self.backup_openai_key
new_kwargs_except_openai_attributes = {
k: v
for k, v in new_kwargs.items() if k != "openai2"
}
new_kwargs_except_engine = {
k: v
for k, v in new_kwargs_except_openai_attributes.items()
if k != "engine"
}
completion = self.model_function(**new_kwargs_except_engine)
openai.api_type = new_kwargs["openai2"]["api_type"]
openai.api_base = new_kwargs["openai2"]["api_base"]
openai.api_version = new_kwargs["openai2"]["api_version"]
openai.api_key = new_kwargs["openai2"]["api_key"]
return completion
if "embedding" in str(self.model_function):
# retry embedding with diff key
self.print_verbose(colored(f"ReliableGPT: Retrying Embedding request", "blue"))
return embedding_model(**new_kwargs)
model = str(new_kwargs['model'])
self.print_verbose(
colored(f"ReliableGPT: Checking request model {model} {new_kwargs}",
"blue"))
if "3.5" in model or "4" in model: # call ChatCompletion
self.print_verbose(
colored(
f"ReliableGPT: Retrying request with model CHAT {model} {new_kwargs}",
"blue"))
return chat_model(**new_kwargs)
else:
self.print_verbose(
colored(f"ReliableGPT: Retrying request with model TEXT {model}",
"blue"))
new_kwargs['prompt'] = " ".join(
[message["content"] for message in new_kwargs['messages']])
new_kwargs.pop('messages',
None) # remove messages for completion models
return completion_model(**new_kwargs)
except Exception as e:
self.print_verbose(colored(f"ReliableGPT: Got 2nd AGAIN Error {e}", "red"))
raise ValueError(e)
def api_key_handler(self, args, kwargs, fallback_strategy, user_email,
user_token):
try:
url = f"https://reliable-gpt-backend-9gus.zeet-berri.zeet.app/get_keys?user_email={user_email}&user_token={user_token}"
response = requests.get(url)
if response.status_code == 200:
result = response.json()
if result['status'] == 'failed':
self.print_verbose(
colored(
f"ReliableGPT: No keys found for user: {user_email}, token: {user_token}",
"red"))
return None
fallback_keys = result['response'][
'openai_api_keys'] # list of fallback keys
if len(fallback_keys) == 0:
return None
for fallback_key in fallback_keys:
openai.api_key = fallback_key
result = self.make_LLM_request(kwargs)
if result != None:
return result
else:
self.print_verbose(
colored(
f"ReliableGPT: No keys found for user: {user_email}, token: {user_token}",
"red"))
return None
except Exception as e:
raise ValueError(e)
def handle_openAI_error(self,
args,
kwargs,
openAI_error,
fallback_strategy,
graceful_string,
user_email="",
user_token=""):
# Error Types from https://platform.openai.com/docs/guides/error-codes/python-library-error-types
# 1. APIError - retry, retry with fallback
# 2. Timeout - retry, retry with fallback
# 3. RateLimitError - retry, retry with fallback
# 4. APIConnectionError - Check your network settings, proxy configuration, SSL certificates, or firewall rules.
# 5. InvalidRequestError - User input was bad: context_length_exceeded,
# 6. AuthenticationError - API key not working, return default hardcoded message
# 7. ServiceUnavailableError - retry, retry with fallback
self.print_verbose(
colored(f"Inside handle openai error for User Email: {user_email}",
"red"))
if openAI_error != None:
openAI_error = openAI_error.error # index into the error attribute of the class
    error_type = None # default to being None
if openAI_error != None and 'type' in openAI_error:
error_type = openAI_error['type']
if error_type == 'invalid_request_error' or error_type == 'InvalidRequestError':
# check if this is context window related, try with a 16k model
if openAI_error.code == 'context_length_exceeded':
self.print_verbose(
colored(
"ReliableGPT: invalid request error - context_length_exceeded",
"red"))
fallback_strategy = ['gpt-3.5-turbo-16k'] + fallback_strategy
result = self.fallback_request(args=args,
kwargs=kwargs,
fallback_strategy=fallback_strategy)
if result == None:
return graceful_string
else:
return result
if openAI_error.code == "invalid_api_key":
self.print_verbose(
colored("ReliableGPT: invalid request error - invalid_api_key",
"red"))
result = self.api_key_handler(args=args,
kwargs=kwargs,
fallback_strategy=fallback_strategy,
user_email=user_email,
user_token=user_token)
if result == None:
return graceful_string
else:
return result
# todo: alert on user_email that there is now an auth error
elif error_type == 'authentication_error' or error_type == 'AuthenticationError':
self.print_verbose(colored("ReliableGPT: Auth error", "red"))
return graceful_string
# catch all
result = self.fallback_request(args=args,
kwargs=kwargs,
fallback_strategy=fallback_strategy)
if result == None:
return graceful_string
else:
return result
return graceful_string
def handle_exception(self, args, kwargs, e):
result = self.graceful_string # default to graceful string
try:
# Attempt No. 1, exception is received from OpenAI
self.print_verbose(colored(f"ReliableGPT: Got Exception {e}", 'red'))
result = self.handle_openAI_error(
args=args,
kwargs=kwargs,
openAI_error=e,
fallback_strategy=self.fallback_strategy,
graceful_string=self.graceful_string,
user_email=self.user_email,
user_token=self.user_token)
self.print_verbose(
colored(f"ReliableGPT: Recovered got a successful response {result}",
"green"))
if result == self.graceful_string:
# did a retry with model fallback, so now try caching.
if "messages" in kwargs and self.caching:
self.print_verbose(kwargs["messages"])
input_prompt = "\n".join(message["content"]
for message in kwargs["messages"])
cached_response = self.try_cache_request(query=input_prompt)
if cached_response == None:
pass
else:
self.save_request(
user_email=self.user_email,
posthog_event='reliableGPT.recovered_request_cache',
graceful_string = self.graceful_string,
result=cached_response,
posthog_metadata={
'error': 'High Thread Utilization',
'recovered_response': cached_response,
},
errors=['High Thread Utilization'],
function_name=str(self.model_function),
kwargs=kwargs
)
return cached_response
self.save_request(
user_email=self.user_email,
graceful_string=self.graceful_string,
posthog_event='reliableGPT.recovered_request_exception',
result=result,
posthog_metadata={
'error': str(e),
'recovered_response': result
},
errors=[e],
function_name=str(self.model_function),
kwargs=kwargs)
else:
# No errors, successfull retry
self.save_request(user_email=self.user_email,
graceful_string=self.graceful_string,
posthog_event="reliableGPT.recovered_request",
result=result,
posthog_metadata={
'error': str(e),
'recovered_response': result
},
errors=[e],
function_name=str(self.model_function),
kwargs=kwargs)
except Exception as e2:
# Exception 2, After trying to rescue
traceback.print_exc()
self.print_verbose("gets 2nd error: ", e2)
self.save_request(
user_email=self.user_email,
graceful_string=self.graceful_string,
posthog_event='reliableGPT.recovered_request_exception',
result="",
posthog_metadata={
'original_error': str(e),
'error2': str(e2),
'recovered_response': self.graceful_string
},
errors=[e, e2],
function_name=str(self.model_function),
kwargs=kwargs)
raise e
return result
| [
"\n",
"content"
] |
2024-01-10 | BerriAI/reliableGPT | reliablegpt~tests~test_IndividualRequest.py | # have the router (which is reliableGPT) determine if an instantiation is calling the rate limit handler or the individual request wrapper directly
# save the original references of a model in model.py -> like a Model Card
import sys
import os
import dotenv
from dotenv import load_dotenv
load_dotenv()
sys.path.append('..') # Adds the parent directory to the system path
import openai
from main import reliableGPT
import concurrent.futures
openai.ChatCompletion.create = reliableGPT(
openai.ChatCompletion.create,
user_email="[email protected]", verbose=True)
print(openai.ChatCompletion.create)
good_open_ai_api_key = os.getenv('OPENAI_API_KEY')
def test_single_call_bad_key():
openai.api_key = "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"
model = "gpt-4"
messages = [
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": "Who won the Chess championship 2022"
},
]
temperature = 0.7
error_count = 0
failure_count = 0 # Track the number of failures
try:
print("Making OpenAI Call")
response = openai.ChatCompletion.create(model=model,
messages=messages,
temperature=temperature)
print("Response: ", response)
if response and "error" in response:
error_count += 1
if response == "Sorry, the OpenAI (GPT) failed":
failure_count += 1
except Exception as e:
print("Exception occurred:", e)
error_count += 1
print(f"Error Count: {error_count}")
print(f"Fallback response count: {failure_count}")
if error_count == 0:
print("All calls executed successfully.")
else:
print("Some calls returned errors.")
#test_single_call_bad_key()
def test_embedding_bad_key():
openai.Embedding.create = reliableGPT(
openai.Embedding.create,
user_email="[email protected]",
user_token='_4FTminlzIHtyWZ5Jy9UkNOoN31TirdHaqOwi-lYHfI',
send_notification=True)
openai.api_key = "bad-key"
def get_embedding(text, model="text-embedding-ada-002"):
text = text.replace("\n", " ")
print("text")
return openai.Embedding.create(input=[text],
model=model)["data"][0]["embedding"]
result = get_embedding("GM")
print(result)
#test_embedding_bad_key()
def test_embedding_bad_key_fail():
openai.Embedding.create = reliableGPT(
openai.Embedding.create,
user_email="[email protected]",
send_notification=True)
openai.api_key = "bad-key"
def get_embedding(text, model="text-embedding-ada-002"):
text = text.replace("\n", " ")
print("text")
return openai.Embedding.create(input=[text],
model=model)["data"][0]["embedding"]
result = get_embedding("GM")
print(result)
#test_embedding_bad_key_fail()
def test_bad_open_ai_call():
model = "gpt-4-turbo"
openai.api_key = good_open_ai_api_key
messages = [
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": "Who are you?"
},
]
result = openai.ChatCompletion.create(model=model, messages=messages)
print(f"Result: from open ai result {result}")
return result
test_bad_open_ai_call()
def test_bad_open_ai_call_with_q():
openai.ChatCompletion.create = reliableGPT(
openai.ChatCompletion.create,
user_email="[email protected]",
fallback_strategy=["text-davinci-003", "text-davinci-003"],
queue_requests=True)
model = "gpt-4-turbo"
openai.api_key = good_open_ai_api_key
messages = [
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": "Who are you?"
},
]
result = openai.ChatCompletion.create(model=model, messages=messages)
print(f"Result: from open ai result {result}")
return result
# test_bad_open_ai_call_with_q()
def test_multiple_calls():
model = "gpt-4"
openai.api_key = good_open_ai_api_key
messages = [{
"role": "system",
"content": "You are a helpful assistant."
}, {
"role": "user",
"content": "Who won the world series in 2020?" * 400
}, {
"role":
"assistant",
"content":
"The Los Angeles Dodgers won the World Series in 2020."
}, {
"role": "user",
"content": "Where was it played?"
}]
temperature = 0.7
error_count = 0
failure_count = 0 # Track the number of failures
def call_reliable_openai():
nonlocal error_count, failure_count
try:
print("Making OpenAI Call")
response = openai.ChatCompletion.create(model=model,
messages=messages,
temperature=temperature)
print(response)
if response and "error" in response:
error_count += 1
if response == "Sorry, the OpenAI API is currently down":
failure_count += 1
except Exception as e:
print("Exception occurred:", e)
error_count += 1
# Create a ThreadPoolExecutor with a maximum of 10 threads
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
# Submit the callable to the executor for each call
future_calls = [executor.submit(call_reliable_openai) for _ in range(20)]
# Wait for all the futures to complete
concurrent.futures.wait(future_calls)
print(f"Error Count: {error_count}")
print(f"Fallback response count: {failure_count}")
if error_count == 0:
print("All calls executed successfully.")
else:
print("Some calls returned errors.")
#test_multiple_calls()
| [
"Where was it played?",
"Who are you?",
"Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world 
series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won 
the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 
2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?Who won the world series in 2020?",
"Who won the Chess championship 2022",
"You are a helpful assistant.",
"The Los Angeles Dodgers won the World Series in 2020."
] |
2024-01-10 | BerriAI/reliableGPT | reliablegpt~tests~test_Embedding.py | # have the router (which is reliableGPT) determine if an instantiation is calling the rate limit handler or the individual request wrapper directly
# save the original references of a model in model.py -> like a Model Card
import sys
import os
import dotenv
from dotenv import load_dotenv
load_dotenv()
sys.path.append('..') # Adds the parent directory to the system path
import openai
from main import reliableGPT
import concurrent.futures
## Test Azure / OpenAI Fallback
openai.api_type = "azure"
openai.api_base = os.getenv("AZURE_OPENAI_ENDPOINT")
openai.api_version = "2023-05-15"
openai.api_key = os.getenv("AZURE_OPENAI_KEY")
# Wrap both completion + embedding
openai.ChatCompletion.create = reliableGPT(
openai.ChatCompletion.create,
user_email="[email protected]",
backup_openai_key=os.getenv("OPENAI_API_KEY"), _test=True, verbose=True)
openai.Embedding.create = reliableGPT(
openai.Embedding.create,
user_email="[email protected]",
backup_openai_key=os.getenv('OPENAI_API_KEY'),
verbose=True)
# Make Azure completion fail
def simple_openai_call(prompt):
print(f"in simple openai call with question: {prompt}")
engine="chatgpt-test"
messages = [
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": prompt
},
]
result = openai.ChatCompletion.create(engine=engine, messages=messages)
print(f"Result: from open ai for {prompt}, result {result}")
return result
response = simple_openai_call("What do you know?")
print(response)
# Test embedding
# choose text to embed
text_string = "sample text"
embeddings = openai.Embedding.create(deployment_id="azure-embedding-model",
input=text_string)
# print(embeddings) | [
"You are a helpful assistant."
] |
2024-01-10 | BerriAI/reliableGPT | reliablegpt~tests~test_Router.py | import sys
sys.path.append('..')
import openai
from main import reliableGPT
# Test 1: Basic usage
print(reliableGPT(openai.ChatCompletion.create, user_email="[email protected]"))
| [] |
2024-01-10 | BerriAI/reliableGPT | reliablegpt~KeyManagement.py | import requests
import openai
class reliableKey:
token = "your-token-here"
hot_cache = {}
api_url = "https://reliable-gpt-backend-9gus.zeet-berri.zeet.app"
valid_api_key = True
invalid_key_value = None
recent_get_key_params = None
class AuthenticationError(openai.error.AuthenticationError):
def __init__(self, *args, **kwargs):
reliableKey.set_invalid_key(openai.api_key)
super().__init__(*args, **kwargs)
@classmethod
def set_invalid_key(cls, invalid_key):
print(f"calling set invalid key!!: {invalid_key}")
cls.valid_api_key = False
cls.invalid_key_value = invalid_key
if cls.recent_get_key_params:
openai.api_key = cls.get_key(llm_provider=cls.recent_get_key_params["llm_provider"], local_token=cls.recent_get_key_params["local_token"])
@classmethod
def get_key(cls, llm_provider, local_token=None):
cls.recent_get_key_params = {"llm_provider": llm_provider, "local_token": local_token}
try:
print(f"valid api key in get key: {cls.valid_api_key}")
api_key = None
if llm_provider in cls.hot_cache and cls.valid_api_key:
api_key = cls.hot_cache[llm_provider]
else:
querystring = {"llm_provider": llm_provider, "token": cls.token}
if local_token:
querystring["local_token"] = local_token
if not cls.valid_api_key:
print(f"invalid key value in get_key: {cls.invalid_key_value}")
querystring["invalid_api_key"] = cls.invalid_key_value
response = requests.get(cls.api_url+"/get_key", params=querystring)
print(response.text)
api_key = response.json()["api_key"]
cls.hot_cache[llm_provider] = api_key
cls.valid_api_key = True
return api_key
except Exception as e:
raise Exception("Error caused due to either a bad token or non-existent llm provider. If you're testing locally, make sure to download your .env containing your local token.")
| [] |
2024-01-10 | jagilley/fft-embeddings | gettysburg.py | import openai
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
from openai.embeddings_utils import get_embeddings
import librosa
import pickle
import os
import soundfile as sf
with open('/users/jasper/oai.txt', 'r') as f:
openai.api_key = f.read()
def split_into_chunks(text, num_words=5):
words = text.split()
chunks = []
for i in range(0, len(words)):
chunk = ' '.join(words[i:i+num_words])
        # only keep full windows that still contain num_words words
if i+num_words <= len(words):
chunks.append(chunk)
return chunks
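# For example, split_into_chunks("one two three four five six", num_words=5) returns
# ['one two three four five', 'two three four five six'] -- a sliding window that advances one
# word at a time and only keeps windows that still contain num_words words.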
text_to_load = 'texts/gettysburg.txt'
with open(text_to_load, 'r') as f:
gettysburg = f.read()
chunks = split_into_chunks(gettysburg, num_words=10)
embeddings_path = f'embeddings/{text_to_load.split("/")[-1].replace(".txt", ".pkl")}'
# Get embeddings for each chunk
if os.path.exists(embeddings_path):
with open(embeddings_path, 'rb') as f:
embeddings = pickle.load(f)
else:
embeddings = get_embeddings(chunks)
# pickle embeddings
with open(embeddings_path, 'wb') as f:
pickle.dump(embeddings, f)
# embeddings to numpy
embeddings = np.array(embeddings)
print(embeddings.shape)
# get the spectrogram for the first embedding channel (widen the range to cover more channels)
for i in range(0, 1):
# continue
channel_0 = embeddings[:, i]
# Plot channel 0
# plt.plot(channel_0)
# plt.show()
# compute spectrogram for channel 0
spec = np.abs(librosa.stft(channel_0, n_fft=32, win_length=4))
print(spec.shape)
# Plot spectrogram
plt.imshow(spec)
# label axes
plt.xlabel('Time, in frames')
plt.ylabel('Frequency')
plt.show()
# convert to audio
# fill in material such that we have a 5 second audio clip
# do this by interpolating between each frame
# audio = librosa.istft(spec, win_length=4)
# # normalize audio
# audio = audio / np.max(np.abs(audio))
# # save audio
# sf.write(f'audio_{i}.wav', audio, 22050)
# get spectrogram for all channels
spec = np.abs(librosa.stft(embeddings, n_fft=32, win_length=4))
import code
code.interact(local=locals()) | [] |
2024-01-10 | jagilley/fft-embeddings | corpus.py | from nltk.corpus import reuters
import openai
from tqdm import tqdm
import numpy as np
import nltk
from openai.embeddings_utils import get_embeddings
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import pickle
with open('/users/jasper/oai.txt', 'r') as f:
openai.api_key = f.read()
# nltk.download('reuters')
# perform train/test split
train_docs_id = reuters.fileids(categories='trade')
test_docs_id = reuters.fileids(categories='crude')
# get train/test docs
train_docs = [reuters.raw(doc_id) for doc_id in train_docs_id]
test_docs = [reuters.raw(doc_id) for doc_id in test_docs_id]
# get train/test labels
train_labels = [reuters.categories(doc_id)[0] for doc_id in train_docs_id]
test_labels = [reuters.categories(doc_id)[0] for doc_id in test_docs_id]
# get embeddings for train/test docs
# print('Getting embeddings...')
# train_embeddings = get_embeddings(train_docs)
# test_embeddings = get_embeddings(test_docs)
# # pickle embeddings
# with open('embeddings/train_embeddings.pkl', 'wb') as f:
# pickle.dump(train_embeddings, f)
# with open('embeddings/test_embeddings.pkl', 'wb') as f:
# pickle.dump(test_embeddings, f)
# load embeddings
with open('embeddings/train_embeddings.pkl', 'rb') as f:
train_embeddings = pickle.load(f)
with open('embeddings/test_embeddings.pkl', 'rb') as f:
test_embeddings = pickle.load(f)
# train/test split
X_train, X_test, y_train, y_test = train_test_split(train_embeddings, train_labels, test_size=0.2, random_state=42)
# train classifier
clf = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=1000, alpha=1e-4,
solver='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1)
clf.fit(X_train, y_train)
# predict on test set
y_pred = clf.predict(X_test)
# evaluate
print(accuracy_score(y_test, y_pred))
import code
code.interact(local=locals()) | [] |
2024-01-10 | alsterlabs-ug/matny-search | backend~danswer~chat~chat_prompts.py | from langchain.schema.messages import BaseMessage
from langchain.schema.messages import HumanMessage
from langchain.schema.messages import SystemMessage
from danswer.configs.constants import MessageType
from danswer.db.models import ChatMessage
from danswer.db.models import ToolInfo
from danswer.indexing.models import InferenceChunk
from danswer.llm.utils import translate_danswer_msg_to_langchain
from danswer.prompts.constants import CODE_BLOCK_PAT
DANSWER_TOOL_NAME = "Current Search"
DANSWER_TOOL_DESCRIPTION = (
"A search tool that can find information on any topic "
"including up to date and proprietary knowledge."
)
DANSWER_SYSTEM_MSG = (
"Given a conversation (between Human and Assistant) and a final message from Human, "
"rewrite the last message to be a standalone question which captures required/relevant context "
"from previous messages. This question must be useful for a semantic search engine. "
"It is used for a natural language search."
)
YES_SEARCH = "Yes Search"
NO_SEARCH = "No Search"
REQUIRE_DANSWER_SYSTEM_MSG = (
"You are a large language model whose only job is to determine if the system should call an external search tool "
"to be able to answer the user's last message.\n"
f'\nRespond with "{NO_SEARCH}" if:\n'
f"- there is sufficient information in chat history to fully answer the user query\n"
f"- there is enough knowledge in the LLM to fully answer the user query\n"
f"- the user query does not rely on any specific knowledge\n"
f'\nRespond with "{YES_SEARCH}" if:\n'
"- additional knowledge about entities, processes, problems, or anything else could lead to a better answer.\n"
"- there is some uncertainty what the user is referring to\n\n"
f'Respond with EXACTLY and ONLY "{YES_SEARCH}" or "{NO_SEARCH}"'
)
TOOL_TEMPLATE = """
TOOLS
------
You can use tools to look up information that may be helpful in answering the user's \
original question. The available tools are:
{tool_overviews}
RESPONSE FORMAT INSTRUCTIONS
----------------------------
When responding to me, please output a response in one of two formats:
**Option 1:**
Use this if you want to use a tool. Markdown code snippet formatted in the following schema:
```json
{{
"action": string, \\ The action to take. {tool_names}
"action_input": string \\ The input to the action
}}
```
**Option #2:**
Use this if you want to respond directly to the user. Markdown code snippet formatted in the following schema:
```json
{{
"action": "Final Answer",
"action_input": string \\ You should put what you want to return to use here
}}
```
"""
TOOL_LESS_PROMPT = """
Respond with a markdown code snippet in the following schema:
```json
{{
"action": "Final Answer",
"action_input": string \\ You should put what you want to return to use here
}}
```
"""
USER_INPUT = """
USER'S INPUT
--------------------
Here is the user's input \
(remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else):
{user_input}
"""
TOOL_FOLLOWUP = """
TOOL RESPONSE:
---------------------
{tool_output}
USER'S INPUT
--------------------
Okay, so what is the response to my last comment? If using information obtained from the tools you must \
mention it explicitly without mentioning the tool names - I have forgotten all TOOL RESPONSES!
If the tool response is not useful, ignore it completely.
{optional_reminder}{hint}
IMPORTANT! You MUST respond with a markdown code snippet of a json blob with a single action, and NOTHING else.
"""
TOOL_LESS_FOLLOWUP = """
Refer to the following documents when responding to my final query. Ignore any documents that are not relevant.
CONTEXT DOCUMENTS:
---------------------
{context_str}
FINAL QUERY:
--------------------
{user_query}
{hint_text}
"""
def form_user_prompt_text(
query: str,
tool_text: str | None,
hint_text: str | None,
user_input_prompt: str = USER_INPUT,
tool_less_prompt: str = TOOL_LESS_PROMPT,
) -> str:
user_prompt = tool_text or tool_less_prompt
user_prompt += user_input_prompt.format(user_input=query)
if hint_text:
if user_prompt[-1] != "\n":
user_prompt += "\n"
user_prompt += "\nHint: " + hint_text
return user_prompt.strip()
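# form_tool_section_text renders TOOL_TEMPLATE with one "> name: description" line per tool,
# adding the built-in Current Search tool when retrieval is enabled alongside other tools; it
# returns None when there are no tools to describe.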
def form_tool_section_text(
tools: list[ToolInfo] | None, retrieval_enabled: bool, template: str = TOOL_TEMPLATE
) -> str | None:
if not tools and not retrieval_enabled:
return None
if retrieval_enabled and tools:
tools.append(
{"name": DANSWER_TOOL_NAME, "description": DANSWER_TOOL_DESCRIPTION}
)
tools_intro = []
if tools:
num_tools = len(tools)
for tool in tools:
description_formatted = tool["description"].replace("\n", " ")
tools_intro.append(f"> {tool['name']}: {description_formatted}")
prefix = "Must be one of " if num_tools > 1 else "Must be "
tools_intro_text = "\n".join(tools_intro)
tool_names_text = prefix + ", ".join([tool["name"] for tool in tools])
else:
return None
return template.format(
tool_overviews=tools_intro_text, tool_names=tool_names_text
).strip()
def format_danswer_chunks_for_chat(chunks: list[InferenceChunk]) -> str:
if not chunks:
return "No Results Found"
return "\n".join(
f"DOCUMENT {ind}:\n{CODE_BLOCK_PAT.format(chunk.content)}\n"
for ind, chunk in enumerate(chunks, start=1)
)
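# Produces numbered "DOCUMENT n:" headers with each chunk's content wrapped via CODE_BLOCK_PAT,
# so retrieved passages arrive in the prompt as clearly delimited blocks.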
def form_tool_followup_text(
tool_output: str,
query: str,
hint_text: str | None,
tool_followup_prompt: str = TOOL_FOLLOWUP,
ignore_hint: bool = False,
) -> str:
# If multi-line query, it likely confuses the model more than helps
if "\n" not in query:
optional_reminder = f"\nAs a reminder, my query was: {query}\n"
else:
optional_reminder = ""
if not ignore_hint and hint_text:
hint_text_spaced = f"\nHint: {hint_text}\n"
else:
hint_text_spaced = ""
return tool_followup_prompt.format(
tool_output=tool_output,
optional_reminder=optional_reminder,
hint=hint_text_spaced,
).strip()
def build_combined_query(
query_message: ChatMessage,
history: list[ChatMessage],
) -> list[BaseMessage]:
user_query = query_message.message
combined_query_msgs: list[BaseMessage] = []
if not user_query:
raise ValueError("Can't rephrase/search an empty query")
combined_query_msgs.append(SystemMessage(content=DANSWER_SYSTEM_MSG))
combined_query_msgs.extend(
[translate_danswer_msg_to_langchain(msg) for msg in history]
)
combined_query_msgs.append(
HumanMessage(
content=(
"Help me rewrite this final message into a standalone query that takes into consideration the "
f"past messages of the conversation if relevant. This query is used with a semantic search engine to "
f"retrieve documents. You must ONLY return the rewritten query and nothing else. "
f"Remember, the search engine does not have access to the conversation history!"
f"\n\nQuery:\n{query_message.message}"
)
)
)
return combined_query_msgs
def form_require_search_single_msg_text(
query_message: ChatMessage,
history: list[ChatMessage],
) -> str:
prompt = "MESSAGE_HISTORY\n---------------\n" if history else ""
for msg in history:
if msg.message_type == MessageType.ASSISTANT:
prefix = "AI"
else:
prefix = "User"
prompt += f"{prefix}:\n```\n{msg.message}\n```\n\n"
prompt += f"\nFINAL QUERY:\n---------------\n{query_message.message}"
return prompt
def form_require_search_text(query_message: ChatMessage) -> str:
return (
query_message.message
+ f"\n\nHint: respond with EXACTLY {YES_SEARCH} or {NO_SEARCH}"
)
def form_tool_less_followup_text(
tool_output: str,
query: str,
hint_text: str | None,
tool_followup_prompt: str = TOOL_LESS_FOLLOWUP,
) -> str:
hint = f"Hint: {hint_text}" if hint_text else ""
return tool_followup_prompt.format(
context_str=tool_output, user_query=query, hint_text=hint
).strip()
| [
"\n",
"Help me rewrite this final message into a standalone query that takes into consideration the ",
"Remember, the search engine does not have access to the conversation history!",
"past messages of the conversation if relevant. This query is used with a semantic search engine to ",
"\nTOOLS\n------\nYou can use tools to look up information that may be helpful in answering the user's original question. The available tools are:\n\n{tool_overviews}\n\nRESPONSE FORMAT INSTRUCTIONS\n----------------------------\nWhen responding to me, please output a response in one of two formats:\n\n**Option 1:**\nUse this if you want to use a tool. Markdown code snippet formatted in the following schema:\n\n```json\n{{\n \"action\": string, \\ The action to take. {tool_names}\n \"action_input\": string \\ The input to the action\n}}\n```\n\n**Option #2:**\nUse this if you want to respond directly to the user. Markdown code snippet formatted in the following schema:\n\n```json\n{{\n \"action\": \"Final Answer\",\n \"action_input\": string \\ You should put what you want to return to use here\n}}\n```\n",
"MESSAGE_HISTORY\n---------------\n",
"\nHint: PLACEHOLDER",
"\nRespond with a markdown code snippet in the following schema:\n\n```json\n{{\n \"action\": \"Final Answer\",\n \"action_input\": string \\ You should put what you want to return to use here\n}}\n```\n",
"retrieve documents. You must ONLY return the rewritten query and nothing else. "
] |
2024-01-10 | alsterlabs-ug/matny-search | backend~danswer~direct_qa~qa_block.py | import abc
import re
from collections.abc import Callable
from collections.abc import Iterator
from langchain.schema.messages import BaseMessage
from langchain.schema.messages import HumanMessage
from danswer.direct_qa.interfaces import AnswerQuestionReturn
from danswer.direct_qa.interfaces import AnswerQuestionStreamReturn
from danswer.direct_qa.interfaces import DanswerAnswer
from danswer.direct_qa.interfaces import DanswerQuotes
from danswer.direct_qa.interfaces import QAModel
from danswer.direct_qa.models import LLMMetricsContainer
from danswer.direct_qa.qa_utils import process_answer
from danswer.direct_qa.qa_utils import process_model_tokens
from danswer.indexing.models import InferenceChunk
from danswer.llm.interfaces import LLM
from danswer.llm.utils import check_number_of_tokens
from danswer.llm.utils import get_default_llm_token_encode
from danswer.llm.utils import tokenizer_trim_chunks
from danswer.prompts.constants import CODE_BLOCK_PAT
from danswer.prompts.direct_qa_prompts import COT_PROMPT
from danswer.prompts.direct_qa_prompts import JSON_PROMPT
from danswer.prompts.direct_qa_prompts import WEAK_LLM_PROMPT
from danswer.utils.logger import setup_logger
from danswer.utils.text_processing import clean_up_code_blocks
from danswer.utils.text_processing import escape_newlines
logger = setup_logger()
class QAHandler(abc.ABC):
@abc.abstractmethod
def build_prompt(
self, query: str, context_chunks: list[InferenceChunk]
) -> list[BaseMessage]:
raise NotImplementedError
@property
@abc.abstractmethod
def is_json_output(self) -> bool:
"""Does the model output a valid json with answer and quotes keys? Most flows with a
capable model should output a json. This hints to the model that the output is used
with a downstream system rather than freeform creative output. Most models should be
finetuned to recognize this."""
raise NotImplementedError
def process_llm_output(
self, model_output: str, context_chunks: list[InferenceChunk]
) -> tuple[DanswerAnswer, DanswerQuotes]:
return process_answer(
model_output, context_chunks, is_json_prompt=self.is_json_output
)
def process_llm_token_stream(
self, tokens: Iterator[str], context_chunks: list[InferenceChunk]
) -> AnswerQuestionStreamReturn:
yield from process_model_tokens(
tokens=tokens,
context_docs=context_chunks,
is_json_prompt=self.is_json_output,
)
class WeakLLMQAHandler(QAHandler):
"""Since Danswer supports a variety of LLMs, this less demanding prompt is provided
    as an option to use with weaker LLMs such as smaller, low float precision, quantized,
    or distilled models. It only uses one context document and places very weak requirements
    on the output format.
"""
@property
def is_json_output(self) -> bool:
return False
def build_prompt(
self, query: str, context_chunks: list[InferenceChunk]
) -> list[BaseMessage]:
message = WEAK_LLM_PROMPT.format(single_reference_doc=context_chunks[0].content)
return [HumanMessage(content=message)]
class SingleMessageQAHandler(QAHandler):
@property
def is_json_output(self) -> bool:
return True
def build_prompt(
self, query: str, context_chunks: list[InferenceChunk]
) -> list[BaseMessage]:
context_docs_str = "\n".join(
f"\n{CODE_BLOCK_PAT.format(c.content)}\n" for c in context_chunks
)
single_message = JSON_PROMPT.format(
context_docs_str=context_docs_str, user_query=query
)
prompt: list[BaseMessage] = [HumanMessage(content=single_message)]
return prompt
class SingleMessageScratchpadHandler(QAHandler):
@property
def is_json_output(self) -> bool:
# Even though the full LLM output isn't a valid json
# only the valid json portion is kept and passed along
# therefore it is treated as a json output
return True
def build_prompt(
self, query: str, context_chunks: list[InferenceChunk]
) -> list[BaseMessage]:
context_docs_str = "\n".join(
f"\n{CODE_BLOCK_PAT.format(c.content)}\n" for c in context_chunks
)
single_message = COT_PROMPT.format(
context_docs_str=context_docs_str, user_query=query
)
prompt: list[BaseMessage] = [HumanMessage(content=single_message)]
return prompt
def process_llm_output(
self, model_output: str, context_chunks: list[InferenceChunk]
) -> tuple[DanswerAnswer, DanswerQuotes]:
logger.debug(model_output)
model_clean = clean_up_code_blocks(model_output)
match = re.search(r'{\s*"answer":', model_clean)
if not match:
return DanswerAnswer(answer=None), DanswerQuotes(quotes=[])
final_json = escape_newlines(model_clean[match.start() :])
return process_answer(
final_json, context_chunks, is_json_prompt=self.is_json_output
)
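    # Illustrative example for process_llm_output above (hypothetical model output): given
    # 'Thinking through the documents...\n```json\n{"answer": "42", "quotes": []}\n```',
    # clean_up_code_blocks strips the markdown fences, the regex keeps only the portion
    # starting at '{"answer":', and that JSON is what process_answer parses.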
def process_llm_token_stream(
self, tokens: Iterator[str], context_chunks: list[InferenceChunk]
) -> AnswerQuestionStreamReturn:
# Can be supported but the parsing is more involved, not handling until needed
raise ValueError(
"This Scratchpad approach is not suitable for real time uses like streaming"
)
class QABlock(QAModel):
def __init__(self, llm: LLM, qa_handler: QAHandler) -> None:
self._llm = llm
self._qa_handler = qa_handler
@property
def requires_api_key(self) -> bool:
return self._llm.requires_api_key
def warm_up_model(self) -> None:
"""This is called during server start up to load the models into memory
in case the chosen LLM is not accessed via API"""
if self._llm.requires_warm_up:
logger.info("Warming up LLM with a first inference")
self._llm.invoke("Ignore this!")
def answer_question(
self,
query: str,
context_docs: list[InferenceChunk],
metrics_callback: Callable[[LLMMetricsContainer], None] | None = None,
) -> AnswerQuestionReturn:
trimmed_context_docs = tokenizer_trim_chunks(context_docs)
prompt = self._qa_handler.build_prompt(query, trimmed_context_docs)
model_out = self._llm.invoke(prompt)
if metrics_callback is not None:
prompt_tokens = sum(
[
check_number_of_tokens(
text=p.content, encode_fn=get_default_llm_token_encode()
)
for p in prompt
]
)
response_tokens = check_number_of_tokens(
text=model_out, encode_fn=get_default_llm_token_encode()
)
metrics_callback(
LLMMetricsContainer(
prompt_tokens=prompt_tokens, response_tokens=response_tokens
)
)
return self._qa_handler.process_llm_output(model_out, trimmed_context_docs)
def answer_question_stream(
self,
query: str,
context_docs: list[InferenceChunk],
) -> AnswerQuestionStreamReturn:
trimmed_context_docs = tokenizer_trim_chunks(context_docs)
prompt = self._qa_handler.build_prompt(query, trimmed_context_docs)
tokens = self._llm.stream(prompt)
yield from self._qa_handler.process_llm_token_stream(
tokens, trimmed_context_docs
)
| [] |
2024-01-10 | alsterlabs-ug/matny-search | backend~danswer~llm~gpt_4_all.py | from collections.abc import Iterator
from typing import Any
from langchain.schema.language_model import LanguageModelInput
from danswer.configs.model_configs import GEN_AI_MAX_OUTPUT_TOKENS
from danswer.configs.model_configs import GEN_AI_MODEL_VERSION
from danswer.configs.model_configs import GEN_AI_TEMPERATURE
from danswer.llm.interfaces import LLM
from danswer.llm.utils import convert_lm_input_to_basic_string
from danswer.utils.logger import setup_logger
logger = setup_logger()
class DummyGPT4All:
"""In the case of import failure due to architectural incompatibilities,
this module does not raise exceptions during server startup,
as long as the module isn't actually used"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
raise RuntimeError("GPT4All library not installed.")
try:
from gpt4all import GPT4All # type:ignore
except ImportError:
# Setting a low log level because users get scared when they see this
logger.debug(
"GPT4All library not installed. "
"If you wish to run GPT4ALL (in memory) to power Danswer's "
"Generative AI features, please install gpt4all==2.0.2."
)
GPT4All = DummyGPT4All
class DanswerGPT4All(LLM):
"""Option to run an LLM locally, however this is significantly slower and
answers tend to be much worse"""
@property
def requires_warm_up(self) -> bool:
"""GPT4All models are lazy loaded, load them on server start so that the
first inference isn't extremely delayed"""
return True
@property
def requires_api_key(self) -> bool:
return False
def __init__(
self,
timeout: int,
model_version: str = GEN_AI_MODEL_VERSION,
max_output_tokens: int = GEN_AI_MAX_OUTPUT_TOKENS,
temperature: float = GEN_AI_TEMPERATURE,
):
self.timeout = timeout
self.max_output_tokens = max_output_tokens
self.temperature = temperature
self.gpt4all_model = GPT4All(model_version)
def invoke(self, prompt: LanguageModelInput) -> str:
prompt_basic = convert_lm_input_to_basic_string(prompt)
return self.gpt4all_model.generate(prompt_basic)
def stream(self, prompt: LanguageModelInput) -> Iterator[str]:
prompt_basic = convert_lm_input_to_basic_string(prompt)
return self.gpt4all_model.generate(prompt_basic, streaming=True)
| [] |
2024-01-10 | alsterlabs-ug/matny-search | backend~danswer~llm~interfaces.py | import abc
from collections.abc import Iterator
from langchain.schema.language_model import LanguageModelInput
from danswer.utils.logger import setup_logger
logger = setup_logger()
class LLM(abc.ABC):
"""Mimics the LangChain LLM / BaseChatModel interfaces to make it easy
to use these implementations to connect to a variety of LLM providers."""
@property
def requires_warm_up(self) -> bool:
"""Is this model running in memory and needs an initial call to warm it up?"""
return False
@property
def requires_api_key(self) -> bool:
return True
@abc.abstractmethod
def invoke(self, prompt: LanguageModelInput) -> str:
raise NotImplementedError
@abc.abstractmethod
def stream(self, prompt: LanguageModelInput) -> Iterator[str]:
raise NotImplementedError
| [] |
2024-01-10 | alsterlabs-ug/matny-search | backend~danswer~chat~chat_llm.py | import re
from collections.abc import Callable
from collections.abc import Iterator
from langchain.schema.messages import AIMessage
from langchain.schema.messages import BaseMessage
from langchain.schema.messages import HumanMessage
from langchain.schema.messages import SystemMessage
from sqlalchemy.orm import Session
from danswer.chat.chat_prompts import build_combined_query
from danswer.chat.chat_prompts import DANSWER_TOOL_NAME
from danswer.chat.chat_prompts import form_require_search_text
from danswer.chat.chat_prompts import form_tool_followup_text
from danswer.chat.chat_prompts import form_tool_less_followup_text
from danswer.chat.chat_prompts import form_tool_section_text
from danswer.chat.chat_prompts import form_user_prompt_text
from danswer.chat.chat_prompts import format_danswer_chunks_for_chat
from danswer.chat.chat_prompts import REQUIRE_DANSWER_SYSTEM_MSG
from danswer.chat.chat_prompts import YES_SEARCH
from danswer.chat.personas import build_system_text_from_persona
from danswer.chat.tools import call_tool
from danswer.configs.app_configs import NUM_DOCUMENT_TOKENS_FED_TO_CHAT
from danswer.configs.chat_configs import FORCE_TOOL_PROMPT
from danswer.configs.constants import IGNORE_FOR_QA
from danswer.configs.model_configs import GEN_AI_MAX_INPUT_TOKENS
from danswer.db.models import ChatMessage
from danswer.db.models import Persona
from danswer.db.models import User
from danswer.direct_qa.interfaces import DanswerAnswerPiece
from danswer.direct_qa.interfaces import DanswerChatModelOut
from danswer.direct_qa.interfaces import StreamingError
from danswer.direct_qa.qa_utils import get_usable_chunks
from danswer.document_index.factory import get_default_document_index
from danswer.indexing.models import InferenceChunk
from danswer.llm.factory import get_default_llm
from danswer.llm.interfaces import LLM
from danswer.llm.utils import get_default_llm_token_encode
from danswer.llm.utils import translate_danswer_msg_to_langchain
from danswer.search.access_filters import build_access_filters_for_user
from danswer.search.models import IndexFilters
from danswer.search.models import SearchQuery
from danswer.search.models import SearchType
from danswer.search.search_runner import chunks_to_search_docs
from danswer.search.search_runner import search_chunks
from danswer.server.models import RetrievalDocs
from danswer.utils.logger import setup_logger
from danswer.utils.text_processing import extract_embedded_json
from danswer.utils.text_processing import has_unescaped_quote
logger = setup_logger()
LLM_CHAT_FAILURE_MSG = "The large-language-model failed to generate a valid response."
def _parse_embedded_json_streamed_response(
tokens: Iterator[str],
) -> Iterator[DanswerAnswerPiece | DanswerChatModelOut]:
final_answer = False
just_start_stream = False
model_output = ""
hold = ""
finding_end = 0
for token in tokens:
model_output += token
hold += token
if (
final_answer is False
and '"action":"finalanswer",' in model_output.lower().replace(" ", "")
):
final_answer = True
if final_answer and '"actioninput":"' in model_output.lower().replace(
" ", ""
).replace("_", ""):
if not just_start_stream:
just_start_stream = True
hold = ""
if has_unescaped_quote(hold):
finding_end += 1
hold = hold[: hold.find('"')]
if finding_end <= 1:
if finding_end == 1:
finding_end += 1
yield DanswerAnswerPiece(answer_piece=hold)
hold = ""
model_final = extract_embedded_json(model_output)
if "action" not in model_final or "action_input" not in model_final:
raise ValueError("Model did not provide all required action values")
yield DanswerChatModelOut(
model_raw=model_output,
action=model_final["action"],
action_input=model_final["action_input"],
)
return
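# Illustrative behaviour (hypothetical stream): if the model streams out
# '{"action": "Final Answer", "action_input": "Hello there"}', the generator above detects the
# final-answer action, yields the answer text (roughly "Hello there") as DanswerAnswerPiece
# chunks, and finishes with a single DanswerChatModelOut carrying the parsed action fields.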
def _find_last_index(
lst: list[int], max_prompt_tokens: int = GEN_AI_MAX_INPUT_TOKENS
) -> int:
"""From the back, find the index of the last element to include
before the list exceeds the maximum"""
running_sum = 0
last_ind = 0
for i in range(len(lst) - 1, -1, -1):
running_sum += lst[i]
if running_sum > max_prompt_tokens:
last_ind = i + 1
break
if last_ind >= len(lst):
raise ValueError("Last message alone is too large!")
return last_ind
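# Worked example for _find_last_index (hypothetical token counts): with
# lst=[50, 200, 400, 300] and max_prompt_tokens=800, summing from the back gives
# 300, then 700, then 900, which exceeds the limit at index 1, so the function
# returns 2 and only the last two entries (400 + 300 tokens) are kept.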
def danswer_chat_retrieval(
query_message: ChatMessage,
history: list[ChatMessage],
llm: LLM,
filters: IndexFilters,
) -> list[InferenceChunk]:
if history:
query_combination_msgs = build_combined_query(query_message, history)
reworded_query = llm.invoke(query_combination_msgs)
else:
reworded_query = query_message.message
search_query = SearchQuery(
query=reworded_query,
search_type=SearchType.HYBRID,
filters=filters,
favor_recent=False,
)
# Good Debug/Breakpoint
ranked_chunks, unranked_chunks = search_chunks(
query=search_query, document_index=get_default_document_index()
)
if not ranked_chunks:
return []
if unranked_chunks:
ranked_chunks.extend(unranked_chunks)
filtered_ranked_chunks = [
chunk for chunk in ranked_chunks if not chunk.metadata.get(IGNORE_FOR_QA)
]
# get all chunks that fit into the token limit
usable_chunks = get_usable_chunks(
chunks=filtered_ranked_chunks,
token_limit=NUM_DOCUMENT_TOKENS_FED_TO_CHAT,
)
return usable_chunks
def _drop_messages_history_overflow(
system_msg: BaseMessage | None,
system_token_count: int,
history_msgs: list[BaseMessage],
history_token_counts: list[int],
final_msg: BaseMessage,
final_msg_token_count: int,
) -> list[BaseMessage]:
"""As message history grows, messages need to be dropped starting from the furthest in the past.
    The System message should be kept if at all possible, and the latest user input, which is
    inserted in the prompt template, must be included"""
if len(history_msgs) != len(history_token_counts):
# This should never happen
raise ValueError("Need exactly 1 token count per message for tracking overflow")
prompt: list[BaseMessage] = []
# Start dropping from the history if necessary
all_tokens = history_token_counts + [system_token_count, final_msg_token_count]
ind_prev_msg_start = _find_last_index(all_tokens)
if system_msg and ind_prev_msg_start <= len(history_msgs):
prompt.append(system_msg)
prompt.extend(history_msgs[ind_prev_msg_start:])
prompt.append(final_msg)
return prompt
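# Illustrative behaviour (hypothetical counts, assuming a 1000-token limit): with a
# 100-token system message, three 500-token history messages, and a 300-token final
# message, _find_last_index drops the two oldest history messages, so the returned
# prompt is [system_msg, history_msgs[-1], final_msg].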
def extract_citations_from_stream(
tokens: Iterator[str], links: list[str | None]
) -> Iterator[str]:
if not links:
yield from tokens
return
max_citation_num = len(links) + 1 # LLM is prompted to 1 index these
curr_segment = ""
prepend_bracket = False
for token in tokens:
# Special case of [1][ where ][ is a single token
if prepend_bracket:
curr_segment += "[" + curr_segment
prepend_bracket = False
curr_segment += token
possible_citation_pattern = r"(\[\d*$)" # [1, [, etc
possible_citation_found = re.search(possible_citation_pattern, curr_segment)
citation_pattern = r"\[(\d+)\]" # [1], [2] etc
citation_found = re.search(citation_pattern, curr_segment)
if citation_found:
numerical_value = int(citation_found.group(1))
if 1 <= numerical_value <= max_citation_num:
link = links[numerical_value - 1]
if link:
curr_segment = re.sub(r"\[", "[[", curr_segment, count=1)
curr_segment = re.sub("]", f"]]({link})", curr_segment, count=1)
# In case there's another open bracket like [1][, don't want to match this
possible_citation_found = None
# if we see "[", but haven't seen the right side, hold back - this may be a
# citation that needs to be replaced with a link
if possible_citation_found:
continue
# Special case with back to back citations [1][2]
if curr_segment and curr_segment[-1] == "[":
curr_segment = curr_segment[:-1]
prepend_bracket = True
yield curr_segment
curr_segment = ""
if curr_segment:
if prepend_bracket:
yield "[" + curr_segment
else:
yield curr_segment
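# Illustrative example for extract_citations_from_stream (hypothetical link): streaming the
# tokens "See", " [1]", "." with links=["https://example.com/doc"] yields
# "See", " [[1]](https://example.com/doc)", ".", while citations outside the range of the
# provided links (e.g. [7]) are passed through unchanged.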
def llm_contextless_chat_answer(
messages: list[ChatMessage],
system_text: str | None = None,
tokenizer: Callable | None = None,
) -> Iterator[DanswerAnswerPiece | StreamingError]:
try:
prompt_msgs = [translate_danswer_msg_to_langchain(msg) for msg in messages]
if system_text:
tokenizer = tokenizer or get_default_llm_token_encode()
system_tokens = len(tokenizer(system_text))
system_msg = SystemMessage(content=system_text)
message_tokens = [msg.token_count for msg in messages] + [system_tokens]
else:
message_tokens = [msg.token_count for msg in messages]
last_msg_ind = _find_last_index(message_tokens)
remaining_user_msgs = prompt_msgs[last_msg_ind:]
if not remaining_user_msgs:
raise ValueError("Last user message is too long!")
if system_text:
all_msgs = [system_msg] + remaining_user_msgs
else:
all_msgs = remaining_user_msgs
for token in get_default_llm().stream(all_msgs):
yield DanswerAnswerPiece(answer_piece=token)
except Exception as e:
logger.exception(f"LLM failed to produce valid chat message, error: {e}")
yield StreamingError(error=str(e))
def llm_contextual_chat_answer(
messages: list[ChatMessage],
persona: Persona,
user: User | None,
tokenizer: Callable,
db_session: Session,
run_search_system_text: str = REQUIRE_DANSWER_SYSTEM_MSG,
) -> Iterator[DanswerAnswerPiece | RetrievalDocs | StreamingError]:
last_message = messages[-1]
final_query_text = last_message.message
previous_messages = messages[:-1]
previous_msgs_as_basemessage = [
translate_danswer_msg_to_langchain(msg) for msg in previous_messages
]
try:
llm = get_default_llm()
if not final_query_text:
raise ValueError("User chat message is empty.")
# Determine if a search is necessary to answer the user query
user_req_search_text = form_require_search_text(last_message)
last_user_msg = HumanMessage(content=user_req_search_text)
previous_msg_token_counts = [msg.token_count for msg in previous_messages]
danswer_system_tokens = len(tokenizer(run_search_system_text))
last_user_msg_tokens = len(tokenizer(user_req_search_text))
need_search_prompt = _drop_messages_history_overflow(
system_msg=SystemMessage(content=run_search_system_text),
system_token_count=danswer_system_tokens,
history_msgs=previous_msgs_as_basemessage,
history_token_counts=previous_msg_token_counts,
final_msg=last_user_msg,
final_msg_token_count=last_user_msg_tokens,
)
# Good Debug/Breakpoint
model_out = llm.invoke(need_search_prompt)
# Model will output "Yes Search" if search is useful
# Be a little forgiving though, if we match yes, it's good enough
retrieved_chunks: list[InferenceChunk] = []
if (YES_SEARCH.split()[0] + " ").lower() in model_out.lower():
user_acl_filters = build_access_filters_for_user(user, db_session)
doc_set_filter = [doc_set.name for doc_set in persona.document_sets] or None
final_filters = IndexFilters(
source_type=None,
document_set=doc_set_filter,
time_cutoff=None,
access_control_list=user_acl_filters,
)
retrieved_chunks = danswer_chat_retrieval(
query_message=last_message,
history=previous_messages,
llm=llm,
filters=final_filters,
)
yield RetrievalDocs(top_documents=chunks_to_search_docs(retrieved_chunks))
tool_result_str = format_danswer_chunks_for_chat(retrieved_chunks)
last_user_msg_text = form_tool_less_followup_text(
tool_output=tool_result_str,
query=last_message.message,
hint_text=persona.hint_text,
)
last_user_msg_tokens = len(tokenizer(last_user_msg_text))
last_user_msg = HumanMessage(content=last_user_msg_text)
else:
last_user_msg_tokens = len(tokenizer(final_query_text))
last_user_msg = HumanMessage(content=final_query_text)
system_text = build_system_text_from_persona(persona)
system_msg = SystemMessage(content=system_text) if system_text else None
system_tokens = len(tokenizer(system_text)) if system_text else 0
prompt = _drop_messages_history_overflow(
system_msg=system_msg,
system_token_count=system_tokens,
history_msgs=previous_msgs_as_basemessage,
history_token_counts=previous_msg_token_counts,
final_msg=last_user_msg,
final_msg_token_count=last_user_msg_tokens,
)
# Good Debug/Breakpoint
tokens = llm.stream(prompt)
links = [
chunk.source_links[0] if chunk.source_links else None
for chunk in retrieved_chunks
]
for segment in extract_citations_from_stream(tokens, links):
yield DanswerAnswerPiece(answer_piece=segment)
except Exception as e:
logger.exception(f"LLM failed to produce valid chat message, error: {e}")
yield StreamingError(error=str(e))
def llm_tools_enabled_chat_answer(
messages: list[ChatMessage],
persona: Persona,
user: User | None,
tokenizer: Callable,
db_session: Session,
) -> Iterator[DanswerAnswerPiece | RetrievalDocs | StreamingError]:
retrieval_enabled = persona.retrieval_enabled
system_text = build_system_text_from_persona(persona)
hint_text = persona.hint_text
tool_text = form_tool_section_text(persona.tools, persona.retrieval_enabled)
last_message = messages[-1]
previous_messages = messages[:-1]
previous_msgs_as_basemessage = [
translate_danswer_msg_to_langchain(msg) for msg in previous_messages
]
# Failure reasons include:
# - Invalid LLM output, wrong format or wrong/missing keys
# - No "Final Answer" from model after tool calling
# - LLM times out or is otherwise unavailable
# - Calling invalid tool or tool call fails
# - Last message has more tokens than model is set to accept
# - Missing user input
try:
if not last_message.message:
raise ValueError("User chat message is empty.")
# Build the prompt using the last user message
user_text = form_user_prompt_text(
query=last_message.message,
tool_text=tool_text,
hint_text=hint_text,
)
last_user_msg = HumanMessage(content=user_text)
# Count tokens once to reuse
previous_msg_token_counts = [msg.token_count for msg in previous_messages]
system_tokens = len(tokenizer(system_text)) if system_text else 0
last_user_msg_tokens = len(tokenizer(user_text))
prompt = _drop_messages_history_overflow(
system_msg=SystemMessage(content=system_text) if system_text else None,
system_token_count=system_tokens,
history_msgs=previous_msgs_as_basemessage,
history_token_counts=previous_msg_token_counts,
final_msg=last_user_msg,
final_msg_token_count=last_user_msg_tokens,
)
llm = get_default_llm()
# Good Debug/Breakpoint
tokens = llm.stream(prompt)
final_result: DanswerChatModelOut | None = None
final_answer_streamed = False
for result in _parse_embedded_json_streamed_response(tokens):
if isinstance(result, DanswerAnswerPiece) and result.answer_piece:
yield result
final_answer_streamed = True
if isinstance(result, DanswerChatModelOut):
final_result = result
break
if final_answer_streamed:
return
if final_result is None:
raise RuntimeError("Model output finished without final output parsing.")
if (
retrieval_enabled
and final_result.action.lower() == DANSWER_TOOL_NAME.lower()
):
user_acl_filters = build_access_filters_for_user(user, db_session)
doc_set_filter = [doc_set.name for doc_set in persona.document_sets] or None
final_filters = IndexFilters(
source_type=None,
document_set=doc_set_filter,
time_cutoff=None,
access_control_list=user_acl_filters,
)
retrieved_chunks = danswer_chat_retrieval(
query_message=last_message,
history=previous_messages,
llm=llm,
filters=final_filters,
)
yield RetrievalDocs(top_documents=chunks_to_search_docs(retrieved_chunks))
tool_result_str = format_danswer_chunks_for_chat(retrieved_chunks)
else:
tool_result_str = call_tool(final_result)
# The AI's tool calling message
tool_call_msg_text = final_result.model_raw
tool_call_msg_token_count = len(tokenizer(tool_call_msg_text))
# Create the new message to use the results of the tool call
tool_followup_text = form_tool_followup_text(
tool_output=tool_result_str,
query=last_message.message,
hint_text=hint_text,
)
tool_followup_msg = HumanMessage(content=tool_followup_text)
tool_followup_tokens = len(tokenizer(tool_followup_text))
# Drop previous messages, the drop order goes: previous messages in the history,
# the last user prompt and generated intermediate messages from this recent prompt,
# the system message, then finally the tool message that was the last thing generated
follow_up_prompt = _drop_messages_history_overflow(
system_msg=SystemMessage(content=system_text) if system_text else None,
system_token_count=system_tokens,
history_msgs=previous_msgs_as_basemessage
+ [last_user_msg, AIMessage(content=tool_call_msg_text)],
history_token_counts=previous_msg_token_counts
+ [last_user_msg_tokens, tool_call_msg_token_count],
final_msg=tool_followup_msg,
final_msg_token_count=tool_followup_tokens,
)
# Good Debug/Breakpoint
tokens = llm.stream(follow_up_prompt)
for result in _parse_embedded_json_streamed_response(tokens):
if isinstance(result, DanswerAnswerPiece) and result.answer_piece:
yield result
final_answer_streamed = True
if final_answer_streamed is False:
raise RuntimeError("LLM did not to produce a Final Answer after tool call")
except Exception as e:
logger.exception(f"LLM failed to produce valid chat message, error: {e}")
yield StreamingError(error=str(e))
def llm_chat_answer(
messages: list[ChatMessage],
persona: Persona | None,
tokenizer: Callable,
user: User | None,
db_session: Session,
) -> Iterator[DanswerAnswerPiece | RetrievalDocs | StreamingError]:
# Common error cases to keep in mind:
# - User asks question about something long ago, due to context limit, the message is dropped
# - Tool use gives wrong/irrelevant results, model gets confused by the noise
# - Model is too weak of an LLM, fails to follow instructions
# - Bad persona design leads to confusing instructions to the model
# - Bad configurations, too small token limit, mismatched tokenizer to LLM, etc.
# No setting/persona available therefore no retrieval and no additional tools
if persona is None:
return llm_contextless_chat_answer(messages)
# Persona is configured but with retrieval off and no tools
# therefore cannot retrieve any context so contextless
elif persona.retrieval_enabled is False and not persona.tools:
return llm_contextless_chat_answer(
messages, system_text=persona.system_text, tokenizer=tokenizer
)
# No additional tools outside of Danswer retrieval, can use a more basic prompt
# Doesn't require tool calling output format (all LLM outputs are therefore valid)
elif persona.retrieval_enabled and not persona.tools and not FORCE_TOOL_PROMPT:
return llm_contextual_chat_answer(
messages=messages,
persona=persona,
tokenizer=tokenizer,
user=user,
db_session=db_session,
)
# Use most flexible/complex prompt format that allows arbitrary tool calls
# that are configured in the persona file
# WARNING: this flow does not work well with weaker LLMs (anything below GPT-4)
return llm_tools_enabled_chat_answer(
messages=messages,
persona=persona,
tokenizer=tokenizer,
user=user,
db_session=db_session,
)
| [] |
2024-01-10 | s-udhaya/mlflow | tests~gateway~providers~test_anthropic.py | from unittest import mock
from fastapi import HTTPException
from fastapi.encoders import jsonable_encoder
import pytest
from mlflow.gateway.providers.anthropic import AnthropicProvider
from mlflow.gateway.schemas import chat, completions, embeddings
from mlflow.gateway.config import RouteConfig
from mlflow.gateway.constants import MLFLOW_AI_GATEWAY_ANTHROPIC_MAXIMUM_MAX_TOKENS
from tests.gateway.tools import MockAsyncResponse
def completions_response():
return {
"completion": "Here is a basic overview of how a car works:\n\n1. The engine. "
"The engine is the power source that makes the car move.",
"stop_reason": "max_tokens",
"model": "claude-instant-1.1",
"truncated": False,
"stop": None,
"log_id": "dee173f87ddf1357da639dee3c38d833",
"exception": None,
}
def completions_config():
return {
"name": "completions",
"route_type": "llm/v1/completions",
"model": {
"provider": "anthropic",
"name": "claude-instant-1",
"config": {
"anthropic_api_key": "key",
},
},
}
def parsed_completions_response():
return {
"candidates": [
{
"text": "Here is a basic overview of how a car works:\n\n1. The engine. "
"The engine is the power source that makes the car move.",
"metadata": {"finish_reason": "length"},
}
],
"metadata": {
"model": "claude-instant-1.1",
"route_type": "llm/v1/completions",
"input_tokens": None,
"output_tokens": None,
"total_tokens": None,
},
}
@pytest.mark.asyncio
async def test_completions():
resp = completions_response()
config = completions_config()
with mock.patch(
"aiohttp.ClientSession.post", return_value=MockAsyncResponse(resp)
) as mock_post:
provider = AnthropicProvider(RouteConfig(**config))
payload = {"prompt": "How does a car work?", "max_tokens": 200}
response = await provider.completions(completions.RequestPayload(**payload))
assert jsonable_encoder(response) == parsed_completions_response()
mock_post.assert_called_once()
@pytest.mark.asyncio
async def test_completions_with_default_max_tokens():
resp = completions_response()
config = completions_config()
with mock.patch(
"aiohttp.ClientSession.post", return_value=MockAsyncResponse(resp)
) as mock_post:
provider = AnthropicProvider(RouteConfig(**config))
payload = {"prompt": "How does a car work?"}
response = await provider.completions(completions.RequestPayload(**payload))
assert jsonable_encoder(response) == parsed_completions_response()
mock_post.assert_called_once()
@pytest.mark.asyncio
async def test_completions_throws_with_invalid_max_tokens_too_large():
config = completions_config()
provider = AnthropicProvider(RouteConfig(**config))
payload = {"prompt": "Would Fozzie or Kermet win in a fight?", "max_tokens": 1000001}
with pytest.raises(HTTPException, match=r".*") as e:
await provider.completions(completions.RequestPayload(**payload))
assert (
"Invalid value for max_tokens: cannot exceed "
f"{MLFLOW_AI_GATEWAY_ANTHROPIC_MAXIMUM_MAX_TOKENS}" in e.value.detail
)
assert e.value.status_code == 422
@pytest.mark.asyncio
async def test_completions_throws_with_unsupported_candidate_count():
config = completions_config()
provider = AnthropicProvider(RouteConfig(**config))
payload = {
"prompt": "Would Fozzie or Kermet win in a fight?",
"candidate_count": 5,
"max_tokens": 10,
}
with pytest.raises(HTTPException, match=r".*") as e:
await provider.completions(completions.RequestPayload(**payload))
assert "'candidate_count' must be '1' for the Anthropic provider" in e.value.detail
assert e.value.status_code == 422
@pytest.mark.asyncio
async def test_completions_throws_with_top_p_defined():
config = completions_config()
provider = AnthropicProvider(RouteConfig(**config))
payload = {"prompt": "Would Fozzie or Kermet win in a fight?", "max_tokens": 500, "top_p": 0.6}
with pytest.raises(HTTPException, match=r".*") as e:
await provider.completions(completions.RequestPayload(**payload))
assert "Cannot set both 'temperature' and 'top_p' parameters. Please" in e.value.detail
assert e.value.status_code == 422
@pytest.mark.asyncio
async def test_completions_throws_with_stream_set_to_true():
config = completions_config()
provider = AnthropicProvider(RouteConfig(**config))
payload = {
"prompt": "Could the Millennium Falcon fight a Borg Cube and win?",
"max_tokens": 5000,
"stream": "true",
}
with pytest.raises(HTTPException, match=r".*") as e:
await provider.completions(completions.RequestPayload(**payload))
assert "Setting the 'stream' parameter to 'true' is not supported" in e.value.detail
assert e.value.status_code == 422
def chat_config():
return {
"name": "chat",
"route_type": "llm/v1/chat",
"model": {
"provider": "anthropic",
"name": "claude-instant-1",
"config": {
"anthropic_api_key": "key",
},
},
}
@pytest.mark.asyncio
async def test_chat_is_not_supported_for_anthropic():
config = chat_config()
provider = AnthropicProvider(RouteConfig(**config))
payload = {
"messages": [{"role": "user", "content": "Claude, can you chat with me? I'm lonely."}]
}
with pytest.raises(HTTPException, match=r".*") as e:
await provider.chat(chat.RequestPayload(**payload))
assert "The chat route is not available for Anthropic models" in e.value.detail
assert e.value.status_code == 404
def embedding_config():
return {
"name": "embeddings",
"route_type": "llm/v1/embeddings",
"model": {
"provider": "anthropic",
"name": "claude-1.3-100k",
"config": {
"anthropic_api_key": "key",
},
},
}
@pytest.mark.asyncio
async def test_embeddings_are_not_supported_for_anthropic():
config = embedding_config()
provider = AnthropicProvider(RouteConfig(**config))
payload = {"text": "give me that sweet, sweet vector, please."}
with pytest.raises(HTTPException, match=r".*") as e:
await provider.embeddings(embeddings.RequestPayload(**payload))
assert "The embeddings route is not available for Anthropic models" in e.value.detail
assert e.value.status_code == 404
@pytest.mark.asyncio
async def test_param_model_is_not_permitted():
config = completions_config()
provider = AnthropicProvider(RouteConfig(**config))
payload = {
"prompt": "This should fail",
"max_tokens": 5000,
"model": "something-else",
}
with pytest.raises(HTTPException, match=r".*") as e:
await provider.completions(completions.RequestPayload(**payload))
assert "The parameter 'model' is not permitted" in e.value.detail
assert e.value.status_code == 422
| [
"Claude, can you chat with me? I'm lonely."
] |
2024-01-10 | andfanilo/social-media-tutorials | 20230611-stchatbot~streamlit_app.py | from dataclasses import dataclass
from typing import Literal
import streamlit as st
from langchain import OpenAI
from langchain.callbacks import get_openai_callback
from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import ConversationSummaryMemory
import streamlit.components.v1 as components
@dataclass
class Message:
"""Class for keeping track of a chat message."""
origin: Literal["human", "ai"]
message: str
def load_css():
with open("static/styles.css", "r") as f:
css = f"<style>{f.read()}</style>"
st.markdown(css, unsafe_allow_html=True)
def initialize_session_state():
if "history" not in st.session_state:
st.session_state.history = []
if "token_count" not in st.session_state:
st.session_state.token_count = 0
if "conversation" not in st.session_state:
llm = OpenAI(
temperature=0,
openai_api_key=st.secrets["openai_api_key"],
model_name="text-davinci-003"
)
st.session_state.conversation = ConversationChain(
llm=llm,
memory=ConversationSummaryMemory(llm=llm),
)
def on_click_callback():
with get_openai_callback() as cb:
human_prompt = st.session_state.human_prompt
llm_response = st.session_state.conversation.run(
human_prompt
)
st.session_state.history.append(
Message("human", human_prompt)
)
st.session_state.history.append(
Message("ai", llm_response)
)
st.session_state.token_count += cb.total_tokens
load_css()
initialize_session_state()
st.title("Hello Custom CSS Chatbot 🤖")
chat_placeholder = st.container()
prompt_placeholder = st.form("chat-form")
credit_card_placeholder = st.empty()
with chat_placeholder:
for chat in st.session_state.history:
div = f"""
<div class="chat-row
{'' if chat.origin == 'ai' else 'row-reverse'}">
<img class="chat-icon" src="app/static/{
'ai_icon.png' if chat.origin == 'ai'
else 'user_icon.png'}"
width=32 height=32>
<div class="chat-bubble
{'ai-bubble' if chat.origin == 'ai' else 'human-bubble'}">
​{chat.message}
</div>
</div>
"""
st.markdown(div, unsafe_allow_html=True)
for _ in range(3):
st.markdown("")
with prompt_placeholder:
st.markdown("**Chat**")
cols = st.columns((6, 1))
cols[0].text_input(
"Chat",
value="Hello bot",
label_visibility="collapsed",
key="human_prompt",
)
cols[1].form_submit_button(
"Submit",
type="primary",
on_click=on_click_callback,
)
credit_card_placeholder.caption(f"""
Used {st.session_state.token_count} tokens \n
Debug Langchain conversation:
{st.session_state.conversation.memory.buffer}
""")
components.html("""
<script>
const streamlitDoc = window.parent.document;
const buttons = Array.from(
streamlitDoc.querySelectorAll('.stButton > button')
);
const submitButton = buttons.find(
el => el.innerText === 'Submit'
);
streamlitDoc.addEventListener('keydown', function(e) {
switch (e.key) {
case 'Enter':
submitButton.click();
break;
}
});
</script>
""",
height=0,
width=0,
) | [
"ai",
"human"
] |
2024-01-10 | om202/python-langchain | llm_openai.py | from langchain.llms import OpenAI
from langchain.embeddings import OpenAIEmbeddings
from getKeys import OPENAI_KEY
def get_openai_llm():
return OpenAI(openai_api_key=OPENAI_KEY, model_name="gpt-3.5-turbo-16k")
def get_openai_embeddings():
return OpenAIEmbeddings(openai_api_key=OPENAI_KEY) | [] |
2024-01-10 | om202/python-langchain | inference.py | from find_similar import retriever_chroma
from llm_openai import get_openai_llm
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
import os
from langchain.globals import set_debug, set_verbose, warnings
# check https://colab.research.google.com/drive/1gyGZn_LZNrYXYXa-pltFExbptIe7DAPe?usp=sharing#scrollTo=LZEo26mw8e5k
clear = lambda: os.system("clear")
prompt_template = """Utilize the text provided in the document below to answer the following question: {question}. Ensure to reference specific sections of the text in your response. If the document does not contain sufficient information to answer the question, use your own knowledge to provide a well-informed answer. Structure your answer in a clear and concise manner, summarizing key points from the document as necessary. Here's the document text for reference: {information}."""
llm = get_openai_llm()
chain = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever_chroma(),
return_source_documents=False,
)
def chat():
input_question = "test"
clear()
print("Welcome! \t Write /exit to exit.")
while True:
print("\n")
input_question = input("Question: ")
if input_question == "q" or input_question == "Q":
print("Goodbye! \n")
break
llm_response = chain(input_question)
print("\n" + llm_response["result"])
if __name__ == "__main__":
set_debug(False)
set_verbose(False)
warnings.defaultaction = "ignore"
chat()
| [
"Utilize the text provided in the document below to answer the following question: {question}. Ensure to reference specific sections of the text in your response. If the document does not contain sufficient information to answer the question, use your own knowledge to provide a well-informed answer. Structure your answer in a clear and concise manner, summarizing key points from the document as necessary. Here's the document text for reference: {information}."
] |
2024-01-10 | voho0000/GPT-testCase-Generator | server-azure.py | from flask import Flask, request, jsonify
from flask_cors import CORS, cross_origin
import openai
import requests
import json
from dotenv import load_dotenv
import os
import asana
import re
# Load the .env file located in the project directory
load_dotenv()
AZURE_API_KEY = os.getenv('AZURE_API_KEY')
# Asana personal access token used by create_task(); the env var name here is an assumption
ASANA_API_KEY = os.getenv('ASANA_API_KEY')
project_gid = '1203880491753826' # master script
openai.api_type = "azure"
# replace with you api endpoint
openai.api_base = "https://user1-create-gpt.openai.azure.com/"
openai.api_version = "2023-03-15-preview"
openai.api_key = AZURE_API_KEY
app = Flask(__name__)
CORS(app)
@app.route('/generate_test_case', methods=['POST'])
@cross_origin()
def generate_test_case():
prompt = request.json['prompt']
defect_description = request.json['defectDescription']
# Combine prompt and defect_description
full_prompt = f"{prompt} *** {defect_description} ***"
print(full_prompt)
# If you use a GPT-3 series model, use the following code to call api
'''
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
temperature=0.5,
max_tokens=1000,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=None)
test_steps = response['choices'][0]['text'].strip()
'''
# If you use GPT 3.5 or GPT 4, use the following code to call api
response = openai.ChatCompletion.create(
#engine="gpt-35-turbo",
engine="gpt-4",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": full_prompt}
],
temperature=0,
max_tokens=3000,
#top_p=1,
#frequency_penalty=0,
#presence_penalty=0,
#stop=None
)
test_steps = response['choices'][0]['message']['content'].strip()
print(test_steps)
return jsonify(test_steps=test_steps)
def get_custom_field_gid(project_gid, api_key):
url = f"https://app.asana.com/api/1.0/projects/{project_gid}/custom_field_settings"
headers = {
"Authorization": f"Bearer {api_key}",
}
response = requests.get(url, headers=headers)
result = response.json()
fields_option_gid = {}
for item in result['data']:
if item['custom_field']['type'] in ['enum', 'multi_enum']:
enum_options = item['custom_field']['enum_options']
option_gid = {opt['name']: opt['gid'] for opt in enum_options}
fields_option_gid[item['custom_field']['name']] = {
'enum_options': option_gid, 'type': item['custom_field']['type']}
else:
fields_option_gid[item['custom_field']['name']] = {'type': item['custom_field']['type']}
fields_gid = {}
for item in result['data']:
fields_gid[item['custom_field']['name']] = item['custom_field']['gid']
return fields_gid, fields_option_gid
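# Illustrative shape of the values returned above (hypothetical gids and option names):
# fields_gid -> {"CaseSuite": "12034...", "Test Step": "12035...", ...}
# fields_option_gid -> {"CaseSuite": {"type": "enum", "enum_options": {"Regression": "12036..."}},
# "Test Step": {"type": "text"}, ...}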
def create_custom_fields(formData, fields_gid, fields_option_gid):
custom_field_data = {
"CaseSuite": formData['caseSuite'],
"Manual Test Coverage": formData["manualTestCoverage"],
"Case Source": formData['caseSource'],
"Generated By": formData['generatedBy'],
"MainTicket": formData['mainTicket'],
"Manual Test Environment": formData['manualTestEnvironment'],
"Pre-Condition": formData['preCondition'],
"Test Step": formData['testStep'],
"Expected Result": formData['expectedResult'],
"xRelease": formData['xRelease'],
"Manual Test Priority": formData['Priority']
}
custom_fields = {}
for field_name, field_options in fields_option_gid.items():
field_type = field_options.get('type')
if not (custom_field_data.get(field_name)):
continue
if field_type == 'enum':
# Get the gid of the enum option
enum_gid = field_options['enum_options'].get(custom_field_data.get(field_name))
if enum_gid:
custom_fields[field_name] = enum_gid
elif field_type == 'multi_enum':
# Get the gids of the multi-enum options
if type(custom_field_data.get(field_name))==str:
multi_enum_values = [field_options['enum_options'].get(custom_field_data.get(field_name))]
elif custom_field_data.get(field_name)==None:
multi_enum_values = None
else:
multi_enum_values = [field_options['enum_options'][name] for name in custom_field_data.get(field_name)]
if multi_enum_values:
custom_fields[field_name] = multi_enum_values
else:
# Assume it's a text field
text_value = custom_field_data.get(field_name)
if text_value:
custom_fields[field_name] = text_value
custom_fields_final= {
fields_gid["CaseSuite"]: custom_fields['CaseSuite'],
fields_gid["Manual Test Coverage"]: custom_fields["Manual Test Coverage"],
fields_gid["Case Source"]: custom_fields['Case Source'],
fields_gid["Generated By"]: custom_fields['Generated By'],
fields_gid["MainTicket"]: custom_fields['MainTicket'],
fields_gid["Manual Test Environment"]: custom_fields['Manual Test Environment'],
fields_gid["Pre-Condition"]: custom_fields['Pre-Condition'],
fields_gid["Test Step"]: custom_fields['Test Step'],
fields_gid["Expected Result"]: custom_fields['Expected Result'],
}
return custom_fields_final
@app.route('/create_task', methods=['POST'])
@cross_origin(project_gid)
def create_task():
formData = request.json
print(formData)
client = asana.Client.access_token(ASANA_API_KEY)
client.headers.update({
"Asana-Enable": "new_user_task_lists,new_goal_memberships"
})
task_id_pattern = r"/(\d+)(/f)?$"
match = re.search(task_id_pattern, formData['mainTicket'])
if match:
defect_gid = match.group(1)
else:
raise ValueError("Please provide asana task URL")
defect_task = client.tasks.get_task(
defect_gid, opt_fields=["custom_fields", "Priority"])
for field in defect_task['custom_fields']:
if field['gid'] == '1204151393723065': # xRelease gid
xRelease = field['display_value']
formData['xRelease'] = xRelease
print(xRelease)
if field['gid'] == '977653033713577': # Priority gid
Priority = field['display_value']
formData['Priority'] = Priority
print(Priority)
fields_gid, fields_option_gid = get_custom_field_gid(project_gid, ASANA_API_KEY)
custom_fields = create_custom_fields(formData, fields_gid, fields_option_gid)
if formData['generatedBy']=="Human":
name = "[MS] "+formData['name']
else:
name = "[MS][AI] "+formData['name']
result = client.tasks.create_task({
"projects": [project_gid],
"name": name,
"custom_fields": custom_fields
})
print(result["permalink_url"])
return jsonify(task_url=result["permalink_url"])
if __name__ == '__main__':
app.run()
| [
"You are a helpful assistant.",
"PLACEHOLDER *** PLACEHOLDER ***"
] |
2024-01-10 | MiPa12/openai_gym_ros | training~turtlebot2_training~scripts~start_training.py | #!/usr/bin/env python
import gym
import numpy
import time
import qlearn
from gym import wrappers
# ROS packages required
import rospy
import rospkg
from openai_ros.openai_ros_common import StartOpenAI_ROS_Environment
if __name__ == '__main__':
rospy.init_node('turtlebot2_maze_qlearn',
anonymous=True, log_level=rospy.WARN)
# Init OpenAI_ROS ENV
task_and_robot_environment_name = rospy.get_param(
'/turtlebot2/task_and_robot_environment_name')
env = StartOpenAI_ROS_Environment(
task_and_robot_environment_name)
# Create the Gym environment
rospy.loginfo("Gym environment done")
rospy.loginfo("Starting Learning")
# Set the logging system
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('turtlebot2_training')
outdir = pkg_path + '/training_results'
env = wrappers.Monitor(env, outdir, force=True)
rospy.loginfo("Monitor Wrapper started")
last_time_steps = numpy.ndarray(0)
# Loads parameters from the ROS param server
# Parameters are stored in a yaml file inside the config directory
# They are loaded at runtime by the launch file
Alpha = rospy.get_param("/turtlebot2/alpha")
Epsilon = rospy.get_param("/turtlebot2/epsilon")
Gamma = rospy.get_param("/turtlebot2/gamma")
epsilon_discount = rospy.get_param("/turtlebot2/epsilon_discount")
nepisodes = rospy.get_param("/turtlebot2/nepisodes")
nsteps = rospy.get_param("/turtlebot2/nsteps")
running_step = rospy.get_param("/turtlebot2/running_step")
# Initialises the algorithm that we are going to use for learning
qlearn = qlearn.QLearn(actions=range(env.action_space.n),
alpha=Alpha, gamma=Gamma, epsilon=Epsilon)
initial_epsilon = qlearn.epsilon
start_time = time.time()
highest_reward = 0
# Starts the main training loop: the one about the episodes to do
for x in range(nepisodes):
rospy.logdebug("############### WALL START EPISODE=>" + str(x))
cumulated_reward = 0
done = False
if qlearn.epsilon > 0.05:
qlearn.epsilon *= epsilon_discount
# Initialize the environment and get first state of the robot
observation = env.reset()
state = ''.join(map(str, observation))
# Show on screen the actual situation of the robot
# env.render()
# for each episode, we test the robot for nsteps
for i in range(nsteps):
rospy.logwarn("############### Start Step=>" + str(i))
# Pick an action based on the current state
action = qlearn.chooseAction(state)
rospy.logwarn("Next action is:%d", action)
# Execute the action in the environment and get feedback
observation, reward, done, info = env.step(action)
rospy.logwarn(str(observation) + " " + str(reward))
cumulated_reward += reward
if highest_reward < cumulated_reward:
highest_reward = cumulated_reward
nextState = ''.join(map(str, observation))
# Make the algorithm learn based on the results
rospy.logwarn("# state we were=>" + str(state))
rospy.logwarn("# action that we took=>" + str(action))
rospy.logwarn("# reward that action gave=>" + str(reward))
rospy.logwarn("# episode cumulated_reward=>" +
str(cumulated_reward))
rospy.logwarn(
"# State in which we will start next step=>" + str(nextState))
qlearn.learn(state, action, reward, nextState)
if not (done):
rospy.logwarn("NOT DONE")
state = nextState
else:
rospy.logwarn("DONE")
last_time_steps = numpy.append(last_time_steps, [int(i + 1)])
break
rospy.logwarn("############### END Step=>" + str(i))
#raw_input("Next Step...PRESS KEY")
# rospy.sleep(2.0)
m, s = divmod(int(time.time() - start_time), 60)
h, m = divmod(m, 60)
rospy.logerr(("EP: " + str(x + 1) + " - [alpha: " + str(round(qlearn.alpha, 2)) + " - gamma: " + str(
round(qlearn.gamma, 2)) + " - epsilon: " + str(round(qlearn.epsilon, 2)) + "] - Reward: " + str(
cumulated_reward) + " Time: %d:%02d:%02d" % (h, m, s)))
rospy.loginfo(("\n|" + str(nepisodes) + "|" + str(qlearn.alpha) + "|" + str(qlearn.gamma) + "|" + str(
initial_epsilon) + "*" + str(epsilon_discount) + "|" + str(highest_reward) + "| PICTURE |"))
l = last_time_steps.tolist()
l.sort()
# print("Parameters: a="+str)
rospy.loginfo("Overall score: {:0.2f}".format(last_time_steps.mean()))
rospy.loginfo("Best 100 score: {:0.2f}".format(
reduce(lambda x, y: x + y, l[-100:]) / len(l[-100:])))
env.close()
| [] |
2024-01-10 | MiPa12/openai_gym_ros | training~turtlebot3_training~scripts~start_training.py | #!/usr/bin/env python
import gym
import numpy
import time
import qlearn
from gym import wrappers
# ROS packages required
import rospy
import rospkg
from openai_ros.openai_ros_common import StartOpenAI_ROS_Environment
if __name__ == '__main__':
rospy.init_node('turtlebot3_qlearn',
anonymous=True, log_level=rospy.WARN)
# Init OpenAI_ROS ENV
task_and_robot_environment_name = rospy.get_param(
'/turtlebot3/task_and_robot_environment_name')
env = StartOpenAI_ROS_Environment(
task_and_robot_environment_name)
# Create the Gym environment
rospy.loginfo("Gym environment done")
rospy.loginfo("Starting Learning")
# Set the logging system
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('turtlebot3_training')
outdir = pkg_path + '/training_results'
env = wrappers.Monitor(env, outdir, force=True)
rospy.loginfo("Monitor Wrapper started")
last_time_steps = numpy.ndarray(0)
# Loads parameters from the ROS param server
# Parameters are stored in a yaml file inside the config directory
# They are loaded at runtime by the launch file
Alpha = rospy.get_param("/turtlebot3/alpha")
Epsilon = rospy.get_param("/turtlebot3/epsilon")
Gamma = rospy.get_param("/turtlebot3/gamma")
epsilon_discount = rospy.get_param("/turtlebot3/epsilon_discount")
nepisodes = rospy.get_param("/turtlebot3/nepisodes")
nsteps = rospy.get_param("/turtlebot3/nsteps")
running_step = rospy.get_param("/turtlebot3/running_step")
# Initialises the algorithm that we are going to use for learning
qlearn = qlearn.QLearn(actions=range(env.action_space.n),
alpha=Alpha, gamma=Gamma, epsilon=Epsilon)
initial_epsilon = qlearn.epsilon
start_time = time.time()
highest_reward = 0
# Starts the main training loop: the one about the episodes to do
for x in range(nepisodes):
rospy.logdebug("############### WALL START EPISODE=>" + str(x))
cumulated_reward = 0
done = False
if qlearn.epsilon > 0.05:
qlearn.epsilon *= epsilon_discount
# Initialize the environment and get first state of the robot
observation = env.reset()
state = ''.join(map(str, observation))
# Show on screen the actual situation of the robot
# env.render()
# for each episode, we test the robot for nsteps
for i in range(nsteps):
rospy.logwarn("############### Start Step=>" + str(i))
# Pick an action based on the current state
action = qlearn.chooseAction(state)
rospy.logwarn("Next action is:%d", action)
# Execute the action in the environment and get feedback
observation, reward, done, info = env.step(action)
rospy.logwarn(str(observation) + " " + str(reward))
cumulated_reward += reward
if highest_reward < cumulated_reward:
highest_reward = cumulated_reward
nextState = ''.join(map(str, observation))
# Make the algorithm learn based on the results
rospy.logwarn("# state we were=>" + str(state))
rospy.logwarn("# action that we took=>" + str(action))
rospy.logwarn("# reward that action gave=>" + str(reward))
rospy.logwarn("# episode cumulated_reward=>" +
str(cumulated_reward))
rospy.logwarn(
"# State in which we will start next step=>" + str(nextState))
qlearn.learn(state, action, reward, nextState)
if not (done):
rospy.logwarn("NOT DONE")
state = nextState
else:
rospy.logwarn("DONE")
last_time_steps = numpy.append(last_time_steps, [int(i + 1)])
break
rospy.logwarn("############### END Step=>" + str(i))
#raw_input("Next Step...PRESS KEY")
# rospy.sleep(2.0)
m, s = divmod(int(time.time() - start_time), 60)
h, m = divmod(m, 60)
rospy.logerr(("EP: " + str(x + 1) + " - [alpha: " + str(round(qlearn.alpha, 2)) + " - gamma: " + str(
round(qlearn.gamma, 2)) + " - epsilon: " + str(round(qlearn.epsilon, 2)) + "] - Reward: " + str(
cumulated_reward) + " Time: %d:%02d:%02d" % (h, m, s)))
rospy.loginfo(("\n|" + str(nepisodes) + "|" + str(qlearn.alpha) + "|" + str(qlearn.gamma) + "|" + str(
initial_epsilon) + "*" + str(epsilon_discount) + "|" + str(highest_reward) + "| PICTURE |"))
l = last_time_steps.tolist()
l.sort()
# print("Parameters: a="+str)
rospy.loginfo("Overall score: {:0.2f}".format(last_time_steps.mean()))
rospy.loginfo("Best 100 score: {:0.2f}".format(
reduce(lambda x, y: x + y, l[-100:]) / len(l[-100:])))
env.close()
| [] |
2024-01-10 | MiPa12/openai_gym_ros | openai_ros~openai_ros~src~openai_ros~task_envs~task_envs_list.py | #!/usr/bin/env python
from gym.envs.registration import register
from gym import envs
def RegisterOpenAI_Ros_Env(task_env, max_episode_steps=10000):
"""
Registers all the ENVS supported in OpenAI ROS. This way we can load them
with variable limits.
    Here is where you have to PLACE YOUR NEW TASK ENV, to be registered and accessible.
    return: False if the Task_Env wasn't registered, True if it was.
"""
###########################################################################
# MovingCube Task-Robot Envs
result = True
# Cubli Moving Cube
if task_env == 'TurtleBot2Maze-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.turtlebot2.turtlebot2_maze:TurtleBot2MazeEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.turtlebot2 import turtlebot2_maze
elif task_env == 'MyTurtleBot2Wall-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.turtlebot2.turtlebot2_wall:TurtleBot2WallEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.turtlebot2 import turtlebot2_wall
elif task_env == 'MyTurtleBot2WillowGarage-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.turtlebot2.turtlebot2_willow_garage:TurtleBot2WillowGarageEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.turtlebot2 import turtlebot2_willow_garage
elif task_env == 'TurtleBot3World-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.turtlebot3.turtlebot3_world:TurtleBot3WorldEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.turtlebot3 import turtlebot3_world
# Add here your Task Envs to be registered
else:
result = False
###########################################################################
if result:
# We check that it was really registered
supported_gym_envs = GetAllRegisteredGymEnvs()
#print("REGISTERED GYM ENVS===>"+str(supported_gym_envs))
assert (task_env in supported_gym_envs), "The Task_Robot_ENV given is not Registered ==>" + \
str(task_env)
return result
def GetAllRegisteredGymEnvs():
"""
Returns a List of all the registered Envs in the system
return EX: ['Copy-v0', 'RepeatCopy-v0', 'ReversedAddition-v0', ... ]
"""
all_envs = envs.registry.all()
env_ids = [env_spec.id for env_spec in all_envs]
return env_ids
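# --- Illustrative usage sketch (not part of the original module) ---
# Registers one of the supported task environments and builds it with gym.
# Actually running it requires a ROS master, the matching Gazebo simulation and
# the ros_ws_abspath parameter the task env expects, so treat this as a sketch
# of the call sequence rather than a standalone script.
if __name__ == '__main__':
    import gym
    if RegisterOpenAI_Ros_Env('TurtleBot3World-v0', max_episode_steps=1000):
        env = gym.make('TurtleBot3World-v0')
        obs = env.reset()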
| [] |
2024-01-10 | MiPa12/openai_gym_ros | openai_ros~openai_ros~src~openai_ros~task_envs~turtlebot2~turtlebot2_willow_garage.py | import rospy
import numpy
import time
import math
from gym import spaces
from openai_ros.robot_envs import turtlebot2_env
from gym.envs.registration import register
from sensor_msgs.msg import LaserScan
from std_msgs.msg import Header
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
class TurtleBot2WillowGarageEnv(turtlebot2_env.TurtleBot2Env):
def __init__(self):
"""
This Task Env is designed for having the TurtleBot2 in some kind of maze.
It will learn how to move around the maze without crashing.
"""
# This is the path where the simulation files, the Task and the Robot gits will be downloaded if not there
# This parameter HAS to be set up in the MAIN launch of the AI RL script
ros_ws_abspath = rospy.get_param("/turtlebot2/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path "+ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p "+ros_ws_abspath + \
"/src;cd "+ros_ws_abspath+";catkin_make"
ROSLauncher(rospackage_name="turtlebot_gazebo",
launch_file_name="willow_garage.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/turtlebot2/config",
yaml_file_name="turtlebot2_willow_garage.yaml")
# Here we will add any init functions prior to starting the MyRobotEnv
super(TurtleBot2WillowGarageEnv, self).__init__(ros_ws_abspath)
# Only variable needed to be set here
number_actions = rospy.get_param('/turtlebot2/n_actions')
self.action_space = spaces.Discrete(number_actions)
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-numpy.inf, numpy.inf)
#number_observations = rospy.get_param('/turtlebot2/n_observations')
"""
We set the Observation space for the 6 observations
cube_observations = [
round(current_disk_roll_vel, 0),
round(y_distance, 1),
round(roll, 1),
round(pitch, 1),
round(y_linear_speed,1),
round(yaw, 1),
]
"""
# Actions and Observations
self.dec_obs = rospy.get_param(
"/turtlebot2/number_decimals_precision_obs", 1)
self.linear_forward_speed = rospy.get_param(
'/turtlebot2/linear_forward_speed')
self.linear_turn_speed = rospy.get_param(
'/turtlebot2/linear_turn_speed')
self.angular_speed = rospy.get_param('/turtlebot2/angular_speed')
self.init_linear_forward_speed = rospy.get_param(
'/turtlebot2/init_linear_forward_speed')
self.init_linear_turn_speed = rospy.get_param(
'/turtlebot2/init_linear_turn_speed')
self.n_observations = rospy.get_param('/turtlebot2/n_observations')
self.min_range = rospy.get_param('/turtlebot2/min_range')
self.max_laser_value = rospy.get_param('/turtlebot2/max_laser_value')
self.min_laser_value = rospy.get_param('/turtlebot2/min_laser_value')
# We create two arrays based on the binary values that will be assigned
# In the discretization method.
#laser_scan = self._check_laser_scan_ready()
laser_scan = self.get_laser_scan()
rospy.logdebug("laser_scan len===>"+str(len(laser_scan.ranges)))
# Laser data
self.laser_scan_frame = laser_scan.header.frame_id
# Number of laser reading jumped
self.new_ranges = int(
math.ceil(float(len(laser_scan.ranges)) / float(self.n_observations)))
rospy.logdebug("n_observations===>"+str(self.n_observations))
rospy.logdebug(
"new_ranges, jumping laser readings===>"+str(self.new_ranges))
high = numpy.full((self.n_observations), self.max_laser_value)
low = numpy.full((self.n_observations), self.min_laser_value)
# We only use two integers
self.observation_space = spaces.Box(low, high)
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>" +
str(self.observation_space))
# Rewards
self.forwards_reward = rospy.get_param("/turtlebot2/forwards_reward")
self.turn_reward = rospy.get_param("/turtlebot2/turn_reward")
self.end_episode_points = rospy.get_param(
"/turtlebot2/end_episode_points")
self.cumulated_steps = 0.0
self.laser_filtered_pub = rospy.Publisher(
'/turtlebot2/laser/scan_filtered', LaserScan, queue_size=1)
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
self.move_base(self.init_linear_forward_speed,
self.init_linear_turn_speed,
epsilon=0.05,
update_rate=10,
min_laser_distance=-1)
return True
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
"""
# For Info Purposes
self.cumulated_reward = 0.0
# Set to false Done, because its calculated asyncronously
self._episode_done = False
        # We wait a small amount of time to start everything because in very fast resets, laser scan values are sluggish
        # and sometimes still have values from the prior position that triggered the done.
time.sleep(1.0)
# TODO: Add reset of published filtered laser readings
laser_scan = self.get_laser_scan()
discretized_ranges = laser_scan.ranges
self.publish_filtered_laser_scan(laser_original_data=laser_scan,
new_filtered_laser_range=discretized_ranges)
def _set_action(self, action):
"""
This set action will Set the linear and angular speed of the turtlebot2
based on the action number given.
        :param action: The action integer that sets what movement to do next.
"""
rospy.logdebug("Start Set Action ==>"+str(action))
# We convert the actions to speed movements to send to the parent class CubeSingleDiskEnv
if action == 0: # FORWARD
linear_speed = self.linear_forward_speed
angular_speed = 0.0
self.last_action = "FORWARDS"
elif action == 1: # LEFT
linear_speed = self.linear_turn_speed
angular_speed = self.angular_speed
self.last_action = "TURN_LEFT"
elif action == 2: # RIGHT
linear_speed = self.linear_turn_speed
angular_speed = -1*self.angular_speed
self.last_action = "TURN_RIGHT"
# We tell TurtleBot2 the linear and angular speed to set to execute
self.move_base(linear_speed,
angular_speed,
epsilon=0.05,
update_rate=10,
min_laser_distance=self.min_range)
rospy.logdebug("END Set Action ==>"+str(action) +
", NAME="+str(self.last_action))
def _get_obs(self):
"""
Here we define what sensor data defines our robots observations
        To know which Variables we have access to, we need to read the
TurtleBot2Env API DOCS
:return:
"""
rospy.logdebug("Start Get Observation ==>")
# We get the laser scan data
laser_scan = self.get_laser_scan()
rospy.logdebug("BEFORE DISCRET _episode_done==>" +
str(self._episode_done))
discretized_observations = self.discretize_observation(laser_scan,
self.new_ranges
)
rospy.logdebug("Observations==>"+str(discretized_observations))
rospy.logdebug("AFTER DISCRET_episode_done==>"+str(self._episode_done))
rospy.logdebug("END Get Observation ==>")
return discretized_observations
def _is_done(self, observations):
if self._episode_done:
rospy.logdebug("TurtleBot2 is Too Close to wall==>" +
str(self._episode_done))
else:
rospy.logerr("TurtleBot2 is Ok ==>")
return self._episode_done
def _compute_reward(self, observations, done):
if not done:
if self.last_action == "FORWARDS":
reward = self.forwards_reward
else:
reward = self.turn_reward
else:
reward = -1*self.end_episode_points
rospy.logdebug("reward=" + str(reward))
self.cumulated_reward += reward
rospy.logdebug("Cumulated_reward=" + str(self.cumulated_reward))
self.cumulated_steps += 1
rospy.logdebug("Cumulated_steps=" + str(self.cumulated_steps))
return reward
# Internal TaskEnv Methods
def discretize_observation(self, data, new_ranges):
"""
Discards all the laser readings that are not multiple in index of new_ranges
value.
"""
self._episode_done = False
discretized_ranges = []
filtered_range = []
#mod = len(data.ranges)/new_ranges
mod = new_ranges
max_laser_value = data.range_max
min_laser_value = data.range_min
rospy.logdebug("data=" + str(data))
rospy.logwarn("mod=" + str(mod))
for i, item in enumerate(data.ranges):
if (i % mod == 0):
if item == float('Inf') or numpy.isinf(item):
# discretized_ranges.append(self.max_laser_value)
discretized_ranges.append(
round(max_laser_value, self.dec_obs))
elif numpy.isnan(item):
# discretized_ranges.append(self.min_laser_value)
discretized_ranges.append(
round(min_laser_value, self.dec_obs))
else:
# discretized_ranges.append(int(item))
discretized_ranges.append(round(item, self.dec_obs))
if (self.min_range > item > 0):
rospy.logerr("done Validation >>> item=" +
str(item)+"< "+str(self.min_range))
self._episode_done = True
else:
rospy.logwarn("NOT done Validation >>> item=" +
str(item)+"< "+str(self.min_range))
# We add last value appended
filtered_range.append(discretized_ranges[-1])
else:
# We add value zero
filtered_range.append(0.1)
rospy.logdebug(
"Size of observations, discretized_ranges==>"+str(len(discretized_ranges)))
self.publish_filtered_laser_scan(laser_original_data=data,
new_filtered_laser_range=discretized_ranges)
return discretized_ranges
def publish_filtered_laser_scan(self, laser_original_data, new_filtered_laser_range):
rospy.logdebug("new_filtered_laser_range==>" +
str(new_filtered_laser_range))
laser_filtered_object = LaserScan()
h = Header()
# Note you need to call rospy.init_node() before this will work
h.stamp = rospy.Time.now()
h.frame_id = laser_original_data.header.frame_id
laser_filtered_object.header = h
laser_filtered_object.angle_min = laser_original_data.angle_min
laser_filtered_object.angle_max = laser_original_data.angle_max
new_angle_incr = abs(laser_original_data.angle_max -
laser_original_data.angle_min) / len(new_filtered_laser_range)
#laser_filtered_object.angle_increment = laser_original_data.angle_increment
laser_filtered_object.angle_increment = new_angle_incr
laser_filtered_object.time_increment = laser_original_data.time_increment
laser_filtered_object.scan_time = laser_original_data.scan_time
laser_filtered_object.range_min = laser_original_data.range_min
laser_filtered_object.range_max = laser_original_data.range_max
laser_filtered_object.ranges = []
laser_filtered_object.intensities = []
for item in new_filtered_laser_range:
if item == 0.0:
laser_distance = 0.1
else:
laser_distance = item
laser_filtered_object.ranges.append(laser_distance)
laser_filtered_object.intensities.append(item)
self.laser_filtered_pub.publish(laser_filtered_object)
| [] |
2024-01-10 | ckw017/ray | rllib~examples~env~cliff_walking_wall_env.py | import gym
from gym import spaces
ACTION_UP = 0
ACTION_RIGHT = 1
ACTION_DOWN = 2
ACTION_LEFT = 3
class CliffWalkingWallEnv(gym.Env):
"""Modified version of the CliffWalking environment from OpenAI Gym
with walls instead of a cliff.
### Description
The board is a 4x12 matrix, with (using NumPy matrix indexing):
- [3, 0] or obs==36 as the start at bottom-left
- [3, 11] or obs==47 as the goal at bottom-right
- [3, 1..10] or obs==37...46 as the cliff at bottom-center
An episode terminates when the agent reaches the goal.
### Actions
There are 4 discrete deterministic actions:
- 0: move up
- 1: move right
- 2: move down
- 3: move left
You can also use the constants ACTION_UP, ACTION_RIGHT, ... defined above.
### Observations
There are 3x12 + 2 possible states, not including the walls. If an action
would move an agent into one of the walls, it simply stays in the same position.
### Reward
Each time step incurs -1 reward, except reaching the goal which gives +10 reward.
"""
def __init__(self, seed=42) -> None:
self.observation_space = spaces.Discrete(48)
self.action_space = spaces.Discrete(4)
self.observation_space.seed(seed)
self.action_space.seed(seed)
def reset(self):
self.position = 36
return self.position
def step(self, action):
x = self.position // 12
y = self.position % 12
# UP
if action == ACTION_UP:
x = max(x - 1, 0)
# RIGHT
elif action == ACTION_RIGHT:
if self.position != 36:
y = min(y + 1, 11)
# DOWN
elif action == ACTION_DOWN:
if self.position < 25 or self.position > 34:
x = min(x + 1, 3)
# LEFT
elif action == ACTION_LEFT:
if self.position != 47:
y = max(y - 1, 0)
else:
raise ValueError(f"action {action} not in {self.action_space}")
self.position = x * 12 + y
done = self.position == 47
reward = -1 if not done else 10
return self.position, reward, done, {}
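# --- Example rollout (illustrative, not part of the original file) ---
# Follows one valid path around the walls described in the docstring: up out of
# the start, right along row 2, then down into the goal. That is 12 steps at -1
# followed by +10 on the final step, so the expected return is -2.
if __name__ == "__main__":
    env = CliffWalkingWallEnv()
    obs = env.reset()  # start at state 36 (bottom-left)
    plan = [ACTION_UP] + [ACTION_RIGHT] * 11 + [ACTION_DOWN]
    total_reward = 0
    for action in plan:
        obs, reward, done, info = env.step(action)
        total_reward += reward
        if done:
            break
    print(f"final state={obs}, total reward={total_reward}")  # expect 47 and -2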
| [] |
2024-01-10 | flammalpha/chatgpt-discordbot-python | text_generation.py | from openai import AsyncOpenAI, OpenAI
import tiktoken
class Chat:
def __init__(self, token: str, model_version: str) -> None:
self.api_key = token
self.model_version = model_version
def get_response(self, message_history: dict, model_version: str = None) -> str:
'''Fetches response from ChatGPT with entire message history'''
fetch_model_version = model_version if model_version is not None else self.model_version
print("Fetching response from ChatGPT")
completion = OpenAI(api_key=self.api_key).chat.completions.create(
model=fetch_model_version, messages=message_history)
response = completion.choices[0].message.content
# print(response)
print(f"Response with {len(response)} characters")
return response
async def get_response_async(self, message_history: dict, model_version: str = None) -> str:
'''Fetches response from ChatGPT with entire message history'''
fetch_model_version = model_version if model_version is not None else self.model_version
print("Fetching response from ChatGPT")
completion = await AsyncOpenAI(api_key=self.api_key).chat.completions.create(
model=fetch_model_version, messages=message_history)
response = completion.choices[0].message.content
# print(response)
print(f"Response with {len(response)} characters")
return response
def calculate_tokens(self, messages: dict) -> int:
'''Calculates an estimate of the tokens used by message history'''
        # tiktoken's Encoding exposes encode(); summing the encoded lengths of
        # each message's content gives a rough token estimate for the history
        encoding = tiktoken.encoding_for_model(self.model_version)
        total_tokens = 0
        for entry in messages:
            # each entry is a dict like {"role": ..., "content": ...}
            total_tokens += len(encoding.encode(entry["content"]))
        return total_tokens
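# --- Example usage (illustrative sketch, not part of the original file) ---
# Builds a Chat helper and sends a one-message history. Reading the key from
# the OPENAI_API_KEY environment variable is an assumption; any valid key works.
if __name__ == "__main__":
    import os
    chat = Chat(token=os.environ["OPENAI_API_KEY"], model_version="gpt-3.5-turbo")
    history = [{"role": "user", "content": "Say hello in one short sentence."}]
    print(chat.get_response(history))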
| [] |
2024-01-10 | alonsodelcordova/chatbot-open_ia-python | screen.py | import openai
import gradio
openai.api_key = "{your_key}"
messages = [{"role": "system", "content": "You are a financial experts that specializes in real estate investment and negotiation"}]
def CustomChatGPT(user_input):
messages.append({"role": "user", "content": user_input})
response = openai.ChatCompletion.create(
model = "gpt-3.5-turbo",
messages = messages
)
ChatGPT_reply = response["choices"][0]["message"]["content"]
messages.append({"role": "assistant", "content": ChatGPT_reply})
return ChatGPT_reply
demo = gradio.Interface(fn=CustomChatGPT, inputs = "text", outputs = "text", title = "Real Estate Pro")
demo.launch(share=True) | [
"You are a financial experts that specializes in real estate investment and negotiation"
] |
2024-01-10 | alonsodelcordova/chatbot-open_ia-python | console.py | import openai
openai.api_key = "{your_key}"
messages = []
system_msg = input("¿Qué tipo de chat bot te gustaría crear?\n")
messages.append({"role": "system", "content": system_msg})
print("¡Tu nuevo asistente virtual está listo! \nIngrese su mensaje o escriba quit() para salir.")
while input != "quit()":
message = input()
messages.append({"role": "user", "content": message})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages)
reply = response["choices"][0]["message"]["content"]
messages.append({"role": "assistant", "content": reply})
print("\n" + reply + "\n")
print("Ingrese su mensaje o escriba quit() para salir.") | [] |
2024-01-10 | Auzdora/Aurora | functions.py | from langchain.vectorstores.faiss import FAISS
from langchain.embeddings import OpenAIEmbeddings
from configer import ConfigLoader
configer = ConfigLoader()
OPENAI_API_KEY = configer.get_api_key()
OPENAI_API_BASE = configer.get_api_base()
embedding_model = OpenAIEmbeddings(
openai_api_key=OPENAI_API_KEY,
openai_api_base=OPENAI_API_BASE
)
def search_personal_vector_db(query: str):
"""Let GPT return a modified user query to search the local database
and answer local database information.
"""
db = FAISS.load_local(
configer.config["system"]["vector_db"]["store_path"],
embeddings=embedding_model
)
docs = db.similarity_search(query)
return docs
FUNCTION_LIB = {
"search_personal_vector_db": search_personal_vector_db,
}
FUNCTION = [
{
"name": "search_personal_vector_db",
"description": "Let GPT return a modified user query to search the local database \
and answer local database information.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "User input query, GPT can modified for better similarity search",
},
},
"required": ["query"]
},
},
]
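# --- Illustrative dispatch sketch (not part of the original file) ---
# Shows how FUNCTION and FUNCTION_LIB are meant to fit together with
# OpenAI-style function calling: the model picks a function declared in
# FUNCTION, and FUNCTION_LIB maps that name back to the local implementation.
# The client construction below is an assumption for illustration, not
# something defined elsewhere in this module.
if __name__ == "__main__":
    import json
    from openai import OpenAI
    client = OpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_API_BASE)
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "What notes do I have about FAISS?"}],
        functions=FUNCTION,
    )
    call = response.choices[0].message.function_call
    if call is not None:
        args = json.loads(call.arguments)
        docs = FUNCTION_LIB[call.name](**args)
        print(docs)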
| [] |
2024-01-10 | Auzdora/Aurora | dump_data.py | from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import NotionDirectoryLoader
from langchain.vectorstores.faiss import FAISS
from langchain.embeddings import OpenAIEmbeddings
import pickle
from configer import ConfigLoader
text_spliter = RecursiveCharacterTextSplitter(
chunk_size = 1000,
chunk_overlap = 100,
length_function = len,
add_start_index = True,
)
class DataDumper:
"""
    DataDumper is a class that dumps data from the source documents into a local vector store, and loads that store back for retrieval.
"""
def __init__(self, config_loader: ConfigLoader) -> None:
"""
Args:
config_loader (ConfigLoader): config loader
"""
self.OPEN_AI_KEY = config_loader.get_api_key()
self.OPEN_AI_BASE = config_loader.get_api_base()
self.config_loader = config_loader
self.text_spliter = RecursiveCharacterTextSplitter(
chunk_size = 5000,
chunk_overlap = 1000,
length_function = len,
add_start_index = True,
)
self.embedding_model = OpenAIEmbeddings(
openai_api_key=self.OPEN_AI_KEY,
openai_api_base=self.OPEN_AI_BASE
)
if self.config_loader.config["system"]["vector_db"]["localized"] is False:
print("dumping your local data ...")
raw_documents = []
# data connection
if self.config_loader.config["system"]["database"].get("notion_db"):
loader = NotionDirectoryLoader("Notion_DB")
notion_raw_docs = loader.load()
raw_documents.append(notion_raw_docs)
if self.config_loader.config["system"]["database"].get("mardown_db"):
pass
split_doc = self.text_spliter.split_documents(raw_documents[0])
vector_db = FAISS.from_documents(split_doc, self.embedding_model)
# split documents and add more documents
for raw_doc in raw_documents[1:]:
split_doc = self.text_spliter.split_documents(raw_doc)
vector_db.add_documents(split_doc, self.embedding_model)
self.dump_vector_db(vector_db)
self.config_loader.config["system"]["vector_db"]["localized"] = True
self.config_loader.write()
self.store_path = self.config_loader.config["system"]["vector_db"]["store_path"]
    def dump_vector_db(self, vector_db):
        """
        Dump the vector db to the store path from the config, so that
        get_vector_db() loads it from the same location.
        """
        vector_db.save_local(self.config_loader.config["system"]["vector_db"]["store_path"])
def get_vector_db(self) -> FAISS:
"""
load vector db from local
"""
return FAISS.load_local(self.store_path, embeddings=self.embedding_model)
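# --- Example usage (illustrative sketch, not part of the original file) ---
# Builds the dumper from the project's ConfigLoader (which triggers the one-time
# dump when the store is not yet localized) and runs a similarity search against
# the resulting FAISS index. The query text is only an example.
if __name__ == "__main__":
    dumper = DataDumper(ConfigLoader())
    db = dumper.get_vector_db()
    for doc in db.similarity_search("notes about vector databases", k=2):
        print(doc.page_content[:200])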
| [] |
2024-01-10 | CodeGeek04/vpet-interpreter | vpet~interpreter~interpreter.py | """
Right off the bat, to any contributors (a message from Killian):
First of all, THANK YOU. Open Interpreter is ALIVE, ALL OVER THE WORLD because of YOU.
While this project is rapidly growing, I've decided it's best for us to allow some technical debt.
The code here has duplication. It has imports in weird places. It has been spaghettified to add features more quickly.
In my opinion **this is critical** to keep up with the pace of demand for this project.
At the same time, I plan on pushing a significant re-factor of `interpreter.py` and `code_interpreter.py` ~ September 11th.
After the re-factor, Open Interpreter's source code will be much simpler, and much more fun to dive into.
Especially if you have ideas and **EXCITEMENT** about the future of this project, chat with me on discord: https://discord.gg/6p3fD6rBVm
- killian
"""
from .cli import cli
from .utils import merge_deltas, parse_partial_json
from .message_block import MessageBlock
from .code_block import CodeBlock
from .code_interpreter import CodeInterpreter
from .get_hf_llm import get_hf_llm
import os
import time
import traceback
import json
import platform
import openai
import litellm
import pkg_resources
import getpass
import requests
import readline
import tokentrim as tt
from rich import print
from rich.markdown import Markdown
from rich.rule import Rule
# Function schema for gpt-4
function_schema = {
"name": "run_code",
"description":
"Executes code on the user's machine and returns the output",
"parameters": {
"type": "object",
"properties": {
"language": {
"type": "string",
"description":
"The programming language",
"enum": ["python", "R", "shell", "applescript", "javascript", "html"]
},
"code": {
"type": "string",
"description": "The code to execute"
}
},
"required": ["language", "code"]
},
}
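# --- Illustrative note (not part of the original file) ---
# With the schema above, a model response that decides to run code carries a
# function_call payload shaped roughly like this (the values are made up):
# {
#     "name": "run_code",
#     "arguments": "{\"language\": \"python\", \"code\": \"print(2 + 2)\"}"
# }
# parse_partial_json() is what later turns the (possibly still streaming)
# `arguments` string into the dict stored under "parsed_arguments".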
# Message for when users don't have an OpenAI API key.
missing_api_key_message = """> OpenAI API key not found
To use `GPT-4` (recommended) please provide an OpenAI API key.
To use `Code-Llama` (free but less capable) press `enter`.
"""
# Message for when users don't have an OpenAI API key.
missing_azure_info_message = """> Azure OpenAI Service API info not found
To use `GPT-4` (recommended) please provide an Azure OpenAI API key, a API base, a deployment name and a API version.
To use `Code-Llama` (free but less capable) press `enter`.
"""
confirm_mode_message = """
**Open Interpreter** will require approval before running code. Use `interpreter -y` to bypass this.
Press `CTRL-C` to exit.
"""
class Interpreter:
  def __init__(self, messages=None, auto_run=False):
    # avoid sharing a mutable default list between Interpreter instances
    self.messages = messages if messages is not None else []
self.temperature = 0.001
self.api_key = None
self.auto_run = auto_run
self.local = False
self.model = "gpt-4"
self.debug_mode = False
self.api_base = None # Will set it to whatever OpenAI wants
self.context_window = 2000 # For local models only
self.max_tokens = 750 # For local models only
# Azure OpenAI
self.use_azure = False
self.azure_api_base = None
self.azure_api_version = None
self.azure_deployment_name = None
self.azure_api_type = "azure"
# Get default system message
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'system_message.txt'), 'r') as f:
self.system_message = f.read().strip()
# Store Code Interpreter instances for each language
self.code_interpreters = {}
# No active block to start
# (blocks are visual representation of messages on the terminal)
self.active_block = None
# Note: While Open Interpreter can use Llama, we will prioritize gpt-4.
# gpt-4 is faster, smarter, can call functions, and is all-around easier to use.
# This makes gpt-4 better aligned with Open Interpreters priority to be easy to use.
self.llama_instance = None
def cli(self):
# The cli takes the current instance of Interpreter,
# modifies it according to command line flags, then runs chat.
cli(self)
def get_info_for_system_message(self):
"""
    Gets relevant information for the system message.
"""
info = ""
# Add user info
username = getpass.getuser()
current_working_directory = os.getcwd()
operating_system = platform.system()
info += f"[User Info]\nName: {username}\nCWD: {current_working_directory}\nOS: {operating_system}"
if not self.local:
# Open Procedures is an open-source database of tiny, structured coding tutorials.
# We can query it semantically and append relevant tutorials/procedures to our system message:
# Use the last two messages' content or function call to semantically search
query = []
for message in self.messages[-2:]:
message_for_semantic_search = {"role": message["role"]}
if "content" in message:
message_for_semantic_search["content"] = message["content"]
if "function_call" in message and "parsed_arguments" in message["function_call"]:
message_for_semantic_search["function_call"] = message["function_call"]["parsed_arguments"]
query.append(message_for_semantic_search)
# Use them to query Open Procedures
url = "https://open-procedures.replit.app/search/"
try:
relevant_procedures = requests.get(url, data=json.dumps(query)).json()["procedures"]
info += "\n\n# Recommended Procedures\n" + "\n---\n".join(relevant_procedures) + "\nIn your plan, include steps and, if present, **EXACT CODE SNIPPETS** (especially for depracation notices, **WRITE THEM INTO YOUR PLAN -- underneath each numbered step** as they will VANISH once you execute your first line of code, so WRITE THEM DOWN NOW if you need them) from the above procedures if they are relevant to the task. Again, include **VERBATIM CODE SNIPPETS** from the procedures above if they are relevent to the task **directly in your plan.**"
except:
# For someone, this failed for a super secure SSL reason.
        # Since it's not strictly necessary, let's worry about that another day. Should probably log this somehow though.
pass
elif self.local:
# Tell Code-Llama how to run code.
info += "\n\nTo run code, write a fenced code block (i.e ```python, R or ```shell) in markdown. When you close it with ```, it will be run. You'll then be given its output."
# We make references in system_message.txt to the "function" it can call, "run_code".
return info
def reset(self):
"""
Resets the interpreter.
"""
self.messages = []
self.code_interpreters = {}
def load(self, messages):
self.messages = messages
def handle_undo(self, arguments):
# Removes all messages after the most recent user entry (and the entry itself).
# Therefore user can jump back to the latest point of conversation.
# Also gives a visual representation of the messages removed.
if len(self.messages) == 0:
return
# Find the index of the last 'role': 'user' entry
last_user_index = None
for i, message in enumerate(self.messages):
if message.get('role') == 'user':
last_user_index = i
removed_messages = []
# Remove all messages after the last 'role': 'user'
if last_user_index is not None:
removed_messages = self.messages[last_user_index:]
self.messages = self.messages[:last_user_index]
print("") # Aesthetics.
# Print out a preview of what messages were removed.
for message in removed_messages:
if 'content' in message and message['content'] != None:
print(Markdown(f"**Removed message:** `\"{message['content'][:30]}...\"`"))
elif 'function_call' in message:
print(Markdown(f"**Removed codeblock**")) # TODO: Could add preview of code removed here.
print("") # Aesthetics.
def handle_help(self, arguments):
commands_description = {
"%debug [true/false]": "Toggle debug mode. Without arguments or with 'true', it enters debug mode. With 'false', it exits debug mode.",
"%reset": "Resets the current session.",
"%undo": "Remove previous messages and its response from the message history.",
"%save_message [path]": "Saves messages to a specified JSON path. If no path is provided, it defaults to 'messages.json'.",
"%load_message [path]": "Loads messages from a specified JSON path. If no path is provided, it defaults to 'messages.json'.",
"%help": "Show this help message.",
}
base_message = [
"> **Available Commands:**\n\n"
]
# Add each command and its description to the message
for cmd, desc in commands_description.items():
base_message.append(f"- `{cmd}`: {desc}\n")
additional_info = [
"\n\nFor further assistance, please join our community Discord or consider contributing to the project's development."
]
# Combine the base message with the additional info
full_message = base_message + additional_info
print(Markdown("".join(full_message)))
def handle_debug(self, arguments=None):
if arguments == "" or arguments == "true":
print(Markdown("> Entered debug mode"))
print(self.messages)
self.debug_mode = True
elif arguments == "false":
print(Markdown("> Exited debug mode"))
self.debug_mode = False
else:
print(Markdown("> Unknown argument to debug command."))
def handle_reset(self, arguments):
self.reset()
print(Markdown("> Reset Done"))
def default_handle(self, arguments):
print(Markdown("> Unknown command"))
self.handle_help(arguments)
def handle_save_message(self, json_path):
if json_path == "":
json_path = "messages.json"
if not json_path.endswith(".json"):
json_path += ".json"
with open(json_path, 'w') as f:
json.dump(self.messages, f, indent=2)
print(Markdown(f"> messages json export to {os.path.abspath(json_path)}"))
def handle_load_message(self, json_path):
if json_path == "":
json_path = "messages.json"
if not json_path.endswith(".json"):
json_path += ".json"
with open(json_path, 'r') as f:
self.load(json.load(f))
print(Markdown(f"> messages json loaded from {os.path.abspath(json_path)}"))
def handle_command(self, user_input):
# split the command into the command and the arguments, by the first whitespace
switch = {
"help": self.handle_help,
"debug": self.handle_debug,
"reset": self.handle_reset,
"save_message": self.handle_save_message,
"load_message": self.handle_load_message,
"undo": self.handle_undo,
}
user_input = user_input[1:].strip() # Capture the part after the `%`
command = user_input.split(" ")[0]
arguments = user_input[len(command):].strip()
action = switch.get(command,
self.default_handle) # Get the function from the dictionary, or default_handle if not found
action(arguments) # Execute the function
def chat(self, message=None, return_messages=False):
# Connect to an LLM (an large language model)
if not self.local:
# gpt-4
self.verify_api_key()
# ^ verify_api_key may set self.local to True, so we run this as an 'if', not 'elif':
if self.local:
# Code-Llama
if self.llama_instance == None:
# Find or install Code-Llama
try:
self.llama_instance = get_hf_llm(self.model, self.debug_mode, self.context_window)
if self.llama_instance == None:
# They cancelled.
return
except:
traceback.print_exc()
# If it didn't work, apologize and switch to GPT-4
print(Markdown("".join([
f"> Failed to install `{self.model}`.",
f"\n\n**Common Fixes:** You can follow our simple setup docs at the link below to resolve common errors.\n\n```\nhttps://github.com/KillianLucas/open-interpreter/tree/main/docs\n```",
f"\n\n**If you've tried that and you're still getting an error, we have likely not built the proper `{self.model}` support for your system.**",
"\n\n*( Running language models locally is a difficult task!* If you have insight into the best way to implement this across platforms/architectures, please join the Open Interpreter community Discord and consider contributing the project's development. )",
"\n\nPress enter to switch to `GPT-4` (recommended)."
])))
input()
# Switch to GPT-4
self.local = False
self.model = "gpt-3.5-turbo"
self.verify_api_key()
# Display welcome message
welcome_message = ""
if self.debug_mode:
welcome_message += "> Entered debug mode"
# If self.local, we actually don't use self.model
# (self.auto_run is like advanced usage, we display no messages)
if not self.local and not self.auto_run:
if self.use_azure:
notice_model = f"{self.azure_deployment_name} (Azure)"
else:
notice_model = f"{self.model.upper()}"
welcome_message += f"\n> Model set to `{notice_model}`\n\n**Tip:** To run locally, use `interpreter --local`"
if self.local:
welcome_message += f"\n> Model set to `{self.model}`"
# If not auto_run, tell the user we'll ask permission to run code
# We also tell them here how to exit Open Interpreter
if not self.auto_run:
welcome_message += "\n\n" + confirm_mode_message
welcome_message = welcome_message.strip()
# Print welcome message with newlines on either side (aesthetic choice)
# unless we're starting with a blockquote (aesthetic choice)
if welcome_message != "":
if welcome_message.startswith(">"):
print(Markdown(welcome_message), '')
else:
print('', Markdown(welcome_message), '')
# Check if `message` was passed in by user
if message:
      # If it was, we respond non-interactively
self.messages.append({"role": "user", "content": message})
self.respond()
else:
# If it wasn't, we start an interactive chat
while True:
try:
user_input = input("> ").strip()
except EOFError:
break
except KeyboardInterrupt:
print() # Aesthetic choice
break
# Use `readline` to let users up-arrow to previous user messages,
# which is a common behavior in terminals.
readline.add_history(user_input)
# If the user input starts with a `%` or `/`, it's a command
if user_input.startswith("%") or user_input.startswith("/"):
self.handle_command(user_input)
continue
# Add the user message to self.messages
self.messages.append({"role": "user", "content": user_input})
# Respond, but gracefully handle CTRL-C / KeyboardInterrupt
try:
self.respond()
except KeyboardInterrupt:
pass
finally:
# Always end the active block. Multiple Live displays = issues
self.end_active_block()
if return_messages:
return self.messages
def verify_api_key(self):
"""
Makes sure we have an AZURE_API_KEY or OPENAI_API_KEY.
"""
if self.use_azure:
all_env_available = (
('AZURE_API_KEY' in os.environ or 'OPENAI_API_KEY' in os.environ) and
'AZURE_API_BASE' in os.environ and
'AZURE_API_VERSION' in os.environ and
'AZURE_DEPLOYMENT_NAME' in os.environ)
if all_env_available:
self.api_key = os.environ.get('AZURE_API_KEY') or os.environ['OPENAI_API_KEY']
self.azure_api_base = os.environ['AZURE_API_BASE']
self.azure_api_version = os.environ['AZURE_API_VERSION']
self.azure_deployment_name = os.environ['AZURE_DEPLOYMENT_NAME']
self.azure_api_type = os.environ.get('AZURE_API_TYPE', 'azure')
else:
# This is probably their first time here!
self._print_welcome_message()
time.sleep(1)
print(Rule(style="white"))
print(Markdown(missing_azure_info_message), '', Rule(style="white"), '')
response = input("Azure OpenAI API key: ")
if response == "":
# User pressed `enter`, requesting Code-Llama
print(Markdown(
"> Switching to `Code-Llama`...\n\n**Tip:** Run `interpreter --local` to automatically use `Code-Llama`."),
'')
time.sleep(2)
print(Rule(style="white"))
          # Temporarily, for backwards (behavioral) compatibility, we've moved this part of llama_2.py here.
# AND BELOW.
# This way, when folks hit interpreter --local, they get the same experience as before.
import inquirer
print('', Markdown("**Open Interpreter** will use `Code Llama` for local execution. Use your arrow keys to set up the model."), '')
models = {
'7B': 'TheBloke/CodeLlama-7B-Instruct-GGUF',
'13B': 'TheBloke/CodeLlama-13B-Instruct-GGUF',
'34B': 'TheBloke/CodeLlama-34B-Instruct-GGUF'
}
parameter_choices = list(models.keys())
questions = [inquirer.List('param', message="Parameter count (smaller is faster, larger is more capable)", choices=parameter_choices)]
answers = inquirer.prompt(questions)
chosen_param = answers['param']
# THIS is more in line with the future. You just say the model you want by name:
self.model = models[chosen_param]
self.local = True
return
else:
self.api_key = response
self.azure_api_base = input("Azure OpenAI API base: ")
self.azure_deployment_name = input("Azure OpenAI deployment name of GPT: ")
self.azure_api_version = input("Azure OpenAI API version: ")
print('', Markdown(
"**Tip:** To save this key for later, run `export AZURE_API_KEY=your_api_key AZURE_API_BASE=your_api_base AZURE_API_VERSION=your_api_version AZURE_DEPLOYMENT_NAME=your_gpt_deployment_name` on Mac/Linux or `setx AZURE_API_KEY your_api_key AZURE_API_BASE your_api_base AZURE_API_VERSION your_api_version AZURE_DEPLOYMENT_NAME your_gpt_deployment_name` on Windows."),
'')
time.sleep(2)
print(Rule(style="white"))
litellm.api_type = self.azure_api_type
litellm.api_base = self.azure_api_base
litellm.api_version = self.azure_api_version
litellm.api_key = self.api_key
else:
if self.api_key == None:
if 'OPENAI_API_KEY' in os.environ:
self.api_key = os.environ['OPENAI_API_KEY']
else:
# This is probably their first time here!
self._print_welcome_message()
time.sleep(1)
print(Rule(style="white"))
print(Markdown(missing_api_key_message), '', Rule(style="white"), '')
response = input("OpenAI API key: ")
if response == "":
# User pressed `enter`, requesting Code-Llama
print(Markdown(
"> Switching to `Code-Llama`...\n\n**Tip:** Run `interpreter --local` to automatically use `Code-Llama`."),
'')
time.sleep(2)
print(Rule(style="white"))
            # Temporarily, for backwards (behavioral) compatibility, we've moved this part of llama_2.py here.
# AND ABOVE.
# This way, when folks hit interpreter --local, they get the same experience as before.
import inquirer
print('', Markdown("**Open Interpreter** will use `Code Llama` for local execution. Use your arrow keys to set up the model."), '')
models = {
'7B': 'TheBloke/CodeLlama-7B-Instruct-GGUF',
'13B': 'TheBloke/CodeLlama-13B-Instruct-GGUF',
'34B': 'TheBloke/CodeLlama-34B-Instruct-GGUF'
}
parameter_choices = list(models.keys())
questions = [inquirer.List('param', message="Parameter count (smaller is faster, larger is more capable)", choices=parameter_choices)]
answers = inquirer.prompt(questions)
chosen_param = answers['param']
# THIS is more in line with the future. You just say the model you want by name:
self.model = models[chosen_param]
self.local = True
return
else:
self.api_key = response
print('', Markdown("**Tip:** To save this key for later, run `export OPENAI_API_KEY=your_api_key` on Mac/Linux or `setx OPENAI_API_KEY your_api_key` on Windows."), '')
time.sleep(2)
print(Rule(style="white"))
litellm.api_key = self.api_key
if self.api_base:
litellm.api_base = self.api_base
def end_active_block(self):
if self.active_block:
self.active_block.end()
self.active_block = None
def respond(self):
# Add relevant info to system_message
# (e.g. current working directory, username, os, etc.)
info = self.get_info_for_system_message()
# This is hacky, as we should have a different (minified) prompt for CodeLLama,
# but for now, to make the prompt shorter and remove "run_code" references, just get the first 2 lines:
if self.local:
self.system_message = "\n".join(self.system_message.split("\n")[:2])
self.system_message += "\nOnly do what the user asks you to do, then ask what they'd like to do next."
system_message = self.system_message + "\n\n" + info
if self.local:
messages = tt.trim(self.messages, max_tokens=(self.context_window-self.max_tokens-25), system_message=system_message)
else:
messages = tt.trim(self.messages, self.model, system_message=system_message)
if self.debug_mode:
print("\n", "Sending `messages` to LLM:", "\n")
print(messages)
print()
# Make LLM call
if not self.local:
# GPT
error = ""
for _ in range(3): # 3 retries
try:
if self.use_azure:
response = litellm.completion(
f"azure/{self.azure_deployment_name}",
messages=messages,
functions=[function_schema],
temperature=self.temperature,
stream=True,
)
else:
if self.api_base:
# The user set the api_base. litellm needs this to be "custom/{model}"
response = litellm.completion(
api_base=self.api_base,
model = "custom/" + self.model,
messages=messages,
functions=[function_schema],
stream=True,
temperature=self.temperature,
)
else:
# Normal OpenAI call
response = litellm.completion(
model=self.model,
messages=messages,
functions=[function_schema],
stream=True,
temperature=self.temperature,
)
break
except:
if self.debug_mode:
traceback.print_exc()
error = traceback.format_exc()
time.sleep(3)
else:
raise Exception(error)
elif self.local:
# Code-Llama
# Convert messages to prompt
# (This only works if the first message is the only system message)
def messages_to_prompt(messages):
for message in messages:
          # Happens if it immediately writes code
if "role" not in message:
message["role"] = "assistant"
# Falcon prompt template
if "falcon" in self.model.lower():
formatted_messages = ""
for message in messages:
formatted_messages += f"{message['role'].capitalize()}: {message['content']}\n"
formatted_messages = formatted_messages.strip()
else:
# Llama prompt template
# Extracting the system prompt and initializing the formatted string with it.
system_prompt = messages[0]['content']
formatted_messages = f"<s>[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n"
# Loop starting from the first user message
for index, item in enumerate(messages[1:]):
role = item['role']
content = item['content']
if role == 'user':
formatted_messages += f"{content} [/INST] "
elif role == 'function':
formatted_messages += f"Output: {content} [/INST] "
elif role == 'assistant':
formatted_messages += f"{content} </s><s>[INST] "
# Remove the trailing '<s>[INST] ' from the final output
if formatted_messages.endswith("<s>[INST] "):
formatted_messages = formatted_messages[:-10]
return formatted_messages
prompt = messages_to_prompt(messages)
# Lmao i can't believe this works (it does need this btw)
if messages[-1]["role"] != "function":
prompt += "Let's explore this. By the way, I can run code on your machine by writing the code in a markdown code block. This works for shell, javascript, python, R, and applescript. I'm going to try to do this for your task. Anyway, "
elif messages[-1]["role"] == "function" and messages[-1]["content"] != "No output":
prompt += "Given the output of the code I just ran, "
elif messages[-1]["role"] == "function" and messages[-1]["content"] == "No output":
prompt += "Given the fact that the code I just ran produced no output, "
if self.debug_mode:
# we have to use builtins bizarrely! because rich.print interprets "[INST]" as something meaningful
import builtins
builtins.print("TEXT PROMPT SEND TO LLM:\n", prompt)
# Run Code-Llama
response = self.llama_instance(
prompt,
stream=True,
temperature=self.temperature,
stop=["</s>"],
max_tokens=750 # context window is set to 1800, messages are trimmed to 1000... 700 seems nice
)
# Initialize message, function call trackers, and active block
self.messages.append({})
in_function_call = False
llama_function_call_finished = False
self.active_block = None
for chunk in response:
if self.use_azure and ('choices' not in chunk or len(chunk['choices']) == 0):
# Azure OpenAI Service may return empty chunk
continue
if self.local:
if "content" not in messages[-1]:
# This is the first chunk. We'll need to capitalize it, because our prompt ends in a ", "
chunk["choices"][0]["text"] = chunk["choices"][0]["text"].capitalize()
# We'll also need to add "role: assistant", CodeLlama will not generate this
messages[-1]["role"] = "assistant"
delta = {"content": chunk["choices"][0]["text"]}
else:
delta = chunk["choices"][0]["delta"]
# Accumulate deltas into the last message in messages
self.messages[-1] = merge_deltas(self.messages[-1], delta)
# Check if we're in a function call
if not self.local:
condition = "function_call" in self.messages[-1]
elif self.local:
# Since Code-Llama can't call functions, we just check if we're in a code block.
# This simply returns true if the number of "```" in the message is odd.
if "content" in self.messages[-1]:
condition = self.messages[-1]["content"].count("```") % 2 == 1
else:
# If it hasn't made "content" yet, we're certainly not in a function call.
condition = False
if condition:
# We are in a function call.
# Check if we just entered a function call
if in_function_call == False:
# If so, end the last block,
self.end_active_block()
# Print newline if it was just a code block or user message
# (this just looks nice)
last_role = self.messages[-2]["role"]
if last_role == "user" or last_role == "function":
print()
# then create a new code block
self.active_block = CodeBlock()
# Remember we're in a function_call
in_function_call = True
# Now let's parse the function's arguments:
if not self.local:
# gpt-4
# Parse arguments and save to parsed_arguments, under function_call
if "arguments" in self.messages[-1]["function_call"]:
arguments = self.messages[-1]["function_call"]["arguments"]
new_parsed_arguments = parse_partial_json(arguments)
if new_parsed_arguments:
# Only overwrite what we have if it's not None (which means it failed to parse)
self.messages[-1]["function_call"][
"parsed_arguments"] = new_parsed_arguments
elif self.local:
# Code-Llama
# Parse current code block and save to parsed_arguments, under function_call
if "content" in self.messages[-1]:
content = self.messages[-1]["content"]
if "```" in content:
# Split by "```" to get the last open code block
blocks = content.split("```")
current_code_block = blocks[-1]
lines = current_code_block.split("\n")
if content.strip() == "```": # Hasn't outputted a language yet
language = None
else:
if lines[0] != "":
language = lines[0].strip()
else:
language = "python"
# In anticipation of its dumbassery let's check if "pip" is in there
if len(lines) > 1:
if lines[1].startswith("pip"):
language = "shell"
# Join all lines except for the language line
code = '\n'.join(lines[1:]).strip("` \n")
arguments = {"code": code}
if language: # We only add this if we have it-- the second we have it, an interpreter gets fired up (I think? maybe I'm wrong)
if language == "bash":
language = "shell"
arguments["language"] = language
# Code-Llama won't make a "function_call" property for us to store this under, so:
if "function_call" not in self.messages[-1]:
self.messages[-1]["function_call"] = {}
self.messages[-1]["function_call"]["parsed_arguments"] = arguments
else:
# We are not in a function call.
# Check if we just left a function call
if in_function_call == True:
if self.local:
# This is the same as when gpt-4 gives finish_reason as function_call.
# We have just finished a code block, so now we should run it.
llama_function_call_finished = True
# Remember we're not in a function_call
in_function_call = False
# If there's no active block,
if self.active_block == None:
# Create a message block
self.active_block = MessageBlock()
# Update active_block
self.active_block.update_from_message(self.messages[-1])
# Check if we're finished
if chunk["choices"][0]["finish_reason"] or llama_function_call_finished:
if chunk["choices"][
0]["finish_reason"] == "function_call" or llama_function_call_finished:
# Time to call the function!
# (Because this is Open Interpreter, we only have one function.)
if self.debug_mode:
print("Running function:")
print(self.messages[-1])
print("---")
# Ask for user confirmation to run code
if self.auto_run == False:
# End the active block so you can run input() below it
# Save language and code so we can create a new block in a moment
self.active_block.end()
language = self.active_block.language
code = self.active_block.code
# Prompt user
response = input(" Would you like to run this code? (y/n)\n\n ")
print("") # <- Aesthetic choice
if response.strip().lower() == "y":
# Create a new, identical block where the code will actually be run
self.active_block = CodeBlock()
self.active_block.language = language
self.active_block.code = code
else:
# User declined to run code.
self.active_block.end()
self.messages.append({
"role":
"function",
"name":
"run_code",
"content":
"User decided not to run this code."
})
return
# If we couldn't parse its arguments, we need to try again.
if not self.local and "parsed_arguments" not in self.messages[-1]["function_call"]:
# After collecting some data via the below instruction to users,
# This is the most common failure pattern: https://github.com/KillianLucas/open-interpreter/issues/41
# print("> Function call could not be parsed.\n\nPlease open an issue on Github (openinterpreter.com, click Github) and paste the following:")
# print("\n", self.messages[-1]["function_call"], "\n")
# time.sleep(2)
# print("Informing the language model and continuing...")
# Since it can't really be fixed without something complex,
# let's just berate the LLM then go around again.
self.messages.append({
"role": "function",
"name": "run_code",
"content": """Your function call could not be parsed. Please use ONLY the `run_code` function, which takes two parameters: `code` and `language`. Your response should be formatted as a JSON."""
})
self.respond()
return
# Create or retrieve a Code Interpreter for this language
language = self.messages[-1]["function_call"]["parsed_arguments"][
"language"]
if language not in self.code_interpreters:
self.code_interpreters[language] = CodeInterpreter(language, self.debug_mode)
code_interpreter = self.code_interpreters[language]
# Let this Code Interpreter control the active_block
code_interpreter.active_block = self.active_block
code_interpreter.run()
# End the active_block
self.active_block.end()
# Append the output to messages
# Explicitly tell it if there was no output (sometimes "" = hallucinates output)
self.messages.append({
"role": "function",
"name": "run_code",
"content": self.active_block.output if self.active_block.output else "No output"
})
# Go around again
self.respond()
if chunk["choices"][0]["finish_reason"] != "function_call":
# Done!
# Code Llama likes to output "###" at the end of every message for some reason
if self.local and "content" in self.messages[-1]:
self.messages[-1]["content"] = self.messages[-1]["content"].strip().rstrip("#")
self.active_block.update_from_message(self.messages[-1])
time.sleep(0.1)
self.active_block.end()
return
def _print_welcome_message(self):
current_version = pkg_resources.get_distribution("open-interpreter").version
print(f"\n Hello, Welcome to [bold]● Open Interpreter[/bold]. (v{current_version})\n") | [
"User decided not to run this code.",
"No output",
"Your function call could not be parsed. Please use ONLY the `run_code` function, which takes two parameters: `code` and `language`. Your response should be formatted as a JSON.",
"Let's explore this. By the way, I can run code on your machine by writing the code in a markdown code block. This works for shell, javascript, python, R, and applescript. I'm going to try to do this for your task. Anyway, ",
"Given the output of the code I just ran, ",
"content",
"Given the fact that the code I just ran produced no output, "
] |
2024-01-10 | TheCodeofMonteCristo/Creative-Writers-Toolkit | 1Create%20some%20characters.py | # -*- coding: utf-8 -*-
"""
Created on Thu May 26 09:00:55 2022
@author: user
"""
import os
import openai
from time import time,sleep
import re
import tkinter as tk
from tkinter import filedialog
from tkinter import *
from tkinter.filedialog import askopenfilename
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
openai.api_key = open_file('openaiapikey.txt')
def save_file(content, filepath):
with open(filepath, 'w', encoding='utf-8') as outfile:
outfile.write(content)
def isFileExists(filename):
return os.path.isfile(filename)
def gpt3_completion(prompt, engine='text-davinci-002', temp=1.0, top_p=1.0, tokens=1000, freq_pen=0.5, pres_pen=0.0, stop=['asdfasdf']):
max_retry = 5
retry = 0
while True:
try:
response = openai.Completion.create(
engine=engine, # use this for standard models
#model=engine, # use this for finetuned model
prompt=prompt,
temperature=temp,
max_tokens=tokens,
top_p=top_p,
frequency_penalty=freq_pen,
presence_penalty=pres_pen,
stop=stop)
text = response['choices'][0]['text'].strip()
#text = re.sub('\s+', ' ', text)
#save_gpt3_log(prompt, text)
return text
except Exception as oops:
retry += 1
if retry >= max_retry:
return None
print('Error communicating with OpenAI:', oops)
sleep(1)
def find_number(s):
for i in range(len(s)):
if s[i].isdigit():
return s[i:]
return None
def remove_linebreaks(s):
return re.sub(r'[\r\n]+', '', s)
def remove_nonprintable(s):
return re.sub(r'[^\x00-\x7F]+','', s)
def remove_spaces(s):
#remove_chars = '< > : " / \ | ? *.!@#$%^&*(){}[].,-?`;:'
#s = s.translate({ ord(c): None for c in remove_chars })
#return s
s = str(s).strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
if __name__ == '__main__':
print("This code creates characters for your stories")
print("You can start from scratch, or use other text files (such as story synopses, series information, or other characters)")
print("to spark the ideas")
print("You can create multiple characters at a time")
print("This code will create two files per character: a detailed character breakdown, and a precis")
#ask for genre
storytype=input("Genre or type of story your character will appear in (leave blank if you like):")
#ask for supporting files
print("For supporting files, you can bring in a synopsis, and brainstorm a main, or supporting character")
print("or you can import another character and brainstorm their husband, or enemy, etc. ")
print("alternatively you could create a text file describing an alien race, and brainstorm a member of that species")
print("in fact any text file will do - keep it short though. Less is more")
supporting=input("Load supporting files(y/n)?")
supportingfiles=""
if supporting=="y":
while True:
ftext=""
root = Tk()
root.filename = askopenfilename(initialdir = "/",title = "Select supporting file",filetypes = (("text files","*.txt"),("all files","*.*")))
#print (root.filename)
scenename=os.path.basename(root.filename)
            folderpath=os.path.dirname(root.filename)
print(folderpath)
ftext=""
with open(root.filename, "r") as f:
ftext = f.read()
root.destroy()
#load file
supportingfiles=supportingfiles+ftext+"\n"
an=input("Load Another(y/n)?")
if an=="n":
break
#ask for tweaks
print("Now you can add any additional info. This can be a short description")
print("eg. 'a character you'd find in a murder mystery'")
print("or 'the antagonist in the above story (if you've loaded in a supporting synopsis)")
print("or 'someone with a grudge against the character above' - again, any note will work!")
tweak=""
tweak=input("Additional character information?")
if tweak !="":
tweak="Make the character "+tweak
#ask for folder
nul=input("choose a folder to save the characters (hit return to continue)")
root = Tk()
root.attributes("-topmost", True)
folder_location = tk.filedialog.askdirectory()
root.destroy()
print ("Folder chosen: "+folder_location)
#ask how many characters
chars=1
ch=input("How many characters shall we brainstorm?")
chars=int(ch)
#load in prompt and assemble
folder = "planprompts/"
filename = "prompt01.txt"
filepath = os.path.join(folder, filename)
with open(filepath, "r") as f:
prompt = f.read()
prompt = prompt.replace('<<STORYTYPE>>', storytype).replace('<<SUPPORTINGFILES>>', supportingfiles).replace('<<TWEAK>>', tweak)
#call gpt3
for char in range (1,chars+1):
print("Querying GPT3..........................................")
completion1 = gpt3_completion(prompt)
#get character name
completion1="1)"+completion1
completion1 = completion1.replace(r'\n', '\n\n')
name="000"
name = completion1[completion1.find("Name:")+len("Name:"):completion1.find("2)")]
name = remove_linebreaks(name)
name = remove_nonprintable(name)
name = remove_spaces(name)
        if not name:
name=""
name=name+str(char)+".txt"
print("character name:"+name)
print(completion1)
#create precis
print("Querying GPT3..........................................")
completion2 = gpt3_completion("Create a brief, 1 pargagraph summary of the following character"+"\n"+ completion1+"\n"+name+"\nSUMMARY:")
print (completion2)
#save files
filepath = os.path.join(folder_location, name)
with open(filepath,"w",encoding="utf-8") as f:
f.write(completion1)
filepath = os.path.join(folder_location, "Precis_"+name)
with open(filepath,"w",encoding="utf-8") as f:
f.write(completion2)
#find character name and save detailed character
#call gpt3 again to create precis and save
| [
"<<STORYTYPE>>",
"<<SUPPORTINGFILES>>"
] |
2024-01-10 | TheCodeofMonteCristo/Creative-Writers-Toolkit | 2Create%20some%20synopses.py | # -*- coding: utf-8 -*-
"""
Created on Thu May 26 09:00:55 2022
@author: user
"""
import os
import openai
from time import time,sleep
import re
import tkinter as tk
from tkinter import filedialog
from tkinter import *
from tkinter.filedialog import askopenfilename
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
openai.api_key = open_file('openaiapikey.txt')
def save_file(content, filepath):
with open(filepath, 'w', encoding='utf-8') as outfile:
outfile.write(content)
def isFileExists(filename):
return os.path.isfile(filename)
def gpt3_completion(prompt, engine='text-davinci-002', temp=1.0, top_p=1.0, tokens=1000, freq_pen=0.5, pres_pen=0.0, stop=['asdfasdf']):
max_retry = 5
retry = 0
while True:
try:
response = openai.Completion.create(
engine=engine, # use this for standard models
#model=engine, # use this for finetuned model
prompt=prompt,
temperature=temp,
max_tokens=tokens,
top_p=top_p,
frequency_penalty=freq_pen,
presence_penalty=pres_pen,
stop=stop)
text = response['choices'][0]['text'].strip()
            text = re.sub(r'\s+', ' ', text)
#save_gpt3_log(prompt, text)
return text
except Exception as oops:
retry += 1
if retry >= max_retry:
return None
print('Error communicating with OpenAI:', oops)
sleep(1)
def find_number(s):
for i in range(len(s)):
if s[i].isdigit():
return s[i:]
return None
def remove_linebreaks(s):
return re.sub(r'[\r\n]+', '', s)
def remove_nonprintable(s):
return re.sub(r'[^\x00-\x7F]+','', s)
def remove_spaces(s):
return s.replace(' ', '')
def remove_speechmarks(s):
remove_chars = '< > : " / \ | ? *.!@#$%^&*(){}[].,-?`;:'
s = s.translate({ ord(c): None for c in remove_chars })
return s
if __name__ == '__main__':
print("This code creates synopses")
print("You can start from scratch, or use other text files (such as story backgrounds, series information, or characters)")
print("to spark the ideas")
print("You can create multiple stories at a time")
#print("This code will create two files per story: a detailed breakdown, and a short summary")
#ask for genre
storytype=input("Genre or type of story (leave blank if you like):")
#ask for supporting files
print("For supporting files, you can bring in characters who will appear in the story")
print("or you can import background info like desciptions of themes, or ideas you want to explore ")
print("alternatively you could create a text file giving the backstory, or details of the world of the story")
print("in fact any text file will do - keep it short though. Less is more")
supporting=input("Load supporting files(y/n)?")
supportingfiles=""
if supporting=="y":
while True:
ftext=""
root = Tk()
root.filename = askopenfilename(initialdir = "/",title = "Select supporting file",filetypes = (("text files","*.txt"),("all files","*.*")))
#print (root.filename)
scenename=os.path.basename(root.filename)
            folderpath=os.path.dirname(root.filename)
print(folderpath)
ftext=""
with open(root.filename, "r") as f:
ftext = f.read()
root.destroy()
#load file
supportingfiles=supportingfiles+ftext+"\n"
an=input("Load Another(y/n)?")
if an=="n":
break
if supportingfiles=="":
supportingfiles=="Intelligent, original storyline with large amounts of colour and detail."
#ask for tweaks
print("Now you can add any additional info. This can be a short description")
print("eg. 'a kids story about robots'")
print("or 'A story in which the above character is the hero (if you've loaded in a protagonist character)")
print("or 'a story set in the world described above - again, any note will work!")
tweak=""
tweak=input("Additional Story information?")
if tweak !="":
tweak="Make the story "+tweak
#ask for folder
nul=input("choose a folder to save the storylines (hit return to continue)")
root = Tk()
root.attributes("-topmost", True)
folder_location = tk.filedialog.askdirectory()
root.destroy()
print ("Folder chosen: "+folder_location)
#ask how many characters
chars=1
ch=input("How many stories shall we brainstorm?")
chars=int(ch)
#ask for a title
title=input("Story Title:")
#load in prompt and assemble
folder = "planprompts/"
filename = "synopsisprompt.txt"
filepath = os.path.join(folder, filename)
with open(filepath, "r") as f:
prompt = f.read()
prompt = prompt.replace('<<STORYTYPE>>', storytype).replace('<<SUPPORTINGFILES>>', supportingfiles).replace('<<TWEAK>>', tweak)
#call gpt3
for char in range (1,chars+1):
print("Querying GPT3..........................................")
completion1 = gpt3_completion(prompt)
#get story name
completion1="The story begins "+completion1
completion1 = completion1.replace(r'\n', '\n\n')
name=title+str(char)+".txt"
print("File title:"+name)
print(completion1)
#create scene breakdown
#folder = "planprompts/"
#filepath = os.path.join(folder, "synopsistoscenelist_multi.txt")
#with open(filepath, "r") as f:
# prompt8 = f.read()
#3 run the prompt
#scriptprompt= prompt8.replace('<<STORYTYPE>>', storytype).replace('<<SUPPORTINGFILES>>', supportingfiles).replace('<<TWEAK>>', tweak).replace('<<SYNOPSIS>>', completion1)
#print("QUERYING GPT3_____________________________________")
#completion8="frogspawn"
#completion8 = gpt3_completion(scriptprompt)
#completion8 = completion8.replace(r'\n', '\n')
#completion8="SCENE001:"+completion8
#print(completion8)
#save files
filepath = os.path.join(folder_location, "synopsis_"+name)
with open(filepath,"w",encoding="utf-8") as f:
f.write(completion1)
#filepath = os.path.join(folder_location, "story_breakdown_"+name)
#with open(filepath,"w",encoding="utf-8") as f:
# f.write(completion8)
#find character name and save detailed character
#call gpt3 again to create precis and save
| [
"<<STORYTYPE>>",
"<<SUPPORTINGFILES>>"
] |
2024-01-10 | CHAVI22/FAQ_GENERATION | get_span.py |
# get_answer.py
from coherence_span import get_coherent_sentences
from pipelines import span_pipeline
import re
from textblob import TextBlob
import pickle
import nltk.tokenize as nt
from nltk import sent_tokenize
import nltk
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('brown')
'''
python -m textblob.download_corpora
'''
def get_entities(sent):
namedEnt = []
b = TextBlob(sent)
for n in b.noun_phrases:
if len(n) > 4 and n not in namedEnt:
if n[0] not in ["'"]:
namedEnt.append(n)
return namedEnt
x = span_pipeline("span-extraction", span_model="valhalla/t5-small-qa-qg-hl",
span_tokenizer="valhalla/t5-small-qa-qg-hl")
'''
Given a passage of text this function will return a list of dictionaries.
Each dictionary has the following keys:
'sentence' -> Holds an individual sentence from passage
    'spans' -> list of spans extracted from the given passage
'questions' -> an empty list initialized. Will be filled with questions from question generation module
'answers' -> an empty list initialized. Will be filled with answers from answer generation module
'''
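# Illustrative sketch (not actual model output) of the structure extract_spans() returns
# for a short passage; the real spans depend on the models loaded above:
# [
#     {"sentence": "Paris is the capital of France.",
#      "spans": ["Paris", "the capital of France"],
#      "questions": [],
#      "answers": [],
#      "context": "Paris is the capital of France. ..."},
#     ...
# ]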
def check_input(passage):
if (bool(re.match('^[ !@#$%^&*\-()_+:;=|\\>.,]*$', passage)) == True):
print("invalid")
return False
else:
print("valid")
return True
def extract_spans(context):
if check_input(context):
# print(context)
res = get_spans(context)
return res
else:
return []
def get_spans(context):
ext_spans = []
gen_res = []
c = sent_tokenize(context)
# print(c)
spanDict = dict()
for i in list(x(context)):
# print("inside the for loop for x of context")
# print("\n")
# print("\n")
ext_spans = []
sent = i[0]
# print(sent)
span = i[1]
# print(span)
if sent not in spanDict:
spanDict[sent] = []
for a in span:
new_a = (a.strip("<pad>")).strip()
if new_a not in spanDict[sent]:
spanDict[sent].append(new_a)
ext_spans.append(new_a)
for j in x(sent):
for span in j[1]:
a = (span.strip("<pad>")).strip()
if a not in spanDict[sent]:
spanDict[sent].append(a)
ext_spans.append(a)
ent = get_entities(sent)
temp_ext_spans = [i.lower() for i in ext_spans]
for n in ent:
if n not in temp_ext_spans:
temp_ext_spans.append(n)
ext_spans.append(n)
spanDict[sent].append(n)
# print("Getting coherent sentences")
combs = get_coherent_sentences(c)
# print(combs)
for i in combs:
for j in x(i):
ext_spans = []
sent = j[0]
span = j[1]
# print("current sentence : ",sent)
# print("current answers : ",ans)
if sent not in spanDict:
spanDict[sent] = []
for a in span:
new_a = (a.strip("<pad>")).strip()
if new_a not in spanDict[sent] and abs(len(new_a)-len(sent)) > 5:
spanDict[sent].append(new_a)
ext_spans.append(new_a)
ent = get_entities(sent)
# print("additional answers : ",ent)
temp_ext_spans = [i.lower() for i in ext_spans]
for n in ent:
if n not in temp_ext_spans:
temp_ext_spans.append(n)
ext_spans.append(n)
spanDict[sent].append(n)
for i in spanDict:
ind_sent = {}
ind_sent["sentence"] = i
ind_sent["spans"] = spanDict[i]
ind_sent["questions"] = []
ind_sent["answers"] = []
ind_sent["context"] = context
gen_res.append(ind_sent)
# print(ind_sent)
return gen_res
| [] |
2024-01-10 | SandyLudosky/RAG-Integration | python-examples~How%20to%20build%20an%20OpenAI%20Assistant%20with%20Internet%20browsing~assistant.py | import os
import json
import openai
from dotenv import load_dotenv
from tavily import TavilyClient
load_dotenv()
# Initialize clients with API keys
client = openai.OpenAI(api_key=os.environ["OPENAI_API_KEY"])
tavily_client = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])
assistant_prompt_instruction = """You are knowledgeable about every topic.
Your goal is to use internet resources to provide answers.
You should never use
your own knowledge to answer questions.
I will add links to relevant resources at the end of the answer.
"""
# Function to perform a Tavily search
# https://github.com/assafelovic/tavily-python
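# Note: as called here, get_search_context() returns a single string of aggregated
# search context (bounded by max_tokens=8000); that string is handed back to the
# assistant as the tool output in submit_tool_outputs() below.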
def tavily_search(query):
search_result = tavily_client.get_search_context(
query, search_depth="advanced", max_tokens=8000
)
return search_result
# Function to wait for a run to complete
def wait_for_run_completion(thread_id, run_id):
while True:
run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
print(f"Current run status: {run.status}")
if run.status in ["completed", "failed", "requires_action"]:
return run
# Function to handle tool output submission
def submit_tool_outputs(thread_id, run_id, tools_to_call):
tool_output_array = []
for tool in tools_to_call:
output = None
tool_call_id = tool.id
function_name = tool.function.name
function_args = tool.function.arguments
if function_name == "search-news":
output = tavily_search(query=json.loads(function_args)["query"])
if output:
tool_output_array.append({"tool_call_id": tool_call_id, "output": output})
return client.beta.threads.runs.submit_tool_outputs(
thread_id=thread_id, run_id=run_id, tool_outputs=tool_output_array
)
# Function to print messages from a thread
def print_messages_from_thread(thread_id):
messages = client.beta.threads.messages.list(thread_id=thread_id)
for msg in messages:
print(f"{msg.role}: {msg.content[0].text.value}")
return messages
def create_assistant():
# Create an assistant
assistant = client.beta.assistants.create(
instructions=assistant_prompt_instruction,
model="gpt-3.5-turbo-1106",
tools=[
{
"type": "function",
"function": {
"name": "search-news",
"description": "Get information on recent events from the web.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The search query to use. For example: 'Latest news on Nvidia stock performance'",
},
},
"required": ["query"],
},
},
}
],
)
assistant_id = assistant.id
print(f"Assistant ID: {assistant_id}")
# Create a thread
thread = client.beta.threads.create()
print(f"Thread: {thread}")
return assistant_id, thread
def call_assistant(user_input):
# Create assistant()
assistant_id, thread = create_assistant()
# Ongoing conversation loop
while True:
# Create a message
client.beta.threads.messages.create(
thread_id=thread.id,
role="user",
content=user_input,
)
# Create a run
run = client.beta.threads.runs.create(
thread_id=thread.id,
assistant_id=assistant_id,
)
print(f"Run ID: {run.id}")
# Wait for run to complete
run = wait_for_run_completion(thread.id, run.id)
if run.status == "failed":
            print(run.last_error)
continue
elif run.status == "requires_action":
run = submit_tool_outputs(
thread.id, run.id, run.required_action.submit_tool_outputs.tool_calls
)
run = wait_for_run_completion(thread.id, run.id)
# Print messages from the thread
return print_messages_from_thread(thread.id)
def generate_chat_completion(user_input):
messages = call_assistant(user_input)
return messages
| [
"You are knowledgeable about every topic.\nYour goal is to use internet resources to provide answers.\nYou should never use\nyour own knowledge to answer questions.\nI will add link to relevant resources at the end of the answer.\n"
] |
2024-01-10 | souradip-chakraborty/pg_travel | mujoco~agent~tnpg.py | import numpy as np
from utils.utils import *
from hparams import HyperParams as hp
def get_returns(rewards, masks):
rewards = torch.Tensor(rewards)
masks = torch.Tensor(masks)
returns = torch.zeros_like(rewards)
running_returns = 0
for t in reversed(range(0, len(rewards))):
running_returns = rewards[t] + hp.gamma * running_returns * masks[t]
returns[t] = running_returns
returns = (returns - returns.mean()) / returns.std()
return returns
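# Worked example (illustrative only): with gamma = 0.9, rewards = [1, 1, 1] and
# masks = [1, 1, 1], the backward recursion above gives
#   returns[2] = 1
#   returns[1] = 1 + 0.9 * 1    = 1.9
#   returns[0] = 1 + 0.9 * 1.9  = 2.71
# before the final mean/std normalization; a 0 in masks[t] zeroes the discounted tail
# carried into step t, marking an episode boundary.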
def get_loss(actor, returns, states, actions):
mu, std, logstd = actor(torch.Tensor(states))
log_policy = log_density(torch.Tensor(actions), mu, std, logstd)
returns = returns.unsqueeze(1)
objective = returns * log_policy
objective = objective.mean()
return objective
def train_critic(critic, states, returns, critic_optim):
criterion = torch.nn.MSELoss()
n = len(states)
arr = np.arange(n)
for epoch in range(5):
np.random.shuffle(arr)
for i in range(n // hp.batch_size):
batch_index = arr[hp.batch_size * i: hp.batch_size * (i + 1)]
batch_index = torch.LongTensor(batch_index)
inputs = torch.Tensor(states)[batch_index]
target = returns.unsqueeze(1)[batch_index]
values = critic(inputs)
loss = criterion(values, target)
critic_optim.zero_grad()
loss.backward()
critic_optim.step()
def fisher_vector_product(actor, states, p):
p.detach()
kl = kl_divergence(new_actor=actor, old_actor=actor, states=states)
kl = kl.mean()
kl_grad = torch.autograd.grad(kl, actor.parameters(), create_graph=True)
kl_grad = flat_grad(kl_grad) # check kl_grad == 0
kl_grad_p = (kl_grad * p).sum()
kl_hessian_p = torch.autograd.grad(kl_grad_p, actor.parameters())
kl_hessian_p = flat_hessian(kl_hessian_p)
return kl_hessian_p + 0.1 * p
# from openai baseline code
# https://github.com/openai/baselines/blob/master/baselines/common/cg.py
def conjugate_gradient(actor, states, b, nsteps, residual_tol=1e-10):
x = torch.zeros(b.size())
r = b.clone()
p = b.clone()
rdotr = torch.dot(r, r)
for i in range(nsteps):
_Avp = fisher_vector_product(actor, states, p)
alpha = rdotr / torch.dot(p, _Avp)
x += alpha * p
r -= alpha * _Avp
new_rdotr = torch.dot(r, r)
betta = new_rdotr / rdotr
p = r + betta * p
rdotr = new_rdotr
if rdotr < residual_tol:
break
return x
def train_model(actor, critic, memory, actor_optim, critic_optim):
memory = np.array(memory)
states = np.vstack(memory[:, 0])
actions = list(memory[:, 1])
rewards = list(memory[:, 2])
masks = list(memory[:, 3])
# ----------------------------
# step 1: get returns
returns = get_returns(rewards, masks)
# ----------------------------
# step 2: train critic several steps with respect to returns
train_critic(critic, states, returns, critic_optim)
# ----------------------------
# step 3: get gradient of loss and hessian of kl
loss = get_loss(actor, returns, states, actions)
loss_grad = torch.autograd.grad(loss, actor.parameters())
loss_grad = flat_grad(loss_grad)
step_dir = conjugate_gradient(actor, states, loss_grad.data, nsteps=10)
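    # step_dir approximates F^-1 * g (the natural gradient direction), where g is the
    # policy-gradient loss gradient and F is the Fisher matrix applied implicitly via
    # the KL Hessian-vector products in fisher_vector_product().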
# ----------------------------
# step 4: get step direction and step size and update actor
params = flat_params(actor)
new_params = params + 0.5 * step_dir
update_model(actor, new_params)
| [] |
2024-01-10 | NTDXYG/COTTON | RQ1~zero_shot.py | from time import sleep
import openai
from transformers import CodeGenForCausalLM
openai.api_base = 'https://api.closeai-asia.com/v1'
openai.api_key = 'sk-k9ZTt7Lchrgu1pFv4iZGt5XAbKmh8jB7V85rCweHBzLc2FkI'
def CoT_openai(code, model='gpt-3.5-turbo'):
content = '''
### Given a piece of code, output the corresponding implementation idea.
### Example:
#### Input:
from typing import List
def below_zero(operations: List[int]) -> bool:
""" You're given a list of deposit and withdrawal operations on a bank account that starts with
zero balance. Your task is to detect if at any point the balance of account falls below zero, and
at that point function should return True. Otherwise it should return False.
"""
#### Output:
How to solve:
Step 1. Initialize account balance as 0.
Step 2. Iterate through operations.
-add value to account balance.
-If account balance < 0, return True.
Step 3. Return False.
### Input:
'''
try:
response = openai.ChatCompletion.create(
model=model,
messages=[
{"role": "system", "content": content},
{"role": "user", "content": code},
],
temperature=0
)
except:
sleep(5)
response = openai.ChatCompletion.create(
model=model,
messages=[
{"role": "system", "content": content},
{"role": "user", "content": code},
],
temperature=0
)
return response['choices'][0]['message']['content']
if __name__ == '__main__':
code = '''
def check_numbers(numbers):
""" Given a list of integers, return True if all numbers are even or all numbers are odd. Otherwise, return False.
The function should return False if the given list is empty.
"""
### Output:
'''
result = CoT_openai(code)
print(result)
# How to solve:
# Step 1. Initialize an empty list to store the even numbers.
# Step 2. Iterate through the range from a to b.
# -Check if the current number is even.
# -If it is, append it to the list.
# Step 3. Return the list of even numbers. | [
"\ndef check_numbers(numbers):\n \"\"\" Given a list of integers, return True if all numbers are even or all numbers are odd. Otherwise, return False.\n The function should return False if the given list is empty.\n \"\"\"\n \n### Output:\n ",
"\n### Given a piece of code, output the corresponding implementation idea.\n### Example:\n#### Input:\nfrom typing import List\n\n\ndef below_zero(operations: List[int]) -> bool:\n \"\"\" You're given a list of deposit and withdrawal operations on a bank account that starts with\n zero balance. Your task is to detect if at any point the balance of account falls below zero, and\n at that point function should return True. Otherwise it should return False.\n \"\"\"\n\n#### Output:\nHow to solve:\nStep 1. Initialize account balance as 0.\nStep 2. Iterate through operations.\n -add value to account balance.\n -If account balance < 0, return True.\nStep 3. Return False.\n\n### Input:\n\n "
] |
2024-01-10 | Royce-Geospatial-Consultants/h2ogpt_rg | src~image_doctr.py | """
Based upon ImageCaptionLoader in LangChain version: langchain/document_loaders/image_captions.py
But accepts preloaded model to avoid slowness in use and CUDA forking issues
Loader that uses H2O DocTR OCR models to extract text from images
"""
from typing import List, Union, Any, Tuple, Optional
import requests
from langchain.docstore.document import Document
from langchain.document_loaders import ImageCaptionLoader
import numpy as np
from utils import get_device, clear_torch_cache
from doctr.utils.common_types import AbstractFile
class H2OOCRLoader(ImageCaptionLoader):
"""Loader that extracts text from images"""
def __init__(self, path_images: Union[str, List[str]] = None, layout_aware=False, gpu_id=None):
super().__init__(path_images)
self._ocr_model = None
self.layout_aware = layout_aware
self.gpu_id = gpu_id
self.device = 'cpu'
# ensure self.device set
self.set_context()
def set_context(self):
if get_device() == 'cuda':
import torch
n_gpus = torch.cuda.device_count() if torch.cuda.is_available else 0
if n_gpus > 0:
self.context_class = torch.device
if self.gpu_id is not None:
self.device = torch.device("cuda:%d" % self.gpu_id)
else:
self.device = 'cuda'
else:
self.device = 'cpu'
else:
self.device = 'cpu'
def load_model(self):
try:
from weasyprint import HTML # to avoid warning
from doctr.models.zoo import ocr_predictor
except ImportError:
raise ValueError(
"`doctr` package not found, please install with "
"`pip install git+https://github.com/h2oai/doctr.git`."
)
if self._ocr_model:
self._ocr_model = self._ocr_model.to(self.device)
return self
self.set_context()
self._ocr_model = ocr_predictor(det_arch="db_resnet50", reco_arch="crnn_efficientnetv2_mV2",
pretrained=True).to(self.device)
return self
def unload_model(self):
if hasattr(self._ocr_model.det_predictor.model, 'cpu'):
self._ocr_model.det_predictor.model.cpu()
clear_torch_cache()
if hasattr(self._ocr_model.reco_predictor.model, 'cpu'):
self._ocr_model.reco_predictor.model.cpu()
clear_torch_cache()
if hasattr(self._ocr_model, 'cpu'):
self._ocr_model.cpu()
clear_torch_cache()
def set_document_paths(self, document_paths: Union[str, List[str]]):
"""
Load from a list of image files
"""
if isinstance(document_paths, str):
self.document_paths = [document_paths]
else:
self.document_paths = document_paths
def load(self, prompt=None) -> List[Document]:
if self._ocr_model is None:
self.load_model()
results = []
for document_path in self.document_paths:
caption, metadata = self._get_captions_and_metadata(
model=self._ocr_model, document_path=document_path
)
doc = Document(page_content=" \n".join(caption), metadata=metadata)
results.append(doc)
return results
def _get_captions_and_metadata(
self, model: Any, document_path: str) -> Tuple[list, dict]:
"""
Helper function for getting the captions and metadata of an image
"""
try:
from doctr.io import DocumentFile
except ImportError:
raise ValueError(
"`doctr` package not found, please install with "
"`pip install git+https://github.com/h2oai/doctr.git`."
)
try:
if document_path.lower().endswith(".pdf"):
# load at roughly 300 dpi
images = read_pdf(document_path)
else:
images = DocumentFile.from_images(document_path)
except Exception:
raise ValueError(f"Could not get image data for {document_path}")
document_words = []
for image in images:
ocr_output = model([image])
page_words = []
page_boxes = []
for block_num, block in enumerate(ocr_output.pages[0].blocks):
for line_num, line in enumerate(block.lines):
for word_num, word in enumerate(line.words):
if not (word.value or "").strip():
continue
page_words.append(word.value)
page_boxes.append(
[word.geometry[0][0], word.geometry[0][1], word.geometry[1][0], word.geometry[1][1]])
if self.layout_aware:
ids = boxes_sort(page_boxes)
texts = [page_words[i] for i in ids]
text_boxes = [page_boxes[i] for i in ids]
page_words = space_layout(texts=texts, boxes=text_boxes)
else:
page_words = " ".join(page_words)
document_words.append(page_words)
metadata: dict = {"image_path": document_path}
return document_words, metadata
def boxes_sort(boxes):
""" From left top to right bottom
Params:
boxes: [[x1, y1, x2, y2], [x1, y1, x2, y2], ...]
"""
sorted_id = sorted(range(len(boxes)), key=lambda x: (boxes[x][1]))
# sorted_boxes = [boxes[id] for id in sorted_id]
return sorted_id
def is_same_line(box1, box2):
"""
Params:
box1: [x1, y1, x2, y2]
box2: [x1, y1, x2, y2]
"""
box1_midy = (box1[1] + box1[3]) / 2
box2_midy = (box2[1] + box2[3]) / 2
if box1_midy < box2[3] and box1_midy > box2[1] and box2_midy < box1[3] and box2_midy > box1[1]:
return True
else:
return False
def union_box(box1, box2):
"""
Params:
box1: [x1, y1, x2, y2]
box2: [x1, y1, x2, y2]
"""
x1 = min(box1[0], box2[0])
y1 = min(box1[1], box2[1])
x2 = max(box1[2], box2[2])
y2 = max(box1[3], box2[3])
return [x1, y1, x2, y2]
def space_layout(texts, boxes):
line_boxes = []
line_texts = []
max_line_char_num = 0
line_width = 0
# print(f"len_boxes: {len(boxes)}")
boxes = np.array(boxes)
texts = np.array(texts)
while len(boxes) > 0:
box = boxes[0]
mid = (boxes[:, 3] + boxes[:, 1]) / 2
inline_boxes = np.logical_and(mid > box[1], mid < box[3])
sorted_xs = np.argsort(boxes[inline_boxes][:, 0], axis=0)
line_box = boxes[inline_boxes][sorted_xs]
line_text = texts[inline_boxes][sorted_xs]
boxes = boxes[~inline_boxes]
texts = texts[~inline_boxes]
line_boxes.append(line_box.tolist())
line_texts.append(line_text.tolist())
if len(" ".join(line_texts[-1])) > max_line_char_num:
max_line_char_num = len(" ".join(line_texts[-1]))
line_width = np.array(line_boxes[-1])
line_width = line_width[:, 2].max() - line_width[:, 0].min()
char_width = (line_width / max_line_char_num) if max_line_char_num > 0 else 0
if char_width == 0:
char_width = 1
space_line_texts = []
for i, line_box in enumerate(line_boxes):
space_line_text = ""
for j, box in enumerate(line_box):
left_char_num = int(box[0] / char_width)
left_char_num = max((left_char_num - len(space_line_text)), 1)
# verbose layout
# space_line_text += " " * left_char_num
# minified layout
if left_char_num > 1:
space_line_text += f" <{left_char_num}> "
else:
space_line_text += " "
space_line_text += line_texts[i][j]
space_line_texts.append(space_line_text + "\n")
return "".join(space_line_texts)
def read_pdf(
file: AbstractFile,
scale: float = 300 / 72,
rgb_mode: bool = True,
password: Optional[str] = None,
**kwargs: Any,
) -> List[np.ndarray]:
"""Read a PDF file and convert it into an image in numpy format
>>> from doctr.documents import read_pdf
>>> doc = read_pdf("path/to/your/doc.pdf")
Args:
file: the path to the PDF file
scale: rendering scale (1 corresponds to 72dpi)
rgb_mode: if True, the output will be RGB, otherwise BGR
password: a password to unlock the document, if encrypted
kwargs: additional parameters to :meth:`pypdfium2.PdfPage.render`
Returns:
the list of pages decoded as numpy ndarray of shape H x W x C
"""
# Rasterise pages to numpy ndarrays with pypdfium2
import pypdfium2 as pdfium
pdf = pdfium.PdfDocument(file, password=password, autoclose=True)
return [page.render(scale=scale, rev_byteorder=rgb_mode, **kwargs).to_numpy() for page in pdf]
| [] |
2024-01-10 | Royce-Geospatial-Consultants/h2ogpt_rg | src~gradio_runner.py | import ast
import copy
import functools
import inspect
import itertools
import json
import os
import pprint
import random
import shutil
import sys
import time
import traceback
import uuid
import filelock
import numpy as np
import pandas as pd
import requests
from iterators import TimeoutIterator
from gradio_utils.css import get_css
from gradio_utils.prompt_form import make_chatbots
from src.db_utils import set_userid, get_username_direct
# This is a hack to prevent Gradio from phoning home when it gets imported
os.environ['GRADIO_ANALYTICS_ENABLED'] = 'False'
def my_get(url, **kwargs):
print('Gradio HTTP request redirected to localhost :)', flush=True)
kwargs.setdefault('allow_redirects', True)
return requests.api.request('get', 'http://127.0.0.1/', **kwargs)
original_get = requests.get
requests.get = my_get
import gradio as gr
requests.get = original_get
def fix_pydantic_duplicate_validators_error():
try:
from pydantic import class_validators
class_validators.in_ipython = lambda: True # type: ignore[attr-defined]
except ImportError:
pass
fix_pydantic_duplicate_validators_error()
from enums import DocumentSubset, no_model_str, no_lora_str, no_server_str, LangChainAction, LangChainMode, \
DocumentChoice, langchain_modes_intrinsic, LangChainTypes, langchain_modes_non_db, gr_to_lg, invalid_key_msg, \
LangChainAgent, docs_ordering_types, docs_token_handlings, docs_joiner_default
from gradio_themes import H2oTheme, SoftTheme, get_h2o_title, get_simple_title, \
get_dark_js, get_heap_js, wrap_js_to_lambda, \
spacing_xsm, radius_xsm, text_xsm
from prompter import prompt_type_to_model_name, prompt_types_strings, inv_prompt_type_to_model_lower, non_hf_types, \
get_prompt
from utils import flatten_list, zip_data, s3up, clear_torch_cache, get_torch_allocated, system_info_print, \
ping, makedirs, get_kwargs, system_info, ping_gpu, get_url, get_local_ip, \
save_generate_output, url_alive, remove, dict_to_html, text_to_html, lg_to_gr, str_to_dict, have_serpapi
from gen import get_model, languages_covered, evaluate, score_qa, inputs_kwargs_list, \
get_max_max_new_tokens, get_minmax_top_k_docs, history_to_context, langchain_actions, langchain_agents_list, \
evaluate_fake, merge_chat_conversation_history
from evaluate_params import eval_func_param_names, no_default_param_names, eval_func_param_names_defaults, \
input_args_list, key_overrides
from apscheduler.schedulers.background import BackgroundScheduler
def fix_text_for_gradio(text, fix_new_lines=False, fix_latex_dollars=True):
if not isinstance(text, str):
# e.g. list for extraction
text = str(text)
if fix_latex_dollars:
ts = text.split('```')
for parti, part in enumerate(ts):
inside = parti % 2 == 1
if not inside:
ts[parti] = ts[parti].replace('$', '﹩')
text = '```'.join(ts)
if fix_new_lines:
# let Gradio handle code, since got improved recently
## FIXME: below conflicts with Gradio, but need to see if can handle multiple \n\n\n etc. properly as is.
# ensure good visually, else markdown ignores multiple \n
# handle code blocks
ts = text.split('```')
for parti, part in enumerate(ts):
inside = parti % 2 == 1
if not inside:
ts[parti] = ts[parti].replace('\n', '<br>')
text = '```'.join(ts)
return text
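# Example (illustrative): with fix_latex_dollars=True, "costs $5" outside a ``` code
# fence becomes "costs ﹩5", while text inside ``` fences keeps its plain "$".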
def is_valid_key(enforce_h2ogpt_api_key, enforce_h2ogpt_ui_key, h2ogpt_api_keys, h2ogpt_key1, requests_state1=None):
from_ui = isinstance(requests_state1, dict) and 'username' in requests_state1 and requests_state1['username']
if from_ui and not enforce_h2ogpt_ui_key:
# no token barrier
return 'not enforced'
elif not from_ui and not enforce_h2ogpt_api_key:
# no token barrier
return 'not enforced'
else:
valid_key = False
if isinstance(h2ogpt_api_keys, list) and h2ogpt_key1 in h2ogpt_api_keys:
# passed token barrier
valid_key = True
elif isinstance(h2ogpt_api_keys, str) and os.path.isfile(h2ogpt_api_keys):
with filelock.FileLock(h2ogpt_api_keys + '.lock'):
with open(h2ogpt_api_keys, 'rt') as f:
h2ogpt_api_keys = json.load(f)
if h2ogpt_key1 in h2ogpt_api_keys:
valid_key = True
return valid_key
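# Note: is_valid_key() returns the string 'not enforced' (which is truthy) when no key
# enforcement applies for the given source (UI vs API), and True/False otherwise, so a
# truthy result means the request may proceed.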
def go_gradio(**kwargs):
allow_api = kwargs['allow_api']
is_public = kwargs['is_public']
is_hf = kwargs['is_hf']
memory_restriction_level = kwargs['memory_restriction_level']
n_gpus = kwargs['n_gpus']
admin_pass = kwargs['admin_pass']
model_states = kwargs['model_states']
dbs = kwargs['dbs']
db_type = kwargs['db_type']
visible_langchain_actions = kwargs['visible_langchain_actions']
visible_langchain_agents = kwargs['visible_langchain_agents']
allow_upload_to_user_data = kwargs['allow_upload_to_user_data']
allow_upload_to_my_data = kwargs['allow_upload_to_my_data']
enable_sources_list = kwargs['enable_sources_list']
enable_url_upload = kwargs['enable_url_upload']
enable_text_upload = kwargs['enable_text_upload']
use_openai_embedding = kwargs['use_openai_embedding']
hf_embedding_model = kwargs['hf_embedding_model']
load_db_if_exists = kwargs['load_db_if_exists']
migrate_embedding_model = kwargs['migrate_embedding_model']
auto_migrate_db = kwargs['auto_migrate_db']
captions_model = kwargs['captions_model']
caption_loader = kwargs['caption_loader']
doctr_loader = kwargs['doctr_loader']
n_jobs = kwargs['n_jobs']
verbose = kwargs['verbose']
# for dynamic state per user session in gradio
model_state0 = kwargs['model_state0']
score_model_state0 = kwargs['score_model_state0']
my_db_state0 = kwargs['my_db_state0']
selection_docs_state0 = kwargs['selection_docs_state0']
visible_models_state0 = kwargs['visible_models_state0']
# For Heap analytics
is_heap_analytics_enabled = kwargs['enable_heap_analytics']
heap_app_id = kwargs['heap_app_id']
# easy update of kwargs needed for evaluate() etc.
queue = True
allow_upload = allow_upload_to_user_data or allow_upload_to_my_data
allow_upload_api = allow_api and allow_upload
kwargs.update(locals())
# import control
if kwargs['langchain_mode'] != 'Disabled':
from gpt_langchain import file_types, have_arxiv
else:
have_arxiv = False
file_types = []
if 'mbart-' in kwargs['model_lower']:
instruction_label_nochat = "Text to translate"
else:
instruction_label_nochat = "Instruction (Shift-Enter or push Submit to send message," \
" use Enter for multiple input lines)"
title = 'h2oGPT'
if kwargs['visible_h2ogpt_header']:
description = """<iframe src="https://ghbtns.com/github-btn.html?user=h2oai&repo=h2ogpt&type=star&count=true&size=small" frameborder="0" scrolling="0" width="280" height="20" title="GitHub"></iframe><small><a href="https://github.com/h2oai/h2ogpt">h2oGPT</a> <a href="https://evalgpt.ai/">LLM Leaderboard</a> <a href="https://github.com/h2oai/h2o-llmstudio">LLM Studio</a><br /><a href="https://codellama.h2o.ai">CodeLlama</a> <br /><a href="https://huggingface.co/h2oai">🤗 Models</a>"""
else:
description = None
description_bottom = "If this host is busy, try<br>[Multi-Model](https://gpt.h2o.ai)<br>[CodeLlama](https://codellama.h2o.ai)<br>[Llama2 70B](https://llama.h2o.ai)<br>[Falcon 40B](https://falcon.h2o.ai)<br>[HF Spaces1](https://huggingface.co/spaces/h2oai/h2ogpt-chatbot)<br>[HF Spaces2](https://huggingface.co/spaces/h2oai/h2ogpt-chatbot2)<br>"
if is_hf:
description_bottom += '''<a href="https://huggingface.co/spaces/h2oai/h2ogpt-chatbot?duplicate=true"><img src="https://bit.ly/3gLdBN6" style="white-space: nowrap" alt="Duplicate Space"></a>'''
task_info_md = ''
css_code = get_css(kwargs)
if kwargs['gradio_offline_level'] >= 0:
# avoid GoogleFont that pulls from internet
if kwargs['gradio_offline_level'] == 1:
# front end would still have to download fonts or have cached it at some point
base_font = 'Source Sans Pro'
else:
base_font = 'Helvetica'
theme_kwargs = dict(font=(base_font, 'ui-sans-serif', 'system-ui', 'sans-serif'),
font_mono=('IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'))
else:
theme_kwargs = dict()
if kwargs['gradio_size'] == 'xsmall':
theme_kwargs.update(dict(spacing_size=spacing_xsm, text_size=text_xsm, radius_size=radius_xsm))
elif kwargs['gradio_size'] in [None, 'small']:
theme_kwargs.update(dict(spacing_size=gr.themes.sizes.spacing_sm, text_size=gr.themes.sizes.text_sm,
radius_size=gr.themes.sizes.spacing_sm))
elif kwargs['gradio_size'] == 'large':
theme_kwargs.update(dict(spacing_size=gr.themes.sizes.spacing_lg, text_size=gr.themes.sizes.text_lg),
radius_size=gr.themes.sizes.spacing_lg)
elif kwargs['gradio_size'] == 'medium':
theme_kwargs.update(dict(spacing_size=gr.themes.sizes.spacing_md, text_size=gr.themes.sizes.text_md,
radius_size=gr.themes.sizes.spacing_md))
theme = H2oTheme(**theme_kwargs) if kwargs['h2ocolors'] else SoftTheme(**theme_kwargs)
demo = gr.Blocks(theme=theme, css=css_code, title="h2oGPT", analytics_enabled=False)
callback = gr.CSVLogger()
model_options0 = flatten_list(list(prompt_type_to_model_name.values())) + kwargs['extra_model_options']
if kwargs['base_model'].strip() not in model_options0:
model_options0 = [kwargs['base_model'].strip()] + model_options0
lora_options = kwargs['extra_lora_options']
if kwargs['lora_weights'].strip() not in lora_options:
lora_options = [kwargs['lora_weights'].strip()] + lora_options
server_options = kwargs['extra_server_options']
if kwargs['inference_server'].strip() not in server_options:
server_options = [kwargs['inference_server'].strip()] + server_options
if os.getenv('OPENAI_API_KEY'):
if 'openai_chat' not in server_options:
server_options += ['openai_chat']
if 'openai' not in server_options:
server_options += ['openai']
# always add in no lora case
# add fake space so doesn't go away in gradio dropdown
model_options0 = [no_model_str] + sorted(model_options0)
lora_options = [no_lora_str] + sorted(lora_options)
server_options = [no_server_str] + sorted(server_options)
# always add in no model case so can free memory
# add fake space so doesn't go away in gradio dropdown
# transcribe, will be detranscribed before use by evaluate()
if not kwargs['base_model'].strip():
kwargs['base_model'] = no_model_str
if not kwargs['lora_weights'].strip():
kwargs['lora_weights'] = no_lora_str
if not kwargs['inference_server'].strip():
kwargs['inference_server'] = no_server_str
# transcribe for gradio
kwargs['gpu_id'] = str(kwargs['gpu_id'])
no_model_msg = 'h2oGPT [ !!! Please Load Model in Models Tab !!! ]'
output_label0 = f'h2oGPT [Model: {kwargs.get("base_model")}]' if kwargs.get(
'base_model') else no_model_msg
output_label0_model2 = no_model_msg
def update_prompt(prompt_type1, prompt_dict1, model_state1, which_model=0):
if not prompt_type1 or which_model != 0:
# keep prompt_type and prompt_dict in sync if possible
prompt_type1 = kwargs.get('prompt_type', prompt_type1)
prompt_dict1 = kwargs.get('prompt_dict', prompt_dict1)
# prefer model specific prompt type instead of global one
if not prompt_type1 or which_model != 0:
prompt_type1 = model_state1.get('prompt_type', prompt_type1)
prompt_dict1 = model_state1.get('prompt_dict', prompt_dict1)
if not prompt_dict1 or which_model != 0:
# if still not defined, try to get
prompt_dict1 = kwargs.get('prompt_dict', prompt_dict1)
if not prompt_dict1 or which_model != 0:
prompt_dict1 = model_state1.get('prompt_dict', prompt_dict1)
return prompt_type1, prompt_dict1
def visible_models_to_model_choice(visible_models1, api=False):
if isinstance(visible_models1, list):
assert len(
visible_models1) >= 1, "Invalid visible_models1=%s, can only be single entry" % visible_models1
# just take first
model_active_choice1 = visible_models1[0]
elif isinstance(visible_models1, (str, int)):
model_active_choice1 = visible_models1
else:
assert isinstance(visible_models1, type(None)), "Invalid visible_models1=%s" % visible_models1
model_active_choice1 = visible_models1
if model_active_choice1 is not None:
if isinstance(model_active_choice1, str):
base_model_list = [x['base_model'] for x in model_states]
if model_active_choice1 in base_model_list:
# if dups, will just be first one
model_active_choice1 = base_model_list.index(model_active_choice1)
else:
# NOTE: Could raise, but sometimes raising in certain places fails too hard and requires UI restart
if api:
raise ValueError(
"Invalid model %s, valid models are: %s" % (model_active_choice1, base_model_list))
model_active_choice1 = 0
else:
model_active_choice1 = 0
return model_active_choice1
default_kwargs = {k: kwargs[k] for k in eval_func_param_names_defaults}
# ensure prompt_type consistent with prep_bot(), so nochat API works same way
default_kwargs['prompt_type'], default_kwargs['prompt_dict'] = \
update_prompt(default_kwargs['prompt_type'], default_kwargs['prompt_dict'],
model_state1=model_state0,
which_model=visible_models_to_model_choice(kwargs['visible_models']))
for k in no_default_param_names:
default_kwargs[k] = ''
def dummy_fun(x):
# need dummy function to block new input from being sent until output is done,
# else gets input_list at time of submit that is old, and shows up as truncated in chatbot
return x
def update_auth_selection(auth_user, selection_docs_state1, save=False):
# in-place update of both
if 'selection_docs_state' not in auth_user:
auth_user['selection_docs_state'] = selection_docs_state0
for k, v in auth_user['selection_docs_state'].items():
if isinstance(selection_docs_state1[k], dict):
if save:
auth_user['selection_docs_state'][k].clear()
auth_user['selection_docs_state'][k].update(selection_docs_state1[k])
else:
selection_docs_state1[k].clear()
selection_docs_state1[k].update(auth_user['selection_docs_state'][k])
elif isinstance(selection_docs_state1[k], list):
if save:
auth_user['selection_docs_state'][k].clear()
auth_user['selection_docs_state'][k].extend(selection_docs_state1[k])
else:
selection_docs_state1[k].clear()
selection_docs_state1[k].extend(auth_user['selection_docs_state'][k])
else:
raise RuntimeError("Bad type: %s" % selection_docs_state1[k])
# BEGIN AUTH THINGS
def auth_func(username1, password1, auth_pairs=None, auth_filename=None,
auth_access=None,
auth_freeze=None,
guest_name=None,
selection_docs_state1=None,
selection_docs_state00=None,
**kwargs):
assert auth_freeze is not None
if selection_docs_state1 is None:
selection_docs_state1 = selection_docs_state00
assert selection_docs_state1 is not None
assert auth_filename and isinstance(auth_filename, str), "Auth file must be a non-empty string, got: %s" % str(
auth_filename)
if auth_access == 'open' and username1 == guest_name:
return True
if username1 == '':
# some issue with login
return False
with filelock.FileLock(auth_filename + '.lock'):
auth_dict = {}
if os.path.isfile(auth_filename):
try:
with open(auth_filename, 'rt') as f:
auth_dict = json.load(f)
except json.decoder.JSONDecodeError as e:
print("Auth exception: %s" % str(e), flush=True)
shutil.move(auth_filename, auth_filename + '.bak' + str(uuid.uuid4()))
auth_dict = {}
if username1 in auth_dict and username1 in auth_pairs:
if password1 == auth_dict[username1]['password'] and password1 == auth_pairs[username1]:
auth_user = auth_dict[username1]
update_auth_selection(auth_user, selection_docs_state1)
save_auth_dict(auth_dict, auth_filename)
return True
else:
return False
elif username1 in auth_dict:
if password1 == auth_dict[username1]['password']:
auth_user = auth_dict[username1]
update_auth_selection(auth_user, selection_docs_state1)
save_auth_dict(auth_dict, auth_filename)
return True
else:
return False
elif username1 in auth_pairs:
# copy over CLI auth to file so only one state to manage
auth_dict[username1] = dict(password=auth_pairs[username1], userid=str(uuid.uuid4()))
auth_user = auth_dict[username1]
update_auth_selection(auth_user, selection_docs_state1)
save_auth_dict(auth_dict, auth_filename)
return True
else:
if auth_access == 'closed':
return False
# open access
auth_dict[username1] = dict(password=password1, userid=str(uuid.uuid4()))
auth_user = auth_dict[username1]
update_auth_selection(auth_user, selection_docs_state1)
save_auth_dict(auth_dict, auth_filename)
if auth_access == 'open':
return True
else:
raise RuntimeError("Invalid auth_access: %s" % auth_access)
def auth_func_open(*args, **kwargs):
return True
def get_username(requests_state1):
username1 = None
if 'username' in requests_state1:
username1 = requests_state1['username']
return username1
def get_userid_auth_func(requests_state1, auth_filename=None, auth_access=None, guest_name=None, id0=None,
**kwargs):
if auth_filename and isinstance(auth_filename, str):
username1 = get_username(requests_state1)
if username1:
if username1 == guest_name:
return str(uuid.uuid4())
with filelock.FileLock(auth_filename + '.lock'):
if os.path.isfile(auth_filename):
with open(auth_filename, 'rt') as f:
auth_dict = json.load(f)
if username1 in auth_dict:
return auth_dict[username1]['userid']
# if here, then not persistently associated with username1,
# but should only be one-time asked if going to persist within a single session!
return id0 or str(uuid.uuid4())
get_userid_auth = functools.partial(get_userid_auth_func,
auth_filename=kwargs['auth_filename'],
auth_access=kwargs['auth_access'],
guest_name=kwargs['guest_name'],
)
if kwargs['auth_access'] == 'closed':
auth_message1 = "Closed access"
else:
auth_message1 = "WELCOME! Open access" \
" (%s/%s or any unique user/pass)" % (kwargs['guest_name'], kwargs['guest_name'])
if kwargs['auth_message'] is not None:
auth_message = kwargs['auth_message']
else:
auth_message = auth_message1
# always use same callable
auth_pairs0 = {}
if isinstance(kwargs['auth'], list):
for k, v in kwargs['auth']:
auth_pairs0[k] = v
authf = functools.partial(auth_func,
auth_pairs=auth_pairs0,
auth_filename=kwargs['auth_filename'],
auth_access=kwargs['auth_access'],
auth_freeze=kwargs['auth_freeze'],
guest_name=kwargs['guest_name'],
selection_docs_state00=copy.deepcopy(selection_docs_state0))
def get_request_state(requests_state1, request, db1s):
# if need to get state, do it now
if not requests_state1:
requests_state1 = requests_state0.copy()
if requests:
if not requests_state1.get('headers', '') and hasattr(request, 'headers'):
requests_state1.update(request.headers)
if not requests_state1.get('host', '') and hasattr(request, 'host'):
requests_state1.update(dict(host=request.host))
if not requests_state1.get('host2', '') and hasattr(request, 'client') and hasattr(request.client, 'host'):
requests_state1.update(dict(host2=request.client.host))
if not requests_state1.get('username', '') and hasattr(request, 'username'):
# use already-defined username instead of keep changing to new uuid
# should be same as in requests_state1
db_username = get_username_direct(db1s)
requests_state1.update(dict(username=request.username or db_username or str(uuid.uuid4())))
requests_state1 = {str(k): str(v) for k, v in requests_state1.items()}
return requests_state1
def user_state_setup(db1s, requests_state1, request: gr.Request, *args):
requests_state1 = get_request_state(requests_state1, request, db1s)
set_userid(db1s, requests_state1, get_userid_auth)
args_list = [db1s, requests_state1] + list(args)
return tuple(args_list)
# END AUTH THINGS
def allow_empty_instruction(langchain_mode1, document_subset1, langchain_action1):
allow = False
allow |= langchain_action1 not in LangChainAction.QUERY.value
allow |= document_subset1 in DocumentSubset.TopKSources.name
if langchain_mode1 in [LangChainMode.LLM.value]:
allow = False
return allow
image_loaders_options0, image_loaders_options, \
pdf_loaders_options0, pdf_loaders_options, \
url_loaders_options0, url_loaders_options = lg_to_gr(**kwargs)
jq_schema0 = '.[]'
with demo:
# avoid actual model/tokenizer here or anything that would be bad to deepcopy
# https://github.com/gradio-app/gradio/issues/3558
model_state = gr.State(
dict(model='model', tokenizer='tokenizer', device=kwargs['device'],
base_model=kwargs['base_model'],
tokenizer_base_model=kwargs['tokenizer_base_model'],
lora_weights=kwargs['lora_weights'],
inference_server=kwargs['inference_server'],
prompt_type=kwargs['prompt_type'],
prompt_dict=kwargs['prompt_dict'],
visible_models=visible_models_to_model_choice(kwargs['visible_models']),
h2ogpt_key=kwargs['h2ogpt_key'],
)
)
def update_langchain_mode_paths(selection_docs_state1):
dup = selection_docs_state1['langchain_mode_paths'].copy()
for k, v in dup.items():
if k not in selection_docs_state1['langchain_modes']:
selection_docs_state1['langchain_mode_paths'].pop(k)
for k in selection_docs_state1['langchain_modes']:
if k not in selection_docs_state1['langchain_mode_types']:
# if didn't specify shared, then assume scratch if didn't login or personal if logged in
selection_docs_state1['langchain_mode_types'][k] = LangChainTypes.PERSONAL.value
return selection_docs_state1
# Setup some gradio states for per-user dynamic state
model_state2 = gr.State(kwargs['model_state_none'].copy())
model_options_state = gr.State([model_options0])
lora_options_state = gr.State([lora_options])
server_options_state = gr.State([server_options])
my_db_state = gr.State(my_db_state0)
chat_state = gr.State({})
docs_state00 = kwargs['document_choice'] + [DocumentChoice.ALL.value]
docs_state0 = []
[docs_state0.append(x) for x in docs_state00 if x not in docs_state0]
docs_state = gr.State(docs_state0)
viewable_docs_state0 = ['None']
viewable_docs_state = gr.State(viewable_docs_state0)
selection_docs_state0 = update_langchain_mode_paths(selection_docs_state0)
selection_docs_state = gr.State(selection_docs_state0)
requests_state0 = dict(headers='', host='', username='')
requests_state = gr.State(requests_state0)
if description is not None:
gr.Markdown(f"""
{get_h2o_title(title, description) if kwargs['h2ocolors'] else get_simple_title(title, description)}
""")
# go button visible if
base_wanted = kwargs['base_model'] != no_model_str and kwargs['login_mode_if_model0']
go_btn = gr.Button(value="ENTER", visible=base_wanted, variant="primary")
nas = ' '.join(['NA'] * len(kwargs['model_states']))
res_value = "Response Score: NA" if not kwargs[
'model_lock'] else "Response Scores: %s" % nas
user_can_do_sum = kwargs['langchain_mode'] != LangChainMode.DISABLED.value and \
(kwargs['visible_side_bar'] or kwargs['visible_system_tab'])
if user_can_do_sum:
extra_prompt_form = ". For summarization, no query required, just click submit"
else:
extra_prompt_form = ""
if kwargs['input_lines'] > 1:
instruction_label = "Shift-Enter to Submit, Enter for more lines%s" % extra_prompt_form
else:
instruction_label = "Enter to Submit, Shift-Enter for more lines%s" % extra_prompt_form
def get_langchain_choices(selection_docs_state1):
langchain_modes = selection_docs_state1['langchain_modes']
if is_hf:
# don't show 'wiki' since only usually useful for internal testing at moment
no_show_modes = ['Disabled', 'wiki']
else:
no_show_modes = ['Disabled']
allowed_modes = langchain_modes.copy()
# allowed_modes = [x for x in allowed_modes if x in dbs]
allowed_modes += ['LLM']
if allow_upload_to_my_data and 'MyData' not in allowed_modes:
allowed_modes += ['MyData']
if allow_upload_to_user_data and 'UserData' not in allowed_modes:
allowed_modes += ['UserData']
choices = [x for x in langchain_modes if x in allowed_modes and x not in no_show_modes]
return choices
def get_df_langchain_mode_paths(selection_docs_state1, db1s, dbs1=None):
langchain_choices1 = get_langchain_choices(selection_docs_state1)
langchain_mode_paths = selection_docs_state1['langchain_mode_paths']
langchain_mode_paths = {k: v for k, v in langchain_mode_paths.items() if k in langchain_choices1}
if langchain_mode_paths:
langchain_mode_paths = langchain_mode_paths.copy()
for langchain_mode1 in langchain_modes_non_db:
langchain_mode_paths.pop(langchain_mode1, None)
df1 = pd.DataFrame.from_dict(langchain_mode_paths.items(), orient='columns')
df1.columns = ['Collection', 'Path']
df1 = df1.set_index('Collection')
else:
df1 = pd.DataFrame(None)
langchain_mode_types = selection_docs_state1['langchain_mode_types']
langchain_mode_types = {k: v for k, v in langchain_mode_types.items() if k in langchain_choices1}
if langchain_mode_types:
langchain_mode_types = langchain_mode_types.copy()
for langchain_mode1 in langchain_modes_non_db:
langchain_mode_types.pop(langchain_mode1, None)
df2 = pd.DataFrame.from_dict(langchain_mode_types.items(), orient='columns')
df2.columns = ['Collection', 'Type']
df2 = df2.set_index('Collection')
from src.gpt_langchain import get_persist_directory, load_embed
persist_directory_dict = {}
embed_dict = {}
chroma_version_dict = {}
for langchain_mode3 in langchain_mode_types:
langchain_type3 = langchain_mode_types.get(langchain_mode3, LangChainTypes.EITHER.value)
persist_directory3, langchain_type3 = get_persist_directory(langchain_mode3,
langchain_type=langchain_type3,
db1s=db1s, dbs=dbs1)
got_embedding3, use_openai_embedding3, hf_embedding_model3 = load_embed(
persist_directory=persist_directory3)
persist_directory_dict[langchain_mode3] = persist_directory3
embed_dict[langchain_mode3] = 'OpenAI' if not hf_embedding_model3 else hf_embedding_model3
if os.path.isfile(os.path.join(persist_directory3, 'chroma.sqlite3')):
chroma_version_dict[langchain_mode3] = 'ChromaDB>=0.4'
elif os.path.isdir(os.path.join(persist_directory3, 'index')):
chroma_version_dict[langchain_mode3] = 'ChromaDB<0.4'
elif not os.listdir(persist_directory3):
if db_type == 'chroma':
chroma_version_dict[langchain_mode3] = 'ChromaDB>=0.4' # will be
elif db_type == 'chroma_old':
chroma_version_dict[langchain_mode3] = 'ChromaDB<0.4' # will be
else:
chroma_version_dict[langchain_mode3] = 'Weaviate' # will be
if isinstance(hf_embedding_model, dict):
hf_embedding_model3 = hf_embedding_model['name']
else:
hf_embedding_model3 = hf_embedding_model
assert isinstance(hf_embedding_model3, str)
embed_dict[langchain_mode3] = hf_embedding_model3 # will be
else:
chroma_version_dict[langchain_mode3] = 'Weaviate'
df3 = pd.DataFrame.from_dict(persist_directory_dict.items(), orient='columns')
df3.columns = ['Collection', 'Directory']
df3 = df3.set_index('Collection')
df4 = pd.DataFrame.from_dict(embed_dict.items(), orient='columns')
df4.columns = ['Collection', 'Embedding']
df4 = df4.set_index('Collection')
df5 = pd.DataFrame.from_dict(chroma_version_dict.items(), orient='columns')
df5.columns = ['Collection', 'DB']
df5 = df5.set_index('Collection')
else:
df2 = pd.DataFrame(None)
df3 = pd.DataFrame(None)
df4 = pd.DataFrame(None)
df5 = pd.DataFrame(None)
df_list = [df2, df1, df3, df4, df5]
df_list = [x for x in df_list if x.shape[1] > 0]
if len(df_list) > 1:
df = df_list[0].join(df_list[1:]).replace(np.nan, '').reset_index()
elif len(df_list) == 0:
df = df_list[0].replace(np.nan, '').reset_index()
else:
df = pd.DataFrame(None)
return df
normal_block = gr.Row(visible=not base_wanted, equal_height=False, elem_id="col_container")
with normal_block:
side_bar = gr.Column(elem_id="sidebar", scale=1, min_width=100, visible=kwargs['visible_side_bar'])
with side_bar:
with gr.Accordion("Chats", open=False, visible=True):
radio_chats = gr.Radio(value=None, label="Saved Chats", show_label=False,
visible=True, interactive=True,
type='value')
upload_visible = kwargs['langchain_mode'] != 'Disabled' and allow_upload
with gr.Accordion("Upload", open=False, visible=upload_visible):
with gr.Column():
with gr.Row(equal_height=False):
fileup_output = gr.File(show_label=False,
file_types=['.' + x for x in file_types],
# file_types=['*', '*.*'], # for iPhone etc. needs to be unconstrained else doesn't work with extension-based restrictions
file_count="multiple",
scale=1,
min_width=0,
elem_id="warning", elem_classes="feedback",
)
fileup_output_text = gr.Textbox(visible=False)
max_quality = gr.Checkbox(label="Maximum Ingest Quality", value=kwargs['max_quality'],
visible=not is_public)
url_visible = kwargs['langchain_mode'] != 'Disabled' and allow_upload and enable_url_upload
url_label = 'URL/ArXiv' if have_arxiv else 'URL'
url_text = gr.Textbox(label=url_label,
# placeholder="Enter Submits",
max_lines=1,
interactive=True)
text_visible = kwargs['langchain_mode'] != 'Disabled' and allow_upload and enable_text_upload
user_text_text = gr.Textbox(label='Paste Text',
# placeholder="Enter Submits",
interactive=True,
visible=text_visible)
github_textbox = gr.Textbox(label="Github URL", visible=False) # FIXME WIP
database_visible = kwargs['langchain_mode'] != 'Disabled'
with gr.Accordion("Resources", open=False, visible=database_visible):
langchain_choices0 = get_langchain_choices(selection_docs_state0)
langchain_mode = gr.Radio(
langchain_choices0,
value=kwargs['langchain_mode'],
label="Collections",
show_label=True,
visible=kwargs['langchain_mode'] != 'Disabled',
min_width=100)
add_chat_history_to_context = gr.Checkbox(label="Chat History",
value=kwargs['add_chat_history_to_context'])
add_search_to_context = gr.Checkbox(label="Web Search",
value=kwargs['add_search_to_context'],
visible=os.environ.get('SERPAPI_API_KEY') is not None \
and have_serpapi)
document_subset = gr.Radio([x.name for x in DocumentSubset],
label="Subset",
value=DocumentSubset.Relevant.name,
interactive=True,
)
allowed_actions = [x for x in langchain_actions if x in visible_langchain_actions]
langchain_action = gr.Radio(
allowed_actions,
value=allowed_actions[0] if len(allowed_actions) > 0 else None,
label="Action",
visible=True)
allowed_agents = [x for x in langchain_agents_list if x in visible_langchain_agents]
if os.getenv('OPENAI_API_KEY') is None and LangChainAgent.JSON.value in allowed_agents:
allowed_agents.remove(LangChainAgent.JSON.value)
if os.getenv('OPENAI_API_KEY') is None and LangChainAgent.PYTHON.value in allowed_agents:
allowed_agents.remove(LangChainAgent.PYTHON.value)
if LangChainAgent.PANDAS.value in allowed_agents:
allowed_agents.remove(LangChainAgent.PANDAS.value)
langchain_agents = gr.Dropdown(
allowed_agents,
value=None,
label="Agents",
multiselect=True,
interactive=True,
visible=True,
elem_id="langchain_agents",
filterable=False)
visible_doc_track = upload_visible and kwargs['visible_doc_track'] and not kwargs[
'large_file_count_mode']
row_doc_track = gr.Row(visible=visible_doc_track)
with row_doc_track:
if kwargs['langchain_mode'] in langchain_modes_non_db:
doc_counts_str = "Pure LLM Mode"
else:
doc_counts_str = "Name: %s\nDocs: Unset\nChunks: Unset" % kwargs['langchain_mode']
text_doc_count = gr.Textbox(lines=3, label="Doc Counts", value=doc_counts_str,
visible=visible_doc_track)
text_file_last = gr.Textbox(lines=1, label="Newest Doc", value=None, visible=visible_doc_track)
text_viewable_doc_count = gr.Textbox(lines=2, label=None, visible=False)
col_tabs = gr.Column(elem_id="col-tabs", scale=10)
with col_tabs, gr.Tabs():
if kwargs['chat_tables']:
chat_tab = gr.Row(visible=True)
else:
chat_tab = gr.TabItem("Chat") \
if kwargs['visible_chat_tab'] else gr.Row(visible=False)
with chat_tab:
if kwargs['langchain_mode'] == 'Disabled':
text_output_nochat = gr.Textbox(lines=5, label=output_label0, show_copy_button=True,
visible=not kwargs['chat'])
else:
# text looks a bit worse, but HTML links work
text_output_nochat = gr.HTML(label=output_label0, visible=not kwargs['chat'])
with gr.Row():
# NOCHAT
instruction_nochat = gr.Textbox(
lines=kwargs['input_lines'],
label=instruction_label_nochat,
placeholder=kwargs['placeholder_instruction'],
visible=not kwargs['chat'],
)
iinput_nochat = gr.Textbox(lines=4, label="Input context for Instruction",
placeholder=kwargs['placeholder_input'],
value=kwargs['iinput'],
visible=not kwargs['chat'])
submit_nochat = gr.Button("Submit", size='sm', visible=not kwargs['chat'])
flag_btn_nochat = gr.Button("Flag", size='sm', visible=not kwargs['chat'])
score_text_nochat = gr.Textbox("Response Score: NA", show_label=False,
visible=not kwargs['chat'])
submit_nochat_api = gr.Button("Submit nochat API", visible=False)
submit_nochat_api_plain = gr.Button("Submit nochat API Plain", visible=False)
inputs_dict_str = gr.Textbox(label='API input for nochat', show_label=False, visible=False)
text_output_nochat_api = gr.Textbox(lines=5, label='API nochat output', visible=False,
show_copy_button=True)
visible_upload = (allow_upload_to_user_data or
allow_upload_to_my_data) and \
kwargs['langchain_mode'] != 'Disabled'
# CHAT
col_chat = gr.Column(visible=kwargs['chat'])
with col_chat:
with gr.Row():
with gr.Column(scale=50):
with gr.Row(elem_id="prompt-form-row"):
label_instruction = 'Ask anything'
instruction = gr.Textbox(
lines=kwargs['input_lines'],
label=label_instruction,
placeholder=instruction_label,
info=None,
elem_id='prompt-form',
container=True,
)
attach_button = gr.UploadButton(
elem_id="attach-button" if visible_upload else None,
value="",
label="Upload File(s)",
size="sm",
min_width=24,
file_types=['.' + x for x in file_types],
file_count="multiple",
visible=visible_upload)
submit_buttons = gr.Row(equal_height=False, visible=kwargs['visible_submit_buttons'])
with submit_buttons:
mw1 = 50
mw2 = 50
with gr.Column(min_width=mw1):
submit = gr.Button(value='Submit', variant='primary', size='sm',
min_width=mw1)
stop_btn = gr.Button(value="Stop", variant='secondary', size='sm',
min_width=mw1)
save_chat_btn = gr.Button("Save", size='sm', min_width=mw1)
with gr.Column(min_width=mw2):
retry_btn = gr.Button("Redo", size='sm', min_width=mw2)
undo = gr.Button("Undo", size='sm', min_width=mw2)
clear_chat_btn = gr.Button(value="Clear", size='sm', min_width=mw2)
visible_model_choice = bool(kwargs['model_lock']) and \
len(model_states) > 1 and \
kwargs['visible_visible_models']
with gr.Row(visible=visible_model_choice):
visible_models = gr.Dropdown(kwargs['all_models'],
label="Visible Models",
value=visible_models_state0,
interactive=True,
multiselect=True,
visible=visible_model_choice,
elem_id="multi-selection",
filterable=False,
)
text_output, text_output2, text_outputs = make_chatbots(output_label0, output_label0_model2,
**kwargs)
with gr.Row():
with gr.Column(visible=kwargs['score_model']):
score_text = gr.Textbox(res_value,
show_label=False,
visible=True)
score_text2 = gr.Textbox("Response Score2: NA", show_label=False,
visible=False and not kwargs['model_lock'])
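                # Document Selection tab: choose document subsets for the active collection and add/remove/purge collections and their sources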
doc_selection_tab = gr.TabItem("Document Selection") \
if kwargs['visible_doc_selection_tab'] else gr.Row(visible=False)
with doc_selection_tab:
if kwargs['langchain_mode'] in langchain_modes_non_db:
if langchain_mode == LangChainMode.DISABLED.value:
inactive_collection = "#### Document Q/A Disabled -- Chat only mode"
else:
dlabel1 = 'Choose Resources->Collections and Pick Collection'
inactive_collection = "#### Not Chatting with Any Collection\n%s" % dlabel1
active_collection = gr.Markdown(value=inactive_collection)
else:
dlabel1 = 'Select Subset of Document(s) for Chat with Collection: %s' % kwargs['langchain_mode']
active_collection = gr.Markdown(
value="#### Chatting with Collection: %s" % kwargs['langchain_mode'])
document_choice = gr.Dropdown(docs_state0,
label=dlabel1,
value=[DocumentChoice.ALL.value],
interactive=True,
multiselect=True,
visible=kwargs['langchain_mode'] != 'Disabled',
elem_id="multi-selection",
)
sources_visible = kwargs['langchain_mode'] != 'Disabled' and enable_sources_list
with gr.Row():
with gr.Column(scale=1):
get_sources_btn = gr.Button(value="Update UI with Document(s) from DB", scale=0, size='sm',
visible=sources_visible and kwargs['large_file_count_mode'])
# handle API get sources
get_sources_api_btn = gr.Button(visible=False)
get_sources_api_text = gr.Textbox(visible=False)
get_document_api_btn = gr.Button(visible=False)
get_document_api_text = gr.Textbox(visible=False)
show_sources_btn = gr.Button(value="Show Sources from DB", scale=0, size='sm',
visible=sources_visible and kwargs['large_file_count_mode'])
delete_sources_btn = gr.Button(value="Delete Selected Sources from DB", scale=0, size='sm',
visible=sources_visible)
refresh_sources_btn = gr.Button(value="Update DB with new/changed files on disk", scale=0,
size='sm',
visible=sources_visible and allow_upload_to_user_data)
with gr.Column(scale=4):
pass
visible_add_remove_collection = visible_upload
with gr.Row():
with gr.Column(scale=1):
add_placeholder = "e.g. UserData2, shared, user_path2" \
if not is_public else "e.g. MyData2, personal (optional)"
remove_placeholder = "e.g. UserData2" if not is_public else "e.g. MyData2"
new_langchain_mode_text = gr.Textbox(value="", visible=visible_add_remove_collection,
label='Add Collection',
placeholder=add_placeholder,
interactive=True)
remove_langchain_mode_text = gr.Textbox(value="", visible=visible_add_remove_collection,
label='Remove Collection from UI',
placeholder=remove_placeholder,
interactive=True)
purge_langchain_mode_text = gr.Textbox(value="", visible=visible_add_remove_collection,
label='Purge Collection (UI, DB, & source files)',
placeholder=remove_placeholder,
interactive=True)
sync_sources_btn = gr.Button(
value="Synchronize DB and UI [only required if did not login and have shared docs]",
scale=0, size='sm',
visible=sources_visible and allow_upload_to_user_data and not kwargs[
'large_file_count_mode'])
load_langchain = gr.Button(
value="Load Collections State [only required if logged in another user ", scale=0,
size='sm',
visible=False and allow_upload_to_user_data and
kwargs['langchain_mode'] != 'Disabled')
with gr.Column(scale=5):
if kwargs['langchain_mode'] != 'Disabled' and visible_add_remove_collection:
df0 = get_df_langchain_mode_paths(selection_docs_state0, None, dbs1=dbs)
else:
df0 = pd.DataFrame(None)
langchain_mode_path_text = gr.Dataframe(value=df0,
visible=visible_add_remove_collection,
label='LangChain Mode-Path',
show_label=False,
interactive=False)
sources_row = gr.Row(visible=kwargs['langchain_mode'] != 'Disabled' and enable_sources_list,
equal_height=False)
with sources_row:
with gr.Column(scale=1):
file_source = gr.File(interactive=False,
label="Download File w/Sources")
with gr.Column(scale=2):
sources_text = gr.HTML(label='Sources Added', interactive=False)
doc_exception_text = gr.Textbox(value="", label='Document Exceptions',
interactive=False,
visible=kwargs['langchain_mode'] != 'Disabled')
file_types_str = ' '.join(file_types) + ' URL ArXiv TEXT'
gr.Textbox(value=file_types_str, label='Document Types Supported',
lines=2,
interactive=False,
visible=kwargs['langchain_mode'] != 'Disabled')
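                # Document Viewer tab: pick a single document from the DB and render it (HTML, dataframe, JSON, or markdown)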
doc_view_tab = gr.TabItem("Document Viewer") \
if kwargs['visible_doc_view_tab'] else gr.Row(visible=False)
with doc_view_tab:
with gr.Row(visible=kwargs['langchain_mode'] != 'Disabled'):
with gr.Column(scale=2):
get_viewable_sources_btn = gr.Button(value="Update UI with Document(s) from DB", scale=0,
size='sm',
visible=sources_visible and kwargs[
'large_file_count_mode'])
view_document_choice = gr.Dropdown(viewable_docs_state0,
label="Select Single Document to View",
value=None,
interactive=True,
multiselect=False,
visible=True,
elem_id="single-selection",
)
info_view_raw = "Raw text shown if render of original doc fails"
if is_public:
info_view_raw += " (Up to %s chunks in public portal)" % kwargs['max_raw_chunks']
view_raw_text_checkbox = gr.Checkbox(label="View Database Text", value=False,
info=info_view_raw,
visible=kwargs['db_type'] in ['chroma', 'chroma_old'])
with gr.Column(scale=4):
pass
doc_view = gr.HTML(visible=False)
doc_view2 = gr.Dataframe(visible=False)
doc_view3 = gr.JSON(visible=False)
doc_view4 = gr.Markdown(visible=False)
doc_view5 = gr.HTML(visible=False)
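                # Chat History tab: remove/flag/export saved chats, import chat files, and show chat exceptions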
chat_tab = gr.TabItem("Chat History") \
if kwargs['visible_chat_history_tab'] else gr.Row(visible=False)
with chat_tab:
with gr.Row():
with gr.Column(scale=1):
remove_chat_btn = gr.Button(value="Remove Selected Saved Chats", visible=True, size='sm')
flag_btn = gr.Button("Flag Current Chat", size='sm')
export_chats_btn = gr.Button(value="Export Chats to Download", size='sm')
with gr.Column(scale=4):
pass
with gr.Row():
chats_file = gr.File(interactive=False, label="Download Exported Chats")
chatsup_output = gr.File(label="Upload Chat File(s)",
file_types=['.json'],
file_count='multiple',
elem_id="warning", elem_classes="feedback")
with gr.Row():
if 'mbart-' in kwargs['model_lower']:
src_lang = gr.Dropdown(list(languages_covered().keys()),
value=kwargs['src_lang'],
label="Input Language")
tgt_lang = gr.Dropdown(list(languages_covered().keys()),
value=kwargs['tgt_lang'],
label="Output Language")
chat_exception_text = gr.Textbox(value="", visible=True, label='Chat Exceptions',
interactive=False)
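                # Expert tab: prompt and context controls, document-handling options, and generation hyperparameters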
expert_tab = gr.TabItem("Expert") \
if kwargs['visible_expert_tab'] else gr.Row(visible=False)
with expert_tab:
with gr.Row():
with gr.Column():
prompt_type = gr.Dropdown(prompt_types_strings,
value=kwargs['prompt_type'], label="Prompt Type",
visible=not kwargs['model_lock'],
interactive=not is_public,
)
prompt_type2 = gr.Dropdown(prompt_types_strings,
value=kwargs['prompt_type'], label="Prompt Type Model 2",
visible=False and not kwargs['model_lock'],
interactive=not is_public)
system_prompt = gr.Textbox(label="System Prompt",
info="If 'auto', then uses model's system prompt,"
" else use this message."
" If empty, no system message is used",
value=kwargs['system_prompt'])
context = gr.Textbox(lines=2, label="System Pre-Context",
info="Directly pre-appended without prompt processing (before Pre-Conversation)",
value=kwargs['context'])
chat_conversation = gr.Textbox(lines=2, label="Pre-Conversation",
info="Pre-append conversation for instruct/chat models as List of tuple of (human, bot)",
value=kwargs['chat_conversation'])
text_context_list = gr.Textbox(lines=2, label="Text Doc Q/A",
info="List of strings, for document Q/A, for bypassing database (i.e. also works in LLM Mode)",
                                                           value='',
visible=not is_public, # primarily meant for API
)
iinput = gr.Textbox(lines=2, label="Input for Instruct prompt types",
info="If given for document query, added after query",
value=kwargs['iinput'],
placeholder=kwargs['placeholder_input'],
interactive=not is_public)
with gr.Column():
pre_prompt_query = gr.Textbox(label="Query Pre-Prompt",
info="Added before documents",
value=kwargs['pre_prompt_query'] or '')
prompt_query = gr.Textbox(label="Query Prompt",
info="Added after documents",
value=kwargs['prompt_query'] or '')
pre_prompt_summary = gr.Textbox(label="Summary Pre-Prompt",
info="Added before documents",
value=kwargs['pre_prompt_summary'] or '')
prompt_summary = gr.Textbox(label="Summary Prompt",
info="Added after documents (if query given, 'Focusing on {query}, ' is pre-appended)",
value=kwargs['prompt_summary'] or '')
with gr.Row(visible=not is_public):
image_loaders = gr.CheckboxGroup(image_loaders_options,
label="Force Image Reader",
value=image_loaders_options0)
pdf_loaders = gr.CheckboxGroup(pdf_loaders_options,
label="Force PDF Reader",
value=pdf_loaders_options0)
url_loaders = gr.CheckboxGroup(url_loaders_options,
label="Force URL Reader", value=url_loaders_options0)
jq_schema = gr.Textbox(label="JSON jq_schema", value=jq_schema0)
min_top_k_docs, max_top_k_docs, label_top_k_docs = get_minmax_top_k_docs(is_public)
top_k_docs = gr.Slider(minimum=min_top_k_docs, maximum=max_top_k_docs, step=1,
value=kwargs['top_k_docs'],
label=label_top_k_docs,
# info="For LangChain",
visible=kwargs['langchain_mode'] != 'Disabled',
interactive=not is_public)
chunk = gr.components.Checkbox(value=kwargs['chunk'],
label="Whether to chunk documents",
info="For LangChain",
visible=kwargs['langchain_mode'] != 'Disabled',
interactive=not is_public)
chunk_size = gr.Number(value=kwargs['chunk_size'],
label="Chunk size for document chunking",
info="For LangChain (ignored if chunk=False)",
minimum=128,
maximum=2048,
visible=kwargs['langchain_mode'] != 'Disabled',
interactive=not is_public,
precision=0)
docs_ordering_type = gr.Radio(
docs_ordering_types,
value=kwargs['docs_ordering_type'],
label="Document Sorting in LLM Context",
visible=True)
docs_token_handling = gr.Radio(
docs_token_handlings,
value=kwargs['docs_token_handling'],
label="Document Handling Mode for filling LLM Context",
visible=True)
docs_joiner = gr.Textbox(label="String to join lists and documents",
value=kwargs['docs_joiner'] or docs_joiner_default)
max_hyde_level = 0 if is_public else 5
hyde_level = gr.Slider(minimum=0, maximum=max_hyde_level, step=1,
value=kwargs['hyde_level'],
label='HYDE level',
info="Whether to use HYDE approach for LLM getting answer to embed (0=disabled, 1=non-doc LLM answer, 2=doc-based LLM answer)",
visible=kwargs['langchain_mode'] != 'Disabled',
interactive=not is_public)
hyde_template = gr.components.Textbox(value='auto',
label="HYDE Embedding Template",
info="HYDE approach for LLM getting answer to embed ('auto' means automatic, else enter template like '{query}'",
visible=True)
embed = gr.components.Checkbox(value=True,
label="Whether to embed text",
info="For LangChain",
visible=False)
with gr.Row():
stream_output = gr.components.Checkbox(label="Stream output",
value=kwargs['stream_output'])
do_sample = gr.Checkbox(label="Sample",
info="Enable sampler (required for use of temperature, top_p, top_k)",
value=kwargs['do_sample'])
max_time = gr.Slider(minimum=0, maximum=kwargs['max_max_time'], step=1,
value=min(kwargs['max_max_time'],
kwargs['max_time']), label="Max. time",
info="Max. time to search optimal output.")
temperature = gr.Slider(minimum=0.01, maximum=2,
value=kwargs['temperature'],
label="Temperature",
info="Lower is deterministic, higher more creative")
top_p = gr.Slider(minimum=1e-3, maximum=1.0 - 1e-3,
value=kwargs['top_p'], label="Top p",
info="Cumulative probability of tokens to sample from")
top_k = gr.Slider(
minimum=1, maximum=100, step=1,
value=kwargs['top_k'], label="Top k",
info='Num. tokens to sample from'
)
penalty_alpha = gr.Slider(
minimum=0.0, maximum=2.0, step=0.01,
value=kwargs['penalty_alpha'], label="penalty_alpha",
info='penalty_alpha>0 and top_k>1 enables contrastive search'
)
# FIXME: https://github.com/h2oai/h2ogpt/issues/106
if os.getenv('TESTINGFAIL'):
max_beams = 8 if not (memory_restriction_level or is_public) else 1
else:
max_beams = 1
num_beams = gr.Slider(minimum=1, maximum=max_beams, step=1,
value=min(max_beams, kwargs['num_beams']), label="Beams",
info="Number of searches for optimal overall probability. "
"Uses more GPU memory/compute",
interactive=False, visible=max_beams > 1)
max_max_new_tokens = get_max_max_new_tokens(model_state0, **kwargs)
max_new_tokens = gr.Slider(
minimum=1, maximum=max_max_new_tokens, step=1,
value=min(max_max_new_tokens, kwargs['max_new_tokens']), label="Max output length",
)
min_new_tokens = gr.Slider(
minimum=0, maximum=max_max_new_tokens, step=1,
value=min(max_max_new_tokens, kwargs['min_new_tokens']), label="Min output length",
)
max_new_tokens2 = gr.Slider(
minimum=1, maximum=max_max_new_tokens, step=1,
value=min(max_max_new_tokens, kwargs['max_new_tokens']), label="Max output length 2",
visible=False and not kwargs['model_lock'],
)
min_new_tokens2 = gr.Slider(
minimum=0, maximum=max_max_new_tokens, step=1,
value=min(max_max_new_tokens, kwargs['min_new_tokens']), label="Min output length 2",
visible=False and not kwargs['model_lock'],
)
min_max_new_tokens = gr.Slider(
minimum=1, maximum=max_max_new_tokens, step=1,
value=min(max_max_new_tokens, kwargs['min_max_new_tokens']),
label="Min. of Max output length",
visible=not is_public,
)
max_input_tokens = gr.Number(
minimum=-1, maximum=128 * 1024, step=1,
value=-1,
label="Max input length (treat as if model has more limited context, e.g. for context-filling when top_k_docs=-1)",
visible=not is_public,
)
early_stopping = gr.Checkbox(label="EarlyStopping", info="Stop early in beam search",
value=kwargs['early_stopping'], visible=max_beams > 1)
repetition_penalty = gr.Slider(minimum=0.01, maximum=3.0,
value=kwargs['repetition_penalty'],
label="Repetition Penalty")
num_return_sequences = gr.Slider(minimum=1, maximum=10, step=1,
value=kwargs['num_return_sequences'],
label="Number Returns", info="Must be <= num_beams",
interactive=not is_public, visible=max_beams > 1)
chat = gr.components.Checkbox(label="Chat mode", value=kwargs['chat'],
visible=False, # no longer support nochat in UI
interactive=not is_public,
)
with gr.Row():
count_chat_tokens_btn = gr.Button(value="Count Chat Tokens",
visible=not is_public and not kwargs['model_lock'],
interactive=not is_public, size='sm')
chat_token_count = gr.Textbox(label="Chat Token Count Result", value=None,
visible=not is_public and not kwargs['model_lock'],
interactive=False)
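                # Models tab: load/unload base models, LORA weights, and inference servers, with optional two-model comparison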
models_tab = gr.TabItem("Models") \
if kwargs['visible_models_tab'] and not bool(kwargs['model_lock']) else gr.Row(visible=False)
with models_tab:
load_msg = "Download/Load Model" if not is_public \
else "LOAD-UNLOAD DISABLED FOR HOSTED DEMO"
if kwargs['base_model'] not in ['', None, no_model_str]:
load_msg += ' [WARNING: Avoid --base_model on CLI for memory efficient Load-Unload]'
load_msg2 = load_msg + "(Model 2)"
variant_load_msg = 'primary' if not is_public else 'secondary'
with gr.Row():
n_gpus_list = [str(x) for x in list(range(-1, n_gpus))]
with gr.Column():
with gr.Row():
with gr.Column(scale=20, visible=not kwargs['model_lock']):
load_model_button = gr.Button(load_msg, variant=variant_load_msg, scale=0,
size='sm', interactive=not is_public)
model_choice = gr.Dropdown(model_options_state.value[0], label="Choose Base Model",
value=kwargs['base_model'])
lora_choice = gr.Dropdown(lora_options_state.value[0], label="Choose LORA",
value=kwargs['lora_weights'], visible=kwargs['show_lora'])
server_choice = gr.Dropdown(server_options_state.value[0], label="Choose Server",
value=kwargs['inference_server'], visible=not is_public)
max_seq_len = gr.Number(value=kwargs['max_seq_len'] or 2048,
minimum=128,
maximum=2 ** 18,
info="If standard LLaMa-2, choose up to 4096",
label="max_seq_len")
rope_scaling = gr.Textbox(value=str(kwargs['rope_scaling'] or {}),
label="rope_scaling")
row_llama = gr.Row(visible=kwargs['show_llama'] and kwargs['base_model'] == 'llama')
with row_llama:
model_path_llama = gr.Textbox(value=kwargs['llamacpp_dict']['model_path_llama'],
lines=4,
label="Choose LLaMa.cpp Model Path/URL (for Base Model: llama)",
visible=kwargs['show_llama'])
n_gpu_layers = gr.Number(value=kwargs['llamacpp_dict']['n_gpu_layers'],
minimum=0, maximum=100,
label="LLaMa.cpp Num. GPU Layers Offloaded",
visible=kwargs['show_llama'])
n_batch = gr.Number(value=kwargs['llamacpp_dict']['n_batch'],
minimum=0, maximum=2048,
label="LLaMa.cpp Batch Size",
visible=kwargs['show_llama'])
n_gqa = gr.Number(value=kwargs['llamacpp_dict']['n_gqa'],
minimum=0, maximum=32,
label="LLaMa.cpp Num. Group Query Attention (8 for 70B LLaMa2)",
visible=kwargs['show_llama'])
llamacpp_dict_more = gr.Textbox(value="{}",
lines=4,
label="Dict for other LLaMa.cpp/GPT4All options",
visible=kwargs['show_llama'])
row_gpt4all = gr.Row(
visible=kwargs['show_gpt4all'] and kwargs['base_model'] in ['gptj',
'gpt4all_llama'])
with row_gpt4all:
model_name_gptj = gr.Textbox(value=kwargs['llamacpp_dict']['model_name_gptj'],
label="Choose GPT4All GPTJ Model Path/URL (for Base Model: gptj)",
visible=kwargs['show_gpt4all'])
model_name_gpt4all_llama = gr.Textbox(
value=kwargs['llamacpp_dict']['model_name_gpt4all_llama'],
label="Choose GPT4All LLaMa Model Path/URL (for Base Model: gpt4all_llama)",
visible=kwargs['show_gpt4all'])
with gr.Column(scale=1, visible=not kwargs['model_lock']):
model_load8bit_checkbox = gr.components.Checkbox(
label="Load 8-bit [requires support]",
value=kwargs['load_8bit'], interactive=not is_public)
model_load4bit_checkbox = gr.components.Checkbox(
label="Load 4-bit [requires support]",
value=kwargs['load_4bit'], interactive=not is_public)
model_low_bit_mode = gr.Slider(value=kwargs['low_bit_mode'],
minimum=0, maximum=4, step=1,
label="low_bit_mode")
model_load_gptq = gr.Textbox(label="gptq", value=kwargs['load_gptq'],
interactive=not is_public)
model_load_awq = gr.Textbox(label="awq", value=kwargs['load_awq'],
interactive=not is_public)
model_load_exllama_checkbox = gr.components.Checkbox(
label="Load load_exllama [requires support]",
value=kwargs['load_exllama'], interactive=not is_public)
model_safetensors_checkbox = gr.components.Checkbox(
label="Safetensors [requires support]",
value=kwargs['use_safetensors'], interactive=not is_public)
model_revision = gr.Textbox(label="revision", value=kwargs['revision'],
interactive=not is_public)
model_use_gpu_id_checkbox = gr.components.Checkbox(
label="Choose Devices [If not Checked, use all GPUs]",
value=kwargs['use_gpu_id'], interactive=not is_public,
visible=n_gpus != 0)
model_gpu = gr.Dropdown(n_gpus_list,
label="GPU ID [-1 = all GPUs, if Choose is enabled]",
value=kwargs['gpu_id'], interactive=not is_public,
visible=n_gpus != 0)
model_used = gr.Textbox(label="Current Model", value=kwargs['base_model'],
interactive=False)
lora_used = gr.Textbox(label="Current LORA", value=kwargs['lora_weights'],
visible=kwargs['show_lora'], interactive=False)
server_used = gr.Textbox(label="Current Server",
value=kwargs['inference_server'],
visible=bool(kwargs['inference_server']) and not is_public,
interactive=False)
prompt_dict = gr.Textbox(label="Prompt (or Custom)",
value=pprint.pformat(kwargs['prompt_dict'], indent=4),
interactive=not is_public, lines=4)
col_model2 = gr.Column(visible=False)
with col_model2:
with gr.Row():
with gr.Column(scale=20, visible=not kwargs['model_lock']):
load_model_button2 = gr.Button(load_msg2, variant=variant_load_msg, scale=0,
size='sm', interactive=not is_public)
model_choice2 = gr.Dropdown(model_options_state.value[0], label="Choose Model 2",
value=no_model_str)
lora_choice2 = gr.Dropdown(lora_options_state.value[0], label="Choose LORA 2",
value=no_lora_str,
visible=kwargs['show_lora'])
server_choice2 = gr.Dropdown(server_options_state.value[0], label="Choose Server 2",
value=no_server_str,
visible=not is_public)
max_seq_len2 = gr.Number(value=kwargs['max_seq_len'] or 2048,
minimum=128,
maximum=2 ** 18,
info="If standard LLaMa-2, choose up to 4096",
label="max_seq_len Model 2")
rope_scaling2 = gr.Textbox(value=str(kwargs['rope_scaling'] or {}),
label="rope_scaling Model 2")
row_llama2 = gr.Row(
visible=kwargs['show_llama'] and kwargs['base_model'] == 'llama')
with row_llama2:
model_path_llama2 = gr.Textbox(
value=kwargs['llamacpp_dict']['model_path_llama'],
label="Choose LLaMa.cpp Model 2 Path/URL (for Base Model: llama)",
lines=4,
visible=kwargs['show_llama'])
n_gpu_layers2 = gr.Number(value=kwargs['llamacpp_dict']['n_gpu_layers'],
minimum=0, maximum=100,
label="LLaMa.cpp Num. GPU 2 Layers Offloaded",
visible=kwargs['show_llama'])
n_batch2 = gr.Number(value=kwargs['llamacpp_dict']['n_batch'],
minimum=0, maximum=2048,
label="LLaMa.cpp Model 2 Batch Size",
visible=kwargs['show_llama'])
n_gqa2 = gr.Number(value=kwargs['llamacpp_dict']['n_gqa'],
minimum=0, maximum=32,
label="LLaMa.cpp Model 2 Num. Group Query Attention (8 for 70B LLaMa2)",
visible=kwargs['show_llama'])
llamacpp_dict_more2 = gr.Textbox(value="{}",
lines=4,
label="Model 2 Dict for other LLaMa.cpp/GPT4All options",
visible=kwargs['show_llama'])
row_gpt4all2 = gr.Row(
visible=kwargs['show_gpt4all'] and kwargs['base_model'] in ['gptj',
'gpt4all_llama'])
with row_gpt4all2:
model_name_gptj2 = gr.Textbox(value=kwargs['llamacpp_dict']['model_name_gptj'],
label="Choose GPT4All GPTJ Model 2 Path/URL (for Base Model: gptj)",
visible=kwargs['show_gpt4all'])
model_name_gpt4all_llama2 = gr.Textbox(
value=kwargs['llamacpp_dict']['model_name_gpt4all_llama'],
label="Choose GPT4All LLaMa Model 2 Path/URL (for Base Model: gpt4all_llama)",
visible=kwargs['show_gpt4all'])
with gr.Column(scale=1, visible=not kwargs['model_lock']):
model_load8bit_checkbox2 = gr.components.Checkbox(
label="Load 8-bit (Model 2) [requires support]",
value=kwargs['load_8bit'], interactive=not is_public)
model_load4bit_checkbox2 = gr.components.Checkbox(
label="Load 4-bit (Model 2) [requires support]",
value=kwargs['load_4bit'], interactive=not is_public)
model_low_bit_mode2 = gr.Slider(value=kwargs['low_bit_mode'],
# ok that same as Model 1
minimum=0, maximum=4, step=1,
label="low_bit_mode (Model 2)")
model_load_gptq2 = gr.Textbox(label="gptq (Model 2)", value='',
interactive=not is_public)
model_load_awq2 = gr.Textbox(label="awq", value='',
interactive=not is_public)
model_load_exllama_checkbox2 = gr.components.Checkbox(
label="Load load_exllama (Model 2) [requires support]",
value=False, interactive=not is_public)
model_safetensors_checkbox2 = gr.components.Checkbox(
label="Safetensors (Model 2) [requires support]",
value=False, interactive=not is_public)
model_revision2 = gr.Textbox(label="revision (Model 2)", value='',
interactive=not is_public)
model_use_gpu_id_checkbox2 = gr.components.Checkbox(
label="Choose Devices (Model 2) [If not Checked, use all GPUs]",
value=kwargs[
'use_gpu_id'], interactive=not is_public)
model_gpu2 = gr.Dropdown(n_gpus_list,
label="GPU ID (Model 2) [-1 = all GPUs, if choose is enabled]",
value=kwargs['gpu_id'], interactive=not is_public)
# no model/lora loaded ever in model2 by default
model_used2 = gr.Textbox(label="Current Model 2", value=no_model_str,
interactive=False)
lora_used2 = gr.Textbox(label="Current LORA (Model 2)", value=no_lora_str,
visible=kwargs['show_lora'], interactive=False)
server_used2 = gr.Textbox(label="Current Server (Model 2)", value=no_server_str,
interactive=False,
visible=not is_public)
prompt_dict2 = gr.Textbox(label="Prompt (or Custom) (Model 2)",
value=pprint.pformat(kwargs['prompt_dict'], indent=4),
interactive=not is_public, lines=4)
compare_checkbox = gr.components.Checkbox(label="Compare Two Models",
value=kwargs['model_lock'],
visible=not is_public and not kwargs['model_lock'])
with gr.Row(visible=not kwargs['model_lock']):
with gr.Column(scale=50):
new_model = gr.Textbox(label="New Model name/path/URL", interactive=not is_public)
with gr.Column(scale=50):
new_lora = gr.Textbox(label="New LORA name/path/URL", visible=kwargs['show_lora'],
interactive=not is_public)
with gr.Column(scale=50):
new_server = gr.Textbox(label="New Server url:port", interactive=not is_public)
with gr.Row():
add_model_lora_server_button = gr.Button("Add new Model, Lora, Server url:port", scale=0,
variant=variant_load_msg,
size='sm', interactive=not is_public)
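                # System tab: UI toggles, admin access, system info, and zip/S3 upload of flagged data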
system_tab = gr.TabItem("System") \
if kwargs['visible_system_tab'] else gr.Row(visible=False)
with system_tab:
with gr.Row():
with gr.Column(scale=1):
side_bar_text = gr.Textbox('on' if kwargs['visible_side_bar'] else 'off',
visible=False, interactive=False)
doc_count_text = gr.Textbox('on' if kwargs['visible_doc_track'] else 'off',
visible=False, interactive=False)
submit_buttons_text = gr.Textbox('on' if kwargs['visible_submit_buttons'] else 'off',
visible=False, interactive=False)
visible_models_text = gr.Textbox('on' if kwargs['visible_visible_models'] else 'off',
visible=False, interactive=False)
side_bar_btn = gr.Button("Toggle SideBar", variant="secondary", size="sm")
doc_count_btn = gr.Button("Toggle SideBar Document Count/Show Newest", variant="secondary",
size="sm")
submit_buttons_btn = gr.Button("Toggle Submit Buttons", variant="secondary", size="sm")
visible_model_btn = gr.Button("Toggle Visible Models", variant="secondary", size="sm")
col_tabs_scale = gr.Slider(minimum=1, maximum=20, value=10, step=1, label='Window Size')
text_outputs_height = gr.Slider(minimum=100, maximum=2000, value=kwargs['height'] or 400,
step=50, label='Chat Height')
dark_mode_btn = gr.Button("Dark Mode", variant="secondary", size="sm")
with gr.Column(scale=4):
pass
system_visible0 = not is_public and not admin_pass
admin_row = gr.Row()
with admin_row:
with gr.Column(scale=1):
admin_pass_textbox = gr.Textbox(label="Admin Password",
type='password',
visible=not system_visible0)
with gr.Column(scale=4):
pass
system_row = gr.Row(visible=system_visible0)
with system_row:
with gr.Column():
with gr.Row():
system_btn = gr.Button(value='Get System Info', size='sm')
system_text = gr.Textbox(label='System Info', interactive=False, show_copy_button=True)
with gr.Row():
system_input = gr.Textbox(label='System Info Dict Password', interactive=True,
visible=not is_public)
system_btn2 = gr.Button(value='Get System Info Dict', visible=not is_public, size='sm')
system_text2 = gr.Textbox(label='System Info Dict', interactive=False,
visible=not is_public, show_copy_button=True)
with gr.Row():
system_btn3 = gr.Button(value='Get Hash', visible=not is_public, size='sm')
system_text3 = gr.Textbox(label='Hash', interactive=False,
visible=not is_public, show_copy_button=True)
system_btn4 = gr.Button(value='Get Model Names', visible=not is_public, size='sm')
system_text4 = gr.Textbox(label='Model Names', interactive=False,
visible=not is_public, show_copy_button=True)
with gr.Row():
zip_btn = gr.Button("Zip", size='sm')
zip_text = gr.Textbox(label="Zip file name", interactive=False)
file_output = gr.File(interactive=False, label="Zip file to Download")
with gr.Row():
s3up_btn = gr.Button("S3UP", size='sm')
s3up_text = gr.Textbox(label='S3UP result', interactive=False)
tos_tab = gr.TabItem("Terms of Service") \
if kwargs['visible_tos_tab'] else gr.Row(visible=False)
with tos_tab:
description = ""
description += """<p><b> DISCLAIMERS: </b><ul><i><li>The model was trained on The Pile and other data, which may contain objectionable content. Use at own risk.</i></li>"""
if kwargs['load_8bit']:
description += """<i><li> Model is loaded in 8-bit and has other restrictions on this host. UX can be worse than non-hosted version.</i></li>"""
description += """<i><li>Conversations may be used to improve h2oGPT. Do not share sensitive information.</i></li>"""
if 'h2ogpt-research' in kwargs['base_model']:
description += """<i><li>Research demonstration only, not used for commercial purposes.</i></li>"""
description += """<i><li>By using h2oGPT, you accept our <a href="https://github.com/h2oai/h2ogpt/blob/main/docs/tos.md">Terms of Service</a></i></li></ul></p>"""
gr.Markdown(value=description, show_label=False, interactive=False)
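                # Login tab: username/password login to persist per-user state, plus optional h2oGPT key entry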
login_tab = gr.TabItem("Login") \
if kwargs['visible_login_tab'] else gr.Row(visible=False)
with login_tab:
gr.Markdown(
value="#### Login page to persist your state (database, documents, chat, chat history)\nDaily maintenance at midnight PST will not allow reconnection to state otherwise.")
username_text = gr.Textbox(label="Username")
password_text = gr.Textbox(label="Password", type='password', visible=True)
login_msg = "Login (pick unique user/pass to persist your state)" if kwargs[
'auth_access'] == 'open' else "Login (closed access)"
login_btn = gr.Button(value=login_msg)
login_result_text = gr.Text(label="Login Result", interactive=False)
if kwargs['enforce_h2ogpt_api_key'] and kwargs['enforce_h2ogpt_ui_key']:
label_h2ogpt_key = "h2oGPT Token for API and UI access"
elif kwargs['enforce_h2ogpt_api_key']:
label_h2ogpt_key = "h2oGPT Token for API access"
elif kwargs['enforce_h2ogpt_ui_key']:
label_h2ogpt_key = "h2oGPT Token for UI access"
else:
label_h2ogpt_key = 'Unused'
h2ogpt_key = gr.Text(value=kwargs['h2ogpt_key'],
label=label_h2ogpt_key,
type='password',
visible=kwargs['enforce_h2ogpt_ui_key'], # only show if need for UI
)
hosts_tab = gr.TabItem("Hosts") \
if kwargs['visible_hosts_tab'] else gr.Row(visible=False)
with hosts_tab:
gr.Markdown(f"""
{description_bottom}
{task_info_md}
""")
# Get flagged data
zip_data1 = functools.partial(zip_data, root_dirs=['flagged_data_points', kwargs['save_dir']])
zip_event = zip_btn.click(zip_data1, inputs=None, outputs=[file_output, zip_text], queue=False,
api_name='zip_data' if allow_api else None)
s3up_event = s3up_btn.click(s3up, inputs=zip_text, outputs=s3up_text, queue=False,
api_name='s3up_data' if allow_api else None)
def clear_file_list():
return None
def set_loaders(max_quality1,
image_loaders_options1=None,
pdf_loaders_options1=None,
url_loaders_options1=None,
image_loaders_options01=None,
pdf_loaders_options01=None,
url_loaders_options01=None,
):
if not max_quality1:
return image_loaders_options01, pdf_loaders_options01, url_loaders_options01
else:
return image_loaders_options1, pdf_loaders_options1, url_loaders_options1
set_loaders_func = functools.partial(set_loaders,
image_loaders_options1=image_loaders_options,
pdf_loaders_options1=pdf_loaders_options,
url_loaders_options1=url_loaders_options,
image_loaders_options01=image_loaders_options0,
pdf_loaders_options01=pdf_loaders_options0,
url_loaders_options01=url_loaders_options0,
)
max_quality.change(fn=set_loaders_func,
inputs=max_quality,
outputs=[image_loaders, pdf_loaders, url_loaders])
def get_model_lock_visible_list(visible_models1, all_models):
visible_list = []
for modeli, model in enumerate(all_models):
if visible_models1 is None or model in visible_models1 or modeli in visible_models1:
visible_list.append(True)
else:
visible_list.append(False)
return visible_list
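            # Show or hide each per-model chat box according to the Visible Models selection (model-lock mode)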
def set_visible_models(visible_models1, num_model_lock=0, all_models=None):
if num_model_lock == 0:
num_model_lock = 3 # 2 + 1 (which is dup of first)
ret_list = [gr.Textbox(visible=True)] * num_model_lock
else:
assert isinstance(all_models, list)
assert num_model_lock == len(all_models)
visible_list = [False, False] + get_model_lock_visible_list(visible_models1, all_models)
ret_list = [gr.Textbox(visible=x) for x in visible_list]
return tuple(ret_list)
visible_models_func = functools.partial(set_visible_models,
num_model_lock=len(text_outputs),
all_models=kwargs['all_models'])
visible_models.change(fn=visible_models_func,
inputs=visible_models,
outputs=[text_output, text_output2] + text_outputs,
)
# Add to UserData or custom user db
update_db_func = functools.partial(update_user_db_gr,
dbs=dbs,
db_type=db_type,
use_openai_embedding=use_openai_embedding,
hf_embedding_model=hf_embedding_model,
migrate_embedding_model=migrate_embedding_model,
auto_migrate_db=auto_migrate_db,
captions_model=captions_model,
caption_loader=caption_loader,
doctr_loader=doctr_loader,
verbose=kwargs['verbose'],
n_jobs=kwargs['n_jobs'],
get_userid_auth=get_userid_auth,
image_loaders_options0=image_loaders_options0,
pdf_loaders_options0=pdf_loaders_options0,
url_loaders_options0=url_loaders_options0,
jq_schema0=jq_schema0,
enforce_h2ogpt_api_key=kwargs['enforce_h2ogpt_api_key'],
enforce_h2ogpt_ui_key=kwargs['enforce_h2ogpt_ui_key'],
h2ogpt_api_keys=kwargs['h2ogpt_api_keys'],
)
add_file_outputs = [fileup_output, langchain_mode]
add_file_kwargs = dict(fn=update_db_func,
inputs=[fileup_output, my_db_state, selection_docs_state, requests_state,
langchain_mode, chunk, chunk_size, embed,
image_loaders,
pdf_loaders,
url_loaders,
jq_schema,
h2ogpt_key,
],
outputs=add_file_outputs + [sources_text, doc_exception_text, text_file_last],
queue=queue,
api_name='add_file' if allow_upload_api else None)
# then no need for add buttons, only single changeable db
user_state_kwargs = dict(fn=user_state_setup,
inputs=[my_db_state, requests_state, langchain_mode],
outputs=[my_db_state, requests_state, langchain_mode],
show_progress='minimal')
eventdb1a = fileup_output.upload(**user_state_kwargs)
eventdb1 = eventdb1a.then(**add_file_kwargs, show_progress='full')
event_attach1 = attach_button.upload(**user_state_kwargs)
attach_file_kwargs = add_file_kwargs.copy()
attach_file_kwargs['inputs'][0] = attach_button
attach_file_kwargs['outputs'][0] = attach_button
attach_file_kwargs['api_name'] = 'attach_file'
event_attach2 = event_attach1.then(**attach_file_kwargs, show_progress='full')
sync1 = sync_sources_btn.click(**user_state_kwargs)
# deal with challenge to have fileup_output itself as input
add_file_kwargs2 = dict(fn=update_db_func,
inputs=[fileup_output_text, my_db_state, selection_docs_state, requests_state,
langchain_mode, chunk, chunk_size, embed,
image_loaders,
pdf_loaders,
url_loaders,
jq_schema,
h2ogpt_key,
],
outputs=add_file_outputs + [sources_text, doc_exception_text, text_file_last],
queue=queue,
api_name='add_file_api' if allow_upload_api else None)
eventdb1_api = fileup_output_text.submit(**add_file_kwargs2, show_progress='full')
            # note: for update_user_db_func, the output is ignored for the db
def clear_textbox():
return gr.Textbox(value='')
update_user_db_url_func = functools.partial(update_db_func, is_url=True)
add_url_outputs = [url_text, langchain_mode]
add_url_kwargs = dict(fn=update_user_db_url_func,
inputs=[url_text, my_db_state, selection_docs_state, requests_state,
langchain_mode, chunk, chunk_size, embed,
image_loaders,
pdf_loaders,
url_loaders,
jq_schema,
h2ogpt_key,
],
outputs=add_url_outputs + [sources_text, doc_exception_text, text_file_last],
queue=queue,
api_name='add_url' if allow_upload_api else None)
eventdb2a = url_text.submit(fn=user_state_setup,
inputs=[my_db_state, requests_state, url_text, url_text],
outputs=[my_db_state, requests_state, url_text],
queue=queue,
show_progress='minimal')
# work around https://github.com/gradio-app/gradio/issues/4733
eventdb2 = eventdb2a.then(**add_url_kwargs, show_progress='full')
update_user_db_txt_func = functools.partial(update_db_func, is_txt=True)
add_text_outputs = [user_text_text, langchain_mode]
add_text_kwargs = dict(fn=update_user_db_txt_func,
inputs=[user_text_text, my_db_state, selection_docs_state, requests_state,
langchain_mode, chunk, chunk_size, embed,
image_loaders,
pdf_loaders,
url_loaders,
jq_schema,
h2ogpt_key,
],
outputs=add_text_outputs + [sources_text, doc_exception_text, text_file_last],
queue=queue,
api_name='add_text' if allow_upload_api else None
)
eventdb3a = user_text_text.submit(fn=user_state_setup,
inputs=[my_db_state, requests_state, user_text_text, user_text_text],
outputs=[my_db_state, requests_state, user_text_text],
queue=queue,
show_progress='minimal')
eventdb3 = eventdb3a.then(**add_text_kwargs, show_progress='full')
db_events = [eventdb1a, eventdb1, eventdb1_api,
eventdb2a, eventdb2,
eventdb3a, eventdb3]
db_events.extend([event_attach1, event_attach2])
get_sources1 = functools.partial(get_sources_gr, dbs=dbs, docs_state0=docs_state0,
load_db_if_exists=load_db_if_exists,
db_type=db_type,
use_openai_embedding=use_openai_embedding,
hf_embedding_model=hf_embedding_model,
migrate_embedding_model=migrate_embedding_model,
auto_migrate_db=auto_migrate_db,
verbose=verbose,
get_userid_auth=get_userid_auth,
n_jobs=n_jobs,
)
            # if the collection source changes, must clear doc selections from it to avoid inconsistency
def clear_doc_choice(langchain_mode1):
if langchain_mode1 in langchain_modes_non_db:
label1 = 'Choose Resources->Collections and Pick Collection'
active_collection1 = "#### Not Chatting with Any Collection\n%s" % label1
else:
label1 = 'Select Subset of Document(s) for Chat with Collection: %s' % langchain_mode1
active_collection1 = "#### Chatting with Collection: %s" % langchain_mode1
return gr.Dropdown(choices=docs_state0, value=[DocumentChoice.ALL.value],
label=label1), gr.Markdown(value=active_collection1)
lg_change_event = langchain_mode.change(clear_doc_choice, inputs=langchain_mode,
outputs=[document_choice, active_collection],
queue=not kwargs['large_file_count_mode'])
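            # Toggle llama.cpp vs. GPT4All option rows based on the chosen base model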
def change_visible_llama(x):
if x == 'llama':
return gr.update(visible=True), \
gr.update(visible=True), \
gr.update(visible=False), \
gr.update(visible=False)
elif x in ['gptj', 'gpt4all_llama']:
return gr.update(visible=False), \
gr.update(visible=False), \
gr.update(visible=True), \
gr.update(visible=True)
else:
return gr.update(visible=False), \
gr.update(visible=False), \
gr.update(visible=False), \
gr.update(visible=False)
model_choice.change(change_visible_llama,
inputs=model_choice,
outputs=[row_llama, row_llama2, row_gpt4all, row_gpt4all2])
def resize_col_tabs(x):
return gr.Dropdown(scale=x)
col_tabs_scale.change(fn=resize_col_tabs, inputs=col_tabs_scale, outputs=col_tabs, queue=False)
def resize_chatbots(x, num_model_lock=0):
if num_model_lock == 0:
num_model_lock = 3 # 2 + 1 (which is dup of first)
else:
num_model_lock = 2 + num_model_lock
return tuple([gr.update(height=x)] * num_model_lock)
resize_chatbots_func = functools.partial(resize_chatbots, num_model_lock=len(text_outputs))
text_outputs_height.change(fn=resize_chatbots_func, inputs=text_outputs_height,
outputs=[text_output, text_output2] + text_outputs, queue=False)
def update_dropdown(x):
if DocumentChoice.ALL.value in x:
x.remove(DocumentChoice.ALL.value)
source_list = [DocumentChoice.ALL.value] + x
return gr.Dropdown(choices=source_list, value=[DocumentChoice.ALL.value])
get_sources_kwargs = dict(fn=get_sources1,
inputs=[my_db_state, selection_docs_state, requests_state, langchain_mode],
outputs=[file_source, docs_state, text_doc_count],
queue=queue)
eventdb7a = get_sources_btn.click(user_state_setup,
inputs=[my_db_state, requests_state, get_sources_btn, get_sources_btn],
outputs=[my_db_state, requests_state, get_sources_btn],
show_progress='minimal')
eventdb7 = eventdb7a.then(**get_sources_kwargs,
api_name='get_sources' if allow_api else None) \
.then(fn=update_dropdown, inputs=docs_state, outputs=document_choice)
get_sources_api_args = dict(fn=functools.partial(get_sources1, api=True),
inputs=[my_db_state, selection_docs_state, requests_state, langchain_mode],
outputs=get_sources_api_text,
queue=queue)
get_sources_api_btn.click(**get_sources_api_args,
api_name='get_sources_api' if allow_api else None)
            # Show sources button here; otherwise sources are only shown when documents are added.
            # Could add to get_sources above for download/dropdown, but that may be too much
show_sources1 = functools.partial(get_source_files_given_langchain_mode_gr,
dbs=dbs,
load_db_if_exists=load_db_if_exists,
db_type=db_type,
use_openai_embedding=use_openai_embedding,
hf_embedding_model=hf_embedding_model,
migrate_embedding_model=migrate_embedding_model,
auto_migrate_db=auto_migrate_db,
verbose=verbose,
get_userid_auth=get_userid_auth,
n_jobs=n_jobs)
eventdb8a = show_sources_btn.click(user_state_setup,
inputs=[my_db_state, requests_state, show_sources_btn, show_sources_btn],
outputs=[my_db_state, requests_state, show_sources_btn],
show_progress='minimal')
show_sources_kwargs = dict(fn=show_sources1,
inputs=[my_db_state, selection_docs_state, requests_state, langchain_mode],
outputs=sources_text)
eventdb8 = eventdb8a.then(**show_sources_kwargs,
api_name='show_sources' if allow_api else None)
def update_viewable_dropdown(x):
return gr.Dropdown(choices=x,
value=viewable_docs_state0[0] if len(viewable_docs_state0) > 0 else None)
get_viewable_sources1 = functools.partial(get_sources_gr, dbs=dbs, docs_state0=viewable_docs_state0,
load_db_if_exists=load_db_if_exists,
db_type=db_type,
use_openai_embedding=use_openai_embedding,
hf_embedding_model=hf_embedding_model,
migrate_embedding_model=migrate_embedding_model,
auto_migrate_db=auto_migrate_db,
verbose=kwargs['verbose'],
get_userid_auth=get_userid_auth,
n_jobs=n_jobs)
get_viewable_sources_args = dict(fn=get_viewable_sources1,
inputs=[my_db_state, selection_docs_state, requests_state, langchain_mode],
outputs=[file_source, viewable_docs_state, text_viewable_doc_count],
queue=queue)
eventdb12a = get_viewable_sources_btn.click(user_state_setup,
inputs=[my_db_state, requests_state,
get_viewable_sources_btn, get_viewable_sources_btn],
outputs=[my_db_state, requests_state, get_viewable_sources_btn],
show_progress='minimal')
viewable_kwargs = dict(fn=update_viewable_dropdown, inputs=viewable_docs_state, outputs=view_document_choice)
eventdb12 = eventdb12a.then(**get_viewable_sources_args,
api_name='get_viewable_sources' if allow_api else None) \
.then(**viewable_kwargs)
eventdb_viewa = view_document_choice.select(user_state_setup,
inputs=[my_db_state, requests_state,
view_document_choice],
outputs=[my_db_state, requests_state],
show_progress='minimal')
show_doc_func = functools.partial(show_doc,
dbs1=dbs,
load_db_if_exists1=load_db_if_exists,
db_type1=db_type,
use_openai_embedding1=use_openai_embedding,
hf_embedding_model1=hf_embedding_model,
migrate_embedding_model_or_db1=migrate_embedding_model,
auto_migrate_db1=auto_migrate_db,
verbose1=verbose,
get_userid_auth1=get_userid_auth,
max_raw_chunks=kwargs['max_raw_chunks'],
api=False,
n_jobs=n_jobs,
)
# Note: Not really useful for API, so no api_name
eventdb_viewa.then(fn=show_doc_func,
inputs=[my_db_state, selection_docs_state, requests_state, langchain_mode,
view_document_choice, view_raw_text_checkbox,
text_context_list],
outputs=[doc_view, doc_view2, doc_view3, doc_view4, doc_view5])
show_doc_func_api = functools.partial(show_doc_func, api=True)
get_document_api_btn.click(fn=show_doc_func_api,
inputs=[my_db_state, selection_docs_state, requests_state, langchain_mode,
view_document_choice, view_raw_text_checkbox,
text_context_list],
outputs=get_document_api_text, api_name='get_document_api')
# Get inputs to evaluate() and make_db()
# don't deepcopy, can contain model itself
all_kwargs = kwargs.copy()
all_kwargs.update(locals())
refresh_sources1 = functools.partial(update_and_get_source_files_given_langchain_mode_gr,
captions_model=captions_model,
caption_loader=caption_loader,
doctr_loader=doctr_loader,
dbs=dbs,
first_para=kwargs['first_para'],
hf_embedding_model=hf_embedding_model,
use_openai_embedding=use_openai_embedding,
migrate_embedding_model=migrate_embedding_model,
auto_migrate_db=auto_migrate_db,
text_limit=kwargs['text_limit'],
db_type=db_type,
load_db_if_exists=load_db_if_exists,
n_jobs=n_jobs, verbose=verbose,
get_userid_auth=get_userid_auth,
image_loaders_options0=image_loaders_options0,
pdf_loaders_options0=pdf_loaders_options0,
url_loaders_options0=url_loaders_options0,
jq_schema0=jq_schema0,
)
eventdb9a = refresh_sources_btn.click(user_state_setup,
inputs=[my_db_state, requests_state,
refresh_sources_btn, refresh_sources_btn],
outputs=[my_db_state, requests_state, refresh_sources_btn],
show_progress='minimal')
eventdb9 = eventdb9a.then(fn=refresh_sources1,
inputs=[my_db_state, selection_docs_state, requests_state,
langchain_mode, chunk, chunk_size,
image_loaders,
pdf_loaders,
url_loaders,
jq_schema,
],
outputs=sources_text,
api_name='refresh_sources' if allow_api else None)
delete_sources1 = functools.partial(del_source_files_given_langchain_mode_gr,
dbs=dbs,
load_db_if_exists=load_db_if_exists,
db_type=db_type,
use_openai_embedding=use_openai_embedding,
hf_embedding_model=hf_embedding_model,
migrate_embedding_model=migrate_embedding_model,
auto_migrate_db=auto_migrate_db,
verbose=verbose,
get_userid_auth=get_userid_auth,
n_jobs=n_jobs)
eventdb90a = delete_sources_btn.click(user_state_setup,
inputs=[my_db_state, requests_state,
delete_sources_btn, delete_sources_btn],
outputs=[my_db_state, requests_state, delete_sources_btn],
show_progress='minimal')
eventdb90 = eventdb90a.then(fn=delete_sources1,
inputs=[my_db_state, selection_docs_state, requests_state, document_choice,
langchain_mode],
outputs=sources_text,
api_name='delete_sources' if allow_api else None)
db_events.extend([eventdb90a, eventdb90])
def check_admin_pass(x):
return gr.update(visible=x == admin_pass)
def close_admin(x):
return gr.update(visible=not (x == admin_pass))
eventdb_logina = login_btn.click(user_state_setup,
inputs=[my_db_state, requests_state, login_btn, login_btn],
outputs=[my_db_state, requests_state, login_btn],
show_progress='minimal')
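            # Authenticate a user (or accept a pre-authorized one) and restore their persisted state from the auth file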
def login(db1s, selection_docs_state1, requests_state1, chat_state1, langchain_mode1,
username1, password1,
text_output1, text_output21, *text_outputs1,
auth_filename=None, num_model_lock=0, pre_authorized=False):
# use full auth login to allow new users if open access etc.
if pre_authorized:
username1 = requests_state1['username']
password1 = None
authorized1 = True
else:
authorized1 = authf(username1, password1, selection_docs_state1=selection_docs_state1)
if authorized1:
if not isinstance(requests_state1, dict):
requests_state1 = {}
requests_state1['username'] = username1
set_userid_gr(db1s, requests_state1, get_userid_auth)
username2 = get_username(requests_state1)
text_outputs1 = list(text_outputs1)
success1, text_result, text_output1, text_output21, text_outputs1, langchain_mode1 = \
load_auth(db1s, requests_state1, auth_filename, selection_docs_state1=selection_docs_state1,
chat_state1=chat_state1, langchain_mode1=langchain_mode1,
text_output1=text_output1, text_output21=text_output21, text_outputs1=text_outputs1,
username_override=username1, password_to_check=password1)
else:
success1 = False
text_result = "Wrong password for user %s" % username1
df_langchain_mode_paths1 = get_df_langchain_mode_paths(selection_docs_state1, db1s, dbs1=dbs)
if success1:
requests_state1['username'] = username1
label_instruction1 = 'Ask anything, %s' % requests_state1['username']
return db1s, selection_docs_state1, requests_state1, chat_state1, \
text_result, \
gr.update(label=label_instruction1), \
df_langchain_mode_paths1, \
gr.update(choices=list(chat_state1.keys()), value=None), \
gr.update(choices=get_langchain_choices(selection_docs_state1),
value=langchain_mode1), \
text_output1, text_output21, *tuple(text_outputs1)
login_func = functools.partial(login,
auth_filename=kwargs['auth_filename'],
num_model_lock=len(text_outputs),
pre_authorized=False,
)
load_login_func = functools.partial(login,
auth_filename=kwargs['auth_filename'],
num_model_lock=len(text_outputs),
pre_authorized=True,
)
login_inputs = [my_db_state, selection_docs_state, requests_state, chat_state,
langchain_mode,
username_text, password_text,
text_output, text_output2] + text_outputs
login_outputs = [my_db_state, selection_docs_state, requests_state, chat_state,
login_result_text,
instruction,
langchain_mode_path_text,
radio_chats,
langchain_mode,
text_output, text_output2] + text_outputs
eventdb_loginb = eventdb_logina.then(login_func,
inputs=login_inputs,
outputs=login_outputs,
queue=not kwargs['large_file_count_mode'])
admin_pass_textbox.submit(check_admin_pass, inputs=admin_pass_textbox, outputs=system_row, queue=False) \
.then(close_admin, inputs=admin_pass_textbox, outputs=admin_row, queue=False)
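            # Load per-user persisted state (collections, chats, chat text, langchain mode) from the auth file, checking the password if given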
def load_auth(db1s, requests_state1, auth_filename=None, selection_docs_state1=None,
chat_state1=None, langchain_mode1=None,
text_output1=None, text_output21=None, text_outputs1=None,
username_override=None, password_to_check=None):
# in-place assignment
if not auth_filename:
return False, "No auth file", text_output1, text_output21, text_outputs1
# if first time here, need to set userID
set_userid_gr(db1s, requests_state1, get_userid_auth)
if username_override:
username1 = username_override
else:
username1 = get_username(requests_state1)
success1 = False
with filelock.FileLock(auth_filename + '.lock'):
if os.path.isfile(auth_filename):
with open(auth_filename, 'rt') as f:
auth_dict = json.load(f)
if username1 in auth_dict:
auth_user = auth_dict[username1]
if password_to_check:
if auth_user['password'] != password_to_check:
                                    return False, "Invalid password for user %s" % username1, text_output1, text_output21, text_outputs1, langchain_mode1
if username_override:
# then use original user id
set_userid_direct_gr(db1s, auth_dict[username1]['userid'], username1)
if 'selection_docs_state' in auth_user:
update_auth_selection(auth_user, selection_docs_state1)
if 'chat_state' in auth_user:
chat_state1.update(auth_user['chat_state'])
if 'text_output' in auth_user:
text_output1 = auth_user['text_output']
if 'text_output2' in auth_user:
text_output21 = auth_user['text_output2']
if 'text_outputs' in auth_user:
text_outputs1 = auth_user['text_outputs']
if 'langchain_mode' in auth_user:
langchain_mode1 = auth_user['langchain_mode']
text_result = "Successful login for %s" % username1
success1 = True
else:
text_result = "No user %s" % username1
else:
text_result = "No auth file"
return success1, text_result, text_output1, text_output21, text_outputs1, langchain_mode1
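            # Write the auth dict to disk, backing up any existing file first and restoring the backup on failure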
def save_auth_dict(auth_dict, auth_filename):
backup_file = auth_filename + '.bak' + str(uuid.uuid4())
if os.path.isfile(auth_filename):
shutil.copy(auth_filename, backup_file)
try:
with open(auth_filename, 'wt') as f:
f.write(json.dumps(auth_dict, indent=2))
except BaseException as e:
print("Failure to save auth %s, restored backup: %s: %s" % (auth_filename, backup_file, str(e)),
flush=True)
                    shutil.copy(backup_file, auth_filename)
if os.getenv('HARD_ASSERTS'):
# unexpected in testing or normally
raise
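            # Persist the current user's state (collections, chats, chat text, langchain mode) back to the auth file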
def save_auth(selection_docs_state1, requests_state1,
chat_state1, langchain_mode1,
text_output1, text_output21, text_outputs1,
auth_filename=None, auth_access=None, auth_freeze=None, guest_name=None,
):
if auth_freeze:
return
if not auth_filename:
return
# save to auth file
username1 = get_username(requests_state1)
with filelock.FileLock(auth_filename + '.lock'):
if os.path.isfile(auth_filename):
with open(auth_filename, 'rt') as f:
auth_dict = json.load(f)
if username1 in auth_dict:
auth_user = auth_dict[username1]
if selection_docs_state1:
update_auth_selection(auth_user, selection_docs_state1, save=True)
if chat_state1:
# overwrite
auth_user['chat_state'] = chat_state1
if text_output1:
auth_user['text_output'] = text_output1
if text_output21:
auth_user['text_output2'] = text_output21
if text_outputs1:
auth_user['text_outputs'] = text_outputs1
if langchain_mode1:
auth_user['langchain_mode'] = langchain_mode1
save_auth_dict(auth_dict, auth_filename)
def save_auth_wrap(*args, **kwargs):
save_auth(args[0], args[1],
args[2], args[3],
args[4], args[5], args[6:], **kwargs
)
save_auth_func = functools.partial(save_auth_wrap,
auth_filename=kwargs['auth_filename'],
auth_access=kwargs['auth_access'],
auth_freeze=kwargs['auth_freeze'],
guest_name=kwargs['guest_name'],
)
save_auth_kwargs = dict(fn=save_auth_func,
inputs=[selection_docs_state, requests_state,
chat_state, langchain_mode, text_output, text_output2] + text_outputs
)
lg_change_event_auth = lg_change_event.then(**save_auth_kwargs)
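            # Parse "name, type, user_path" from the Add Collection textbox, validate access, and register the new collection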
def add_langchain_mode(db1s, selection_docs_state1, requests_state1, langchain_mode1, y,
auth_filename=None, auth_freeze=None, guest_name=None):
assert auth_filename is not None
assert auth_freeze is not None
set_userid_gr(db1s, requests_state1, get_userid_auth)
username1 = get_username(requests_state1)
for k in db1s:
set_dbid_gr(db1s[k])
langchain_modes = selection_docs_state1['langchain_modes']
langchain_mode_paths = selection_docs_state1['langchain_mode_paths']
langchain_mode_types = selection_docs_state1['langchain_mode_types']
user_path = None
valid = True
y2 = y.strip().replace(' ', '').split(',')
if len(y2) >= 1:
langchain_mode2 = y2[0]
if len(langchain_mode2) >= 3 and langchain_mode2.isalnum():
# real restriction is:
# ValueError: Expected collection name that (1) contains 3-63 characters, (2) starts and ends with an alphanumeric character, (3) otherwise contains only alphanumeric characters, underscores or hyphens (-), (4) contains no two consecutive periods (..) and (5) is not a valid IPv4 address, got me
# but just make simpler
# assume personal if don't have user_path
langchain_mode_type = y2[1] if len(y2) > 1 else LangChainTypes.PERSONAL.value
user_path = y2[2] if len(y2) > 2 else None # assume None if don't have user_path
if user_path in ['', "''"]:
# transcribe UI input
user_path = None
if langchain_mode_type not in [x.value for x in list(LangChainTypes)]:
textbox = "Invalid type %s" % langchain_mode_type
valid = False
langchain_mode2 = langchain_mode1
elif langchain_mode_type == LangChainTypes.SHARED.value and username1 == guest_name:
textbox = "Guests cannot add shared collections"
valid = False
langchain_mode2 = langchain_mode1
elif user_path is not None and langchain_mode_type == LangChainTypes.PERSONAL.value:
textbox = "Do not pass user_path for personal/scratch types"
valid = False
langchain_mode2 = langchain_mode1
elif user_path is not None and username1 == guest_name:
textbox = "Guests cannot add collections with path"
valid = False
langchain_mode2 = langchain_mode1
elif langchain_mode2 in langchain_modes_intrinsic:
user_path = None
textbox = "Invalid access to use internal name: %s" % langchain_mode2
valid = False
langchain_mode2 = langchain_mode1
                        elif (user_path and allow_upload_to_user_data) or (not user_path and allow_upload_to_my_data):
if user_path:
user_path = makedirs(user_path, exist_ok=True, use_base=True)
langchain_mode_paths.update({langchain_mode2: user_path})
langchain_mode_types.update({langchain_mode2: langchain_mode_type})
if langchain_mode2 not in langchain_modes:
langchain_modes.append(langchain_mode2)
textbox = ''
else:
valid = False
langchain_mode2 = langchain_mode1
textbox = "Invalid access. user allowed: %s " \
"personal/scratch allowed: %s" % (allow_upload_to_user_data, allow_upload_to_my_data)
else:
valid = False
langchain_mode2 = langchain_mode1
textbox = "Invalid, collection must be >=3 characters and alphanumeric"
else:
valid = False
langchain_mode2 = langchain_mode1
textbox = "Invalid, must be like UserData2, user_path2"
selection_docs_state1 = update_langchain_mode_paths(selection_docs_state1)
df_langchain_mode_paths1 = get_df_langchain_mode_paths(selection_docs_state1, db1s, dbs1=dbs)
choices = get_langchain_choices(selection_docs_state1)
if valid and not user_path:
# needs to have key for it to make it known different from userdata case in _update_user_db()
from src.gpt_langchain import length_db1
db1s[langchain_mode2] = [None] * length_db1()
if valid:
chat_state1 = None
text_output1, text_output21, text_outputs1 = None, None, None
save_auth_func(selection_docs_state1, requests_state1,
chat_state1, langchain_mode2,
text_output1, text_output21, text_outputs1,
)
return db1s, selection_docs_state1, gr.update(choices=choices,
value=langchain_mode2), textbox, df_langchain_mode_paths1
def remove_langchain_mode(db1s, selection_docs_state1, requests_state1,
langchain_mode1, langchain_mode2, dbsu=None, auth_filename=None, auth_freeze=None,
guest_name=None,
purge=False):
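# remove a collection (and optionally purge its source files and persist directory),
# subject to upload permissions and intrinsic-mode protection, then persist changed state via save_auth_func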
assert auth_filename is not None
assert auth_freeze is not None
set_userid_gr(db1s, requests_state1, get_userid_auth)
for k in db1s:
set_dbid_gr(db1s[k])
assert dbsu is not None
langchain_modes = selection_docs_state1['langchain_modes']
langchain_mode_paths = selection_docs_state1['langchain_mode_paths']
langchain_mode_types = selection_docs_state1['langchain_mode_types']
langchain_type2 = langchain_mode_types.get(langchain_mode2, LangChainTypes.EITHER.value)
changed_state = False
textbox = "Invalid access, cannot remove %s" % langchain_mode2
in_scratch_db = langchain_mode2 in db1s
in_user_db = dbsu is not None and langchain_mode2 in dbsu
if in_scratch_db and not allow_upload_to_my_data or \
in_user_db and not allow_upload_to_user_data or \
langchain_mode2 in langchain_modes_intrinsic:
can_remove = False
can_purge = False
if langchain_mode2 in langchain_modes_intrinsic:
can_purge = True
else:
can_remove = True
can_purge = True
# change global variables
if langchain_mode2 in langchain_modes or langchain_mode2 in langchain_mode_paths or langchain_mode2 in db1s:
if can_purge and purge:
# remove source files
from src.gpt_langchain import get_sources, del_from_db
sources_file, source_list, num_chunks, num_sources_str, db = \
get_sources(db1s, selection_docs_state1,
requests_state1, langchain_mode2, dbs=dbsu,
docs_state0=docs_state0,
load_db_if_exists=load_db_if_exists,
db_type=db_type,
use_openai_embedding=use_openai_embedding,
hf_embedding_model=hf_embedding_model,
migrate_embedding_model=migrate_embedding_model,
auto_migrate_db=auto_migrate_db,
verbose=verbose,
get_userid_auth=get_userid_auth,
n_jobs=n_jobs)
del_from_db(db, source_list, db_type=db_type)
for fil in source_list:
if os.path.isfile(fil):
print("Purged %s" % fil, flush=True)
remove(fil)
# remove db directory
from src.gpt_langchain import get_persist_directory
persist_directory, langchain_type2 = \
get_persist_directory(langchain_mode2, langchain_type=langchain_type2,
db1s=db1s, dbs=dbsu)
print("removed persist_directory %s" % persist_directory, flush=True)
remove(persist_directory)
textbox = "Purged, but did not remove %s" % langchain_mode2
if can_remove:
if langchain_mode2 in langchain_modes:
langchain_modes.remove(langchain_mode2)
if langchain_mode2 in langchain_mode_paths:
langchain_mode_paths.pop(langchain_mode2)
if langchain_mode2 in langchain_mode_types:
langchain_mode_types.pop(langchain_mode2)
if langchain_mode2 in db1s and langchain_mode2 != LangChainMode.MY_DATA.value:
# don't remove last MyData, used as user hash
db1s.pop(langchain_mode2)
textbox = ""
changed_state = True
else:
textbox = "%s is not visible" % langchain_mode2
# update
selection_docs_state1 = update_langchain_mode_paths(selection_docs_state1)
df_langchain_mode_paths1 = get_df_langchain_mode_paths(selection_docs_state1, db1s, dbs1=dbs)
if changed_state:
chat_state1 = None
text_output1, text_output21, text_outputs1 = None, None, None
save_auth_func(selection_docs_state1, requests_state1,
chat_state1, langchain_mode2,
text_output1, text_output21, text_outputs1,
)
return db1s, selection_docs_state1, \
gr.update(choices=get_langchain_choices(selection_docs_state1),
value=langchain_mode2), textbox, df_langchain_mode_paths1
eventdb20a = new_langchain_mode_text.submit(user_state_setup,
inputs=[my_db_state, requests_state,
new_langchain_mode_text, new_langchain_mode_text],
outputs=[my_db_state, requests_state, new_langchain_mode_text],
show_progress='minimal')
add_langchain_mode_func = functools.partial(add_langchain_mode,
auth_filename=kwargs['auth_filename'],
auth_freeze=kwargs['auth_freeze'],
guest_name=kwargs['guest_name'],
)
eventdb20b = eventdb20a.then(fn=add_langchain_mode_func,
inputs=[my_db_state, selection_docs_state, requests_state,
langchain_mode,
new_langchain_mode_text],
outputs=[my_db_state, selection_docs_state, langchain_mode,
new_langchain_mode_text,
langchain_mode_path_text],
api_name='new_langchain_mode_text' if allow_api and allow_upload_to_user_data else None)
db_events.extend([eventdb20a, eventdb20b])
remove_langchain_mode_func = functools.partial(remove_langchain_mode,
dbsu=dbs,
auth_filename=kwargs['auth_filename'],
auth_freeze=kwargs['auth_freeze'],
guest_name=kwargs['guest_name'],
)
eventdb21a = remove_langchain_mode_text.submit(user_state_setup,
inputs=[my_db_state,
requests_state,
remove_langchain_mode_text, remove_langchain_mode_text],
outputs=[my_db_state,
requests_state, remove_langchain_mode_text],
show_progress='minimal')
remove_langchain_mode_kwargs = dict(fn=remove_langchain_mode_func,
inputs=[my_db_state, selection_docs_state, requests_state,
langchain_mode,
remove_langchain_mode_text],
outputs=[my_db_state, selection_docs_state, langchain_mode,
remove_langchain_mode_text,
langchain_mode_path_text])
eventdb21b = eventdb21a.then(**remove_langchain_mode_kwargs,
api_name='remove_langchain_mode_text' if allow_api and allow_upload_to_user_data else None)
db_events.extend([eventdb21a, eventdb21b])
eventdb22a = purge_langchain_mode_text.submit(user_state_setup,
inputs=[my_db_state,
requests_state,
purge_langchain_mode_text, purge_langchain_mode_text],
outputs=[my_db_state,
requests_state, purge_langchain_mode_text],
show_progress='minimal')
purge_langchain_mode_func = functools.partial(remove_langchain_mode_func, purge=True)
purge_langchain_mode_kwargs = dict(fn=purge_langchain_mode_func,
inputs=[my_db_state, selection_docs_state, requests_state,
langchain_mode,
purge_langchain_mode_text],
outputs=[my_db_state, selection_docs_state, langchain_mode,
purge_langchain_mode_text,
langchain_mode_path_text])
# purge_langchain_mode_kwargs = remove_langchain_mode_kwargs.copy()
# purge_langchain_mode_kwargs['fn'] = functools.partial(remove_langchain_mode_kwargs['fn'], purge=True)
eventdb22b = eventdb22a.then(**purge_langchain_mode_kwargs,
api_name='purge_langchain_mode_text' if allow_api and allow_upload_to_user_data else None)
eventdb22b_auth = eventdb22b.then(**save_auth_kwargs)
db_events.extend([eventdb22a, eventdb22b, eventdb22b_auth])
def load_langchain_gr(db1s, selection_docs_state1, requests_state1, langchain_mode1, auth_filename=None):
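# reload selection_docs_state from the auth file for this user and refresh the langchain_mode dropdown and paths table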
load_auth(db1s, requests_state1, auth_filename, selection_docs_state1=selection_docs_state1)
selection_docs_state1 = update_langchain_mode_paths(selection_docs_state1)
df_langchain_mode_paths1 = get_df_langchain_mode_paths(selection_docs_state1, db1s, dbs1=dbs)
return selection_docs_state1, \
gr.update(choices=get_langchain_choices(selection_docs_state1),
value=langchain_mode1), df_langchain_mode_paths1
eventdbloadla = load_langchain.click(user_state_setup,
inputs=[my_db_state, requests_state, langchain_mode],
outputs=[my_db_state, requests_state, langchain_mode],
show_progress='minimal')
load_langchain_gr_func = functools.partial(load_langchain_gr,
auth_filename=kwargs['auth_filename'])
eventdbloadlb = eventdbloadla.then(fn=load_langchain_gr_func,
inputs=[my_db_state, selection_docs_state, requests_state, langchain_mode],
outputs=[selection_docs_state, langchain_mode, langchain_mode_path_text],
api_name='load_langchain' if allow_api and allow_upload_to_user_data else None)
if not kwargs['large_file_count_mode']:
# FIXME: Could add all these functions, inputs, outputs into single function for snappier GUI
# all update events when not doing large file count mode
# Note: Login touches langchain_mode, which triggers all these
lg_change_event2 = lg_change_event_auth.then(**get_sources_kwargs)
lg_change_event3 = lg_change_event2.then(fn=update_dropdown, inputs=docs_state, outputs=document_choice)
lg_change_event4 = lg_change_event3.then(**show_sources_kwargs)
lg_change_event5 = lg_change_event4.then(**get_viewable_sources_args)
lg_change_event6 = lg_change_event5.then(**viewable_kwargs)
eventdb2c = eventdb2.then(**get_sources_kwargs)
eventdb2d = eventdb2c.then(fn=update_dropdown, inputs=docs_state, outputs=document_choice)
eventdb2e = eventdb2d.then(**show_sources_kwargs)
eventdb2f = eventdb2e.then(**get_viewable_sources_args)
eventdb2g = eventdb2f.then(**viewable_kwargs)
eventdb1c = eventdb1.then(**get_sources_kwargs)
eventdb1d = eventdb1c.then(fn=update_dropdown, inputs=docs_state, outputs=document_choice)
eventdb1e = eventdb1d.then(**show_sources_kwargs)
eventdb1f = eventdb1e.then(**get_viewable_sources_args)
eventdb1g = eventdb1f.then(**viewable_kwargs)
eventdb3c = eventdb3.then(**get_sources_kwargs)
eventdb3d = eventdb3c.then(fn=update_dropdown, inputs=docs_state, outputs=document_choice)
eventdb3e = eventdb3d.then(**show_sources_kwargs)
eventdb3f = eventdb3e.then(**get_viewable_sources_args)
eventdb3g = eventdb3f.then(**viewable_kwargs)
eventdb90ua = eventdb90.then(**get_sources_kwargs)
eventdb90ub = eventdb90ua.then(fn=update_dropdown, inputs=docs_state, outputs=document_choice)
eventdb90uc = eventdb90ub.then(**show_sources_kwargs)
eventdb90ud = eventdb90uc.then(**get_viewable_sources_args)
eventdb90ue = eventdb90ud.then(**viewable_kwargs)
eventdb20c = eventdb20b.then(**get_sources_kwargs)
eventdb20d = eventdb20c.then(fn=update_dropdown, inputs=docs_state, outputs=document_choice)
eventdb20e = eventdb20d.then(**show_sources_kwargs)
eventdb20f = eventdb20e.then(**get_viewable_sources_args)
eventdb20g = eventdb20f.then(**viewable_kwargs)
eventdb21c = eventdb21b.then(**get_sources_kwargs)
eventdb21d = eventdb21c.then(fn=update_dropdown, inputs=docs_state, outputs=document_choice)
eventdb21e = eventdb21d.then(**show_sources_kwargs)
eventdb21f = eventdb21e.then(**get_viewable_sources_args)
eventdb21g = eventdb21f.then(**viewable_kwargs)
eventdb22c = eventdb22b_auth.then(**get_sources_kwargs)
eventdb22d = eventdb22c.then(fn=update_dropdown, inputs=docs_state, outputs=document_choice)
eventdb22e = eventdb22d.then(**show_sources_kwargs)
eventdb22f = eventdb22e.then(**get_viewable_sources_args)
eventdb22g = eventdb22f.then(**viewable_kwargs)
event_attach3 = event_attach2.then(**get_sources_kwargs)
event_attach4 = event_attach3.then(fn=update_dropdown, inputs=docs_state, outputs=document_choice)
event_attach5 = event_attach4.then(**show_sources_kwargs)
event_attach6 = event_attach5.then(**get_viewable_sources_args)
event_attach7 = event_attach6.then(**viewable_kwargs)
sync2 = sync1.then(**get_sources_kwargs)
sync3 = sync2.then(fn=update_dropdown, inputs=docs_state, outputs=document_choice)
sync4 = sync3.then(**show_sources_kwargs)
sync5 = sync4.then(**get_viewable_sources_args)
sync6 = sync5.then(**viewable_kwargs)
eventdb_loginbb = eventdb_loginb.then(**get_sources_kwargs)
eventdb_loginc = eventdb_loginbb.then(fn=update_dropdown, inputs=docs_state, outputs=document_choice)
# FIXME: Fix redundancy
eventdb_logind = eventdb_loginc.then(**show_sources_kwargs)
eventdb_logine = eventdb_logind.then(**get_viewable_sources_args)
eventdb_loginf = eventdb_logine.then(**viewable_kwargs)
db_events.extend([lg_change_event_auth,
lg_change_event, lg_change_event2, lg_change_event3, lg_change_event4, lg_change_event5,
lg_change_event6] +
[eventdb2c, eventdb2d, eventdb2e, eventdb2f, eventdb2g] +
[eventdb1c, eventdb1d, eventdb1e, eventdb1f, eventdb1g] +
[eventdb3c, eventdb3d, eventdb3e, eventdb3f, eventdb3g] +
[eventdb90ua, eventdb90ub, eventdb90uc, eventdb90ud, eventdb90ue] +
[eventdb20c, eventdb20d, eventdb20e, eventdb20f, eventdb20g] +
[eventdb21c, eventdb21d, eventdb21e, eventdb21f, eventdb21g] +
[eventdb22b_auth, eventdb22c, eventdb22d, eventdb22e, eventdb22f, eventdb22g] +
[event_attach3, event_attach4, event_attach5, event_attach6, event_attach7] +
[sync1, sync2, sync3, sync4, sync5, sync6] +
[eventdb_logina, eventdb_loginb, eventdb_loginbb,
eventdb_loginc, eventdb_logind, eventdb_logine,
eventdb_loginf]
,
)
inputs_list, inputs_dict = get_inputs_list(all_kwargs, kwargs['model_lower'], model_id=1)
inputs_list2, inputs_dict2 = get_inputs_list(all_kwargs, kwargs['model_lower'], model_id=2)
from functools import partial
kwargs_evaluate = {k: v for k, v in all_kwargs.items() if k in inputs_kwargs_list}
# ensure present
for k in inputs_kwargs_list:
assert k in kwargs_evaluate, "Missing %s" % k
def evaluate_nochat(*args1, default_kwargs1=None, str_api=False, plain_api=False, **kwargs1):
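# API entry point for non-chat evaluation: normalizes user kwargs (str_api takes a dict passed as a string,
# plain_api additionally injects default model/db/docs/requests states), enforces the API key, then streams results from evaluate()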
args_list = list(args1)
if str_api:
if plain_api:
# i.e. not fresh model, tells evaluate to use model_state0
args_list.insert(0, kwargs['model_state_none'].copy())
args_list.insert(1, my_db_state0.copy())
args_list.insert(2, selection_docs_state0.copy())
args_list.insert(3, requests_state0.copy())
user_kwargs = args_list[len(input_args_list)]
assert isinstance(user_kwargs, str)
user_kwargs = ast.literal_eval(user_kwargs)
else:
assert not plain_api
user_kwargs = {k: v for k, v in zip(eval_func_param_names, args_list[len(input_args_list):])}
# control kwargs1 for evaluate
kwargs1['answer_with_sources'] = -1 # just text chunk, not URL etc.
kwargs1['show_accordions'] = False
kwargs1['append_sources_to_answer'] = False
kwargs1['show_link_in_sources'] = False
kwargs1['top_k_docs_max_show'] = 30
# only used for submit_nochat_api
user_kwargs['chat'] = False
if 'stream_output' not in user_kwargs:
user_kwargs['stream_output'] = False
if plain_api:
user_kwargs['stream_output'] = False
if 'langchain_mode' not in user_kwargs:
# if user doesn't specify, then assume disabled, not use default
if LangChainMode.LLM.value in kwargs['langchain_modes']:
user_kwargs['langchain_mode'] = LangChainMode.LLM.value
elif len(kwargs['langchain_modes']) >= 1:
user_kwargs['langchain_mode'] = kwargs['langchain_modes'][0]
else:
# disabled should always be allowed
user_kwargs['langchain_mode'] = LangChainMode.DISABLED.value
if 'langchain_action' not in user_kwargs:
user_kwargs['langchain_action'] = LangChainAction.QUERY.value
if 'langchain_agents' not in user_kwargs:
user_kwargs['langchain_agents'] = []
# be flexible
if 'instruction' in user_kwargs and 'instruction_nochat' not in user_kwargs:
user_kwargs['instruction_nochat'] = user_kwargs['instruction']
if 'iinput' in user_kwargs and 'iinput_nochat' not in user_kwargs:
user_kwargs['iinput_nochat'] = user_kwargs['iinput']
if 'visible_models' not in user_kwargs:
if kwargs['visible_models']:
if isinstance(kwargs['visible_models'], int):
user_kwargs['visible_models'] = [kwargs['visible_models']]
elif isinstance(kwargs['visible_models'], list):
# only take first one
user_kwargs['visible_models'] = [kwargs['visible_models'][0]]
else:
user_kwargs['visible_models'] = [0]
else:
# if no user version or default version, then just take first
user_kwargs['visible_models'] = [0]
if 'h2ogpt_key' not in user_kwargs:
user_kwargs['h2ogpt_key'] = None
if 'system_prompt' in user_kwargs and user_kwargs['system_prompt'] is None:
# avoid worrying about below default_kwargs -> args_list that checks if None
user_kwargs['system_prompt'] = 'None'
set1 = set(list(default_kwargs1.keys()))
set2 = set(eval_func_param_names)
assert set1 == set2, "Set diff: %s %s: %s" % (set1, set2, set1.symmetric_difference(set2))
# correct ordering. Note some things may not be in default_kwargs, so can't be default of user_kwargs.get()
model_state1 = args_list[0]
my_db_state1 = args_list[1]
selection_docs_state1 = args_list[2]
requests_state1 = args_list[3]
args_list = [user_kwargs[k] if k in user_kwargs and user_kwargs[k] is not None else default_kwargs1[k] for k
in eval_func_param_names]
assert len(args_list) == len(eval_func_param_names)
stream_output1 = args_list[eval_func_param_names.index('stream_output')]
if len(model_states) > 1:
visible_models1 = args_list[eval_func_param_names.index('visible_models')]
model_active_choice1 = visible_models_to_model_choice(visible_models1, api=True)
model_state1 = model_states[model_active_choice1 % len(model_states)]
for key in key_overrides:
if user_kwargs.get(key) is None and model_state1.get(key) is not None:
args_list[eval_func_param_names.index(key)] = model_state1[key]
if hasattr(model_state1['tokenizer'], 'model_max_length'):
# ensure listen to limit, with some buffer
# buffer = 50
buffer = 0
args_list[eval_func_param_names.index('max_new_tokens')] = min(
args_list[eval_func_param_names.index('max_new_tokens')],
model_state1['tokenizer'].model_max_length - buffer)
# override overall visible_models and h2ogpt_key if have model_specific one
# NOTE: only applicable if len(model_states) > 1 at moment
# else controlled by evaluate()
if 'visible_models' in model_state1 and model_state1['visible_models'] is not None:
assert isinstance(model_state1['visible_models'], (int, str, list, tuple))
which_model = visible_models_to_model_choice(model_state1['visible_models'])
args_list[eval_func_param_names.index('visible_models')] = which_model
if 'h2ogpt_key' in model_state1 and model_state1['h2ogpt_key'] is not None:
# remote server key if present
# i.e. may be '' and used to override overall local key
assert isinstance(model_state1['h2ogpt_key'], str)
args_list[eval_func_param_names.index('h2ogpt_key')] = model_state1['h2ogpt_key']
# local key, not for remote server unless same, will be passed through
h2ogpt_key1 = args_list[eval_func_param_names.index('h2ogpt_key')]
max_time1 = args_list[eval_func_param_names.index('max_time')]
# final full evaluate args list
args_list = [model_state1, my_db_state1, selection_docs_state1, requests_state1] + args_list
# NOTE: Don't allow UI-like access, in case modify state via API
valid_key = is_valid_key(kwargs['enforce_h2ogpt_api_key'],
kwargs['enforce_h2ogpt_ui_key'],
kwargs['h2ogpt_api_keys'], h2ogpt_key1,
requests_state1=None)
evaluate_local = evaluate if valid_key else evaluate_fake
save_dict = dict()
ret = {}
ret_old = None
try:
tgen0 = time.time()
gen1 = evaluate_local(*tuple(args_list), **kwargs1)
# NOTE: could use iterator with timeout=0 but not required unless some other reason found
# gen1 = TimeoutIterator(gen1, timeout=0, sentinel=None, raise_on_exception=True)
for res_dict in gen1:
error = res_dict.get('error', '')
extra = res_dict.get('extra', '')
save_dict = res_dict.get('save_dict', {})
# update save_dict
save_dict['error'] = error
save_dict['extra'] = extra
save_dict['valid_key'] = valid_key
save_dict['h2ogpt_key'] = h2ogpt_key1
if str_api and plain_api:
save_dict['which_api'] = 'str_plain_api'
elif str_api:
save_dict['which_api'] = 'str_api'
elif plain_api:
save_dict['which_api'] = 'plain_api'
else:
save_dict['which_api'] = 'nochat_api'
if 'extra_dict' not in save_dict:
save_dict['extra_dict'] = {}
if requests_state1:
save_dict['extra_dict'].update(requests_state1)
else:
save_dict['extra_dict'].update(dict(username='NO_REQUEST'))
if is_public:
# don't want to share actual endpoints
if 'save_dict' in res_dict and isinstance(res_dict['save_dict'], dict):
res_dict['save_dict'].pop('inference_server', None)
if 'extra_dict' in res_dict['save_dict'] and isinstance(res_dict['save_dict']['extra_dict'],
dict):
res_dict['save_dict']['extra_dict'].pop('inference_server', None)
# get response
if str_api:
# full return of dict
ret = res_dict
elif kwargs['langchain_mode'] == 'Disabled':
ret = fix_text_for_gradio(res_dict['response'])
else:
ret = '<br>' + fix_text_for_gradio(res_dict['response'])
if stream_output1 and ret != ret_old:
# yield as it goes, else need to wait since predict only returns first yield
yield ret
if isinstance(ret, dict):
ret_old = ret.copy()
else:
ret_old = ret
if time.time() - tgen0 > max_time1 + 10: # don't use actual, so inner has chance to complete
if verbose:
print("Took too long evaluate_nochat: %s" % (time.time() - tgen0), flush=True)
break
# yield if anything left over as can happen (FIXME: Understand better)
# return back last ret
yield ret
finally:
clear_torch_cache()
clear_embeddings(user_kwargs['langchain_mode'], my_db_state1)
save_generate_output(**save_dict)
kwargs_evaluate_nochat = kwargs_evaluate.copy()
# nominally never want sources appended for API calls, which is what nochat is primarily used for
kwargs_evaluate_nochat.update(dict(append_sources_to_answer=False))
fun = partial(evaluate_nochat,
default_kwargs1=default_kwargs,
str_api=False,
**kwargs_evaluate_nochat)
fun_with_dict_str = partial(evaluate_nochat,
default_kwargs1=default_kwargs,
str_api=True,
**kwargs_evaluate_nochat
)
fun_with_dict_str_plain = partial(evaluate_nochat,
default_kwargs1=default_kwargs,
str_api=True,
plain_api=True,
**kwargs_evaluate_nochat
)
dark_mode_btn.click(
None,
None,
None,
_js=wrap_js_to_lambda(0, get_dark_js()),
api_name="dark" if allow_api else None,
queue=False,
)
# Handle uploads from API
upload_api_btn = gr.UploadButton("Upload File Results", visible=False)
file_upload_api = gr.File(visible=False)
file_upload_text = gr.Textbox(visible=False)
def upload_file(files):
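# return the uploaded file path(s) twice: once for the hidden File component and once for the hidden Textbox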
if isinstance(files, list):
file_paths = [file.name for file in files]
else:
file_paths = files.name
return file_paths, file_paths
upload_api_btn.upload(fn=upload_file,
inputs=upload_api_btn,
outputs=[file_upload_api, file_upload_text],
api_name='upload_api' if allow_upload_api else None)
def visible_toggle(x):
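# flip the 'on'/'off' text state and show or hide the associated column accordingly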
x = 'off' if x == 'on' else 'on'
return x, gr.Column.update(visible=True if x == 'on' else False)
side_bar_btn.click(fn=visible_toggle,
inputs=side_bar_text,
outputs=[side_bar_text, side_bar],
queue=False)
doc_count_btn.click(fn=visible_toggle,
inputs=doc_count_text,
outputs=[doc_count_text, row_doc_track],
queue=False)
submit_buttons_btn.click(fn=visible_toggle,
inputs=submit_buttons_text,
outputs=[submit_buttons_text, submit_buttons],
queue=False)
visible_model_btn.click(fn=visible_toggle,
inputs=visible_models_text,
outputs=[visible_models_text, visible_models],
queue=False)
# examples after submit or any other buttons for chat or no chat
if kwargs['examples'] is not None and kwargs['show_examples']:
gr.Examples(examples=kwargs['examples'], inputs=inputs_list)
# Score
def score_last_response(*args, nochat=False, num_model_lock=0):
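# score the last response; with model_lock (num_model_lock > 0) score each locked model's output,
# otherwise defer directly to _score_last_response for the single model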
try:
if num_model_lock > 0:
# then lock way
args_list = list(args).copy()
outputs = args_list[-num_model_lock:]
score_texts1 = []
for output in outputs:
# same input, put into form good for _score_last_response()
args_list[-1] = output
score_texts1.append(
_score_last_response(*tuple(args_list), nochat=nochat,
num_model_lock=num_model_lock, prefix=''))
if len(score_texts1) > 1:
return "Response Scores: %s" % ' '.join(score_texts1)
else:
return "Response Scores: %s" % score_texts1[0]
else:
return _score_last_response(*args, nochat=nochat, num_model_lock=num_model_lock)
finally:
clear_torch_cache()
def _score_last_response(*args, nochat=False, num_model_lock=0, prefix='Response Score: '):
""" Similar to user() """
args_list = list(args)
smodel = score_model_state0['model']
stokenizer = score_model_state0['tokenizer']
sdevice = score_model_state0['device']
if memory_restriction_level > 0:
max_length_tokenize = (768 - 256) if memory_restriction_level <= 2 else (512 - 256)
elif hasattr(stokenizer, 'model_max_length'):
max_length_tokenize = stokenizer.model_max_length
else:
# limit to 1024, not worth OOMing on reward score
max_length_tokenize = 2048 - 1024
cutoff_len = max_length_tokenize * 4 # restrict deberta related to max for LLM
if not nochat:
history = args_list[-1]
if history is None:
history = []
if smodel is not None and \
stokenizer is not None and \
sdevice is not None and \
history is not None and len(history) > 0 and \
history[-1] is not None and \
len(history[-1]) >= 2:
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
question = history[-1][0]
answer = history[-1][1]
else:
return '%sNA' % prefix
else:
answer = args_list[-1]
instruction_nochat_arg_id = eval_func_param_names.index('instruction_nochat')
question = args_list[instruction_nochat_arg_id]
if question is None:
return '%sBad Question' % prefix
if answer is None:
return '%sBad Answer' % prefix
try:
score = score_qa(smodel, stokenizer, max_length_tokenize, question, answer, cutoff_len)
finally:
clear_torch_cache()
if isinstance(score, str):
return '%sNA' % prefix
return '{}{:.1%}'.format(prefix, score)
def noop_score_last_response(*args, **kwargs):
return "Response Score: Disabled"
if kwargs['score_model']:
score_fun = score_last_response
else:
score_fun = noop_score_last_response
score_args = dict(fn=score_fun,
inputs=inputs_list + [text_output],
outputs=[score_text],
)
score_args2 = dict(fn=partial(score_fun),
inputs=inputs_list2 + [text_output2],
outputs=[score_text2],
)
score_fun_func = functools.partial(score_fun, num_model_lock=len(text_outputs))
all_score_args = dict(fn=score_fun_func,
inputs=inputs_list + text_outputs,
outputs=score_text,
)
score_args_nochat = dict(fn=partial(score_fun, nochat=True),
inputs=inputs_list + [text_output_nochat],
outputs=[score_text_nochat],
)
def update_history(*args, undo=False, retry=False, sanitize_user_prompt=False):
"""
User that fills history for bot
:param args:
:param undo:
:param retry:
:param sanitize_user_prompt:
:return:
"""
args_list = list(args)
user_message = args_list[eval_func_param_names.index('instruction')] # chat only
input1 = args_list[eval_func_param_names.index('iinput')] # chat only
prompt_type1 = args_list[eval_func_param_names.index('prompt_type')]
langchain_mode1 = args_list[eval_func_param_names.index('langchain_mode')]
langchain_action1 = args_list[eval_func_param_names.index('langchain_action')]
langchain_agents1 = args_list[eval_func_param_names.index('langchain_agents')]
document_subset1 = args_list[eval_func_param_names.index('document_subset')]
document_choice1 = args_list[eval_func_param_names.index('document_choice')]
if not prompt_type1:
# shouldn't have to specify if CLI launched model
prompt_type1 = kwargs['prompt_type']
# apply back
args_list[eval_func_param_names.index('prompt_type')] = prompt_type1
if input1 and not user_message.endswith(':'):
user_message1 = user_message + ":" + input1
elif input1:
user_message1 = user_message + input1
else:
user_message1 = user_message
if sanitize_user_prompt:
pass
# requirements.txt has a comment noting that the 2 lines below need to be re-enabled
# from better_profanity import profanity
# user_message1 = profanity.censor(user_message1)
history = args_list[-1]
if history is None:
# bad history
history = []
history = history.copy()
if undo:
if len(history) > 0:
history.pop()
return history
if retry:
if history:
history[-1][1] = None
return history
if user_message1 in ['', None, '\n']:
if not allow_empty_instruction(langchain_mode1, document_subset1, langchain_action1):
# reject non-retry submit/enter
return history
user_message1 = fix_text_for_gradio(user_message1)
return history + [[user_message1, None]]
def user(*args, undo=False, retry=False, sanitize_user_prompt=False):
return update_history(*args, undo=undo, retry=retry, sanitize_user_prompt=sanitize_user_prompt)
def all_user(*args, undo=False, retry=False, sanitize_user_prompt=False, num_model_lock=0, all_models=None):
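# apply update_history() to the history of every visible locked model, leaving hidden models' histories unchanged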
args_list = list(args)
visible_models1 = args_list[eval_func_param_names.index('visible_models')]
assert isinstance(all_models, list)
visible_list = get_model_lock_visible_list(visible_models1, all_models)
history_list = args_list[-num_model_lock:]
assert len(all_models) == len(history_list)
assert len(history_list) > 0, "Bad history list: %s" % history_list
for hi, history in enumerate(history_list):
if not visible_list[hi]:
continue
if num_model_lock > 0:
hargs = args_list[:-num_model_lock].copy()
else:
hargs = args_list.copy()
hargs += [history]
history_list[hi] = update_history(*hargs, undo=undo, retry=retry,
sanitize_user_prompt=sanitize_user_prompt)
if len(history_list) > 1:
return tuple(history_list)
else:
return history_list[0]
def get_model_max_length(model_state1):
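# prefer the tokenizer from the passed model state, fall back to the global model_state0, else assume 2000 tokens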
if model_state1 and not isinstance(model_state1["tokenizer"], str):
tokenizer = model_state1["tokenizer"]
elif model_state0 and not isinstance(model_state0["tokenizer"], str):
tokenizer = model_state0["tokenizer"]
else:
tokenizer = None
if tokenizer is not None:
return tokenizer.model_max_length
else:
return 2000
def prep_bot(*args, retry=False, which_model=0):
"""
:param args:
:param retry:
:param which_model: identifies which model if doing model_lock
API only called for which_model=0, default for inputs_list, but rest should ignore inputs_list
:return: history plus evaluate state; fun1 is a callable if the bot should run, or None if history should just be yielded
"""
isize = len(input_args_list) + 1 # states + chat history
# don't deepcopy, can contain model itself
args_list = list(args).copy()
model_state1 = args_list[-isize]
my_db_state1 = args_list[-isize + 1]
selection_docs_state1 = args_list[-isize + 2]
requests_state1 = args_list[-isize + 3]
history = args_list[-1]
if not history:
history = []
# NOTE: For these, could check if None, then automatically use CLI values, but too complex behavior
prompt_type1 = args_list[eval_func_param_names.index('prompt_type')]
prompt_dict1 = args_list[eval_func_param_names.index('prompt_dict')]
max_time1 = args_list[eval_func_param_names.index('max_time')]
stream_output1 = args_list[eval_func_param_names.index('stream_output')]
langchain_mode1 = args_list[eval_func_param_names.index('langchain_mode')]
langchain_action1 = args_list[eval_func_param_names.index('langchain_action')]
document_subset1 = args_list[eval_func_param_names.index('document_subset')]
h2ogpt_key1 = args_list[eval_func_param_names.index('h2ogpt_key')]
chat_conversation1 = args_list[eval_func_param_names.index('chat_conversation')]
valid_key = is_valid_key(kwargs['enforce_h2ogpt_api_key'],
kwargs['enforce_h2ogpt_ui_key'],
kwargs['h2ogpt_api_keys'], h2ogpt_key1,
requests_state1=requests_state1)
dummy_return = history, None, langchain_mode1, my_db_state1, requests_state1, \
valid_key, h2ogpt_key1, \
max_time1, stream_output1
if model_state1['model'] is None or model_state1['model'] == no_model_str:
return dummy_return
args_list = args_list[:-isize] # only keep rest needed for evaluate()
if not history:
if verbose:
print("No history", flush=True)
return dummy_return
instruction1 = history[-1][0]
if retry and history:
# if retry, pop history and move onto bot stuff
instruction1 = history[-1][0]
history[-1][1] = None
elif not instruction1:
if not allow_empty_instruction(langchain_mode1, document_subset1, langchain_action1):
# if not retrying, then reject empty query
return dummy_return
elif len(history) > 0 and history[-1][1] not in [None, '']:
# reject submit button if already filled and not retrying
# None when not filling with '' to keep client happy
return dummy_return
evaluate_local = evaluate if valid_key else evaluate_fake
# shouldn't have to specify in API prompt_type if CLI launched model, so prefer global CLI one if have it
prompt_type1, prompt_dict1 = update_prompt(prompt_type1, prompt_dict1, model_state1,
which_model=which_model)
# apply back to args_list for evaluate()
args_list[eval_func_param_names.index('prompt_type')] = prompt_type1
args_list[eval_func_param_names.index('prompt_dict')] = prompt_dict1
context1 = args_list[eval_func_param_names.index('context')]
chat_conversation1 = merge_chat_conversation_history(chat_conversation1, history)
args_list[eval_func_param_names.index('chat_conversation')] = chat_conversation1
if 'visible_models' in model_state1 and model_state1['visible_models'] is not None:
assert isinstance(model_state1['visible_models'], (int, str))
args_list[eval_func_param_names.index('visible_models')] = model_state1['visible_models']
if 'h2ogpt_key' in model_state1 and model_state1['h2ogpt_key'] is not None:
# i.e. may be '' and used to override overall local key
assert isinstance(model_state1['h2ogpt_key'], str)
args_list[eval_func_param_names.index('h2ogpt_key')] = model_state1['h2ogpt_key']
args_list[0] = instruction1 # override original instruction with history from user
args_list[2] = context1
fun1 = partial(evaluate_local,
model_state1,
my_db_state1,
selection_docs_state1,
requests_state1,
*tuple(args_list),
**kwargs_evaluate)
return history, fun1, langchain_mode1, my_db_state1, requests_state1, \
valid_key, h2ogpt_key1, \
max_time1, stream_output1
def gen1_fake(fun1, history):
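# placeholder generator for hidden models: yields the unchanged history once with empty error/extra/save_dict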
error = ''
extra = ''
save_dict = dict()
yield history, error, extra, save_dict
return
def get_response(fun1, history):
"""
Bot generator that consumes history for user input;
the instruction (from inputs_list) itself is not consumed by the bot.
Yields (history, error, extra, save_dict) tuples.
"""
error = ''
extra = ''
save_dict = dict()
if not fun1:
yield history, error, extra, save_dict
return
try:
for output_fun in fun1():
output = output_fun['response']
extra = output_fun['sources'] # FIXME: can show sources in separate text box etc.
save_dict = output_fun.get('save_dict', {})
# ensure good visually, else markdown ignores multiple \n
bot_message = fix_text_for_gradio(output)
history[-1][1] = bot_message
yield history, error, extra, save_dict
except StopIteration:
yield history, error, extra, save_dict
except RuntimeError as e:
if "generator raised StopIteration" in str(e):
# assume last entry was bad, undo
history.pop()
yield history, error, extra, save_dict
else:
if history and len(history) > 0 and len(history[0]) > 1 and history[-1][1] is None:
history[-1][1] = ''
yield history, str(e), extra, save_dict
raise
except Exception as e:
# put error into user input
ex = "Exception: %s" % str(e)
if history and len(history) > 0 and len(history[0]) > 1 and history[-1][1] is None:
history[-1][1] = ''
yield history, ex, extra, save_dict
raise
finally:
# clear_torch_cache()
# don't clear torch cache here, too early and stalls generation if used for all_bot()
pass
return
def clear_embeddings(langchain_mode1, db1s):
# clear any use of embedding that sits on GPU, else keeps accumulating GPU usage even if clear torch cache
if db_type in ['chroma', 'chroma_old'] and langchain_mode1 not in ['LLM', 'Disabled', None, '']:
from gpt_langchain import clear_embedding, length_db1
db = dbs.get(langchain_mode1)
if db is not None and not isinstance(db, str):
clear_embedding(db)
if db1s is not None and langchain_mode1 in db1s:
db1 = db1s[langchain_mode1]
if len(db1) == length_db1():
clear_embedding(db1[0])
def bot(*args, retry=False):
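# single-model bot: prepare evaluate() via prep_bot, stream responses back to gradio, then clear caches and save generation output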
history, fun1, langchain_mode1, db1, requests_state1, \
valid_key, h2ogpt_key1, \
max_time1, stream_output1 = prep_bot(*args, retry=retry)
save_dict = dict()
error = ''
extra = ''
history_str_old = ''
error_old = ''
try:
tgen0 = time.time()
for res in get_response(fun1, history):
do_yield = False
history, error, extra, save_dict = res
# pass back to gradio only these, rest are consumed in this function
history_str = str(history)
do_yield |= (history_str != history_str_old or error != error_old)
if stream_output1 and do_yield:
yield history, error
history_str_old = history_str
error_old = error
if time.time() - tgen0 > max_time1 + 10: # don't use actual, so inner has chance to complete
if verbose:
print("Took too long bot: %s" % (time.time() - tgen0), flush=True)
break
# yield if anything left over
yield history, error
finally:
clear_torch_cache()
clear_embeddings(langchain_mode1, db1)
# save
if 'extra_dict' not in save_dict:
save_dict['extra_dict'] = {}
save_dict['valid_key'] = valid_key
save_dict['h2ogpt_key'] = h2ogpt_key1
if requests_state1:
save_dict['extra_dict'].update(requests_state1)
else:
save_dict['extra_dict'].update(dict(username='NO_REQUEST'))
save_dict['error'] = error
save_dict['extra'] = extra
save_dict['which_api'] = 'bot'
save_generate_output(**save_dict)
def all_bot(*args, retry=False, model_states1=None, all_models=None):
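# model-lock bot: run get_response for each visible model via TimeoutIterator, interleave their yields with
# zip_longest, and stream all chatbots plus an exceptions string back to gradio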
args_list = list(args).copy()
chatbots = args_list[-len(model_states1):]
args_list0 = args_list[:-len(model_states1)] # same for all models
stream_output1 = args_list[eval_func_param_names.index('stream_output')]
max_time1 = args_list[eval_func_param_names.index('max_time')]
langchain_mode1 = args_list[eval_func_param_names.index('langchain_mode')]
visible_models1 = args_list[eval_func_param_names.index('visible_models')]
assert isinstance(all_models, list)
assert len(all_models) == len(model_states1)
visible_list = get_model_lock_visible_list(visible_models1, all_models)
isize = len(input_args_list) + 1 # states + chat history
db1s = None
requests_state1 = None
valid_key = False
h2ogpt_key1 = ''
extras = []
exceptions = []
save_dicts = []
try:
gen_list = []
num_visible_bots = sum(visible_list)
for chatboti, (chatbot1, model_state1) in enumerate(zip(chatbots, model_states1)):
args_list1 = args_list0.copy()
args_list1.insert(-isize + 2,
model_state1) # insert at -2 so is at -3, and after chatbot1 added, at -4
# if at start, have None in response still, replace with '' so client etc. acts like normal
# assumes other parts of code treat '' and None as if no response yet from bot
# can't do this later in bot code as racy with threaded generators
if chatbot1 is None:
chatbot1 = []
if len(chatbot1) > 0 and len(chatbot1[-1]) == 2 and chatbot1[-1][1] is None:
chatbot1[-1][1] = ''
args_list1.append(chatbot1)
# so consistent with prep_bot()
# with model_state1 at -3, my_db_state1 at -2, and history(chatbot) at -1
# langchain_mode1 and my_db_state1 and requests_state1 should be same for every bot
history, fun1, langchain_mode1, db1s, requests_state1, \
valid_key, h2ogpt_key1, \
max_time1, stream_output1 = \
prep_bot(*tuple(args_list1), retry=retry, which_model=chatboti)
if num_visible_bots == 1:
# no need to lag, will be faster this way
lag = 0
else:
lag = 1e-3
if visible_list[chatboti]:
gen1 = get_response(fun1, history)
# always wrap in TimeoutIterator whether streaming or not, so no single iterator/generator blocks the others
gen1 = TimeoutIterator(gen1, timeout=lag, sentinel=None, raise_on_exception=False)
# otherwise the timeout would truncate output for the non-streaming case
else:
gen1 = gen1_fake(fun1, history)
gen_list.append(gen1)
finally:
pass
def choose_exc(x):
# don't expose ports etc. to exceptions window
if is_public:
return "Endpoint unavailable or failed"
else:
return x
bots = bots_old = chatbots.copy()
bots_str = bots_old_str = str(chatbots)
exceptions = exceptions_old = [''] * len(bots_old)
exceptions_str = '\n'.join(
['Model %s: %s' % (iix, choose_exc(x)) for iix, x in enumerate(exceptions) if
x not in [None, '', 'None']])
exceptions_old_str = exceptions_str
extras = extras_old = [''] * len(bots_old)
save_dicts = save_dicts_old = [{}] * len(bots_old)
tgen0 = time.time()
try:
for res1 in itertools.zip_longest(*gen_list):
do_yield = False
bots = [x[0] if x is not None and not isinstance(x, BaseException) else y
for x, y in zip(res1, bots_old)]
bots_str = str(bots)
do_yield |= bots_str != bots_old_str
bots_old_str = bots_str
def larger_str(x, y):
return x if len(x) > len(y) else y
exceptions = [x[1] if x is not None and not isinstance(x, BaseException) else larger_str(str(x), y)
for x, y in zip(res1, exceptions_old)]
do_yield |= exceptions != exceptions_old
exceptions_old = exceptions.copy()
extras = [x[2] if x is not None and not isinstance(x, BaseException) else y
for x, y in zip(res1, extras_old)]
extras_old = extras.copy()
save_dicts = [x[3] if x is not None and not isinstance(x, BaseException) else y
for x, y in zip(res1, save_dicts_old)]
save_dicts_old = save_dicts.copy()
exceptions_str = '\n'.join(
['Model %s: %s' % (iix, choose_exc(x)) for iix, x in enumerate(exceptions) if
x not in [None, '', 'None']])
do_yield |= exceptions_str != exceptions_old_str
exceptions_old_str = exceptions_str
# only bots + exceptions are yielded back to gradio; the rest are consumed locally
if stream_output1 and do_yield:
if len(bots) > 1:
yield tuple(bots + [exceptions_str])
else:
yield bots[0], exceptions_str
if time.time() - tgen0 > max_time1 + 10: # don't use actual, so inner has chance to complete
if verbose:
print("Took too long all_bot: %s" % (time.time() - tgen0), flush=True)
break
if exceptions:
exceptions_reduced = [x for x in exceptions if x not in ['', None, 'None']]
if exceptions_reduced:
print("Generate exceptions: %s" % exceptions_reduced, flush=True)
# yield if anything left over as can happen (FIXME: Understand better)
if len(bots) > 1:
yield tuple(bots + [exceptions_str])
else:
yield bots[0], exceptions_str
finally:
clear_torch_cache()
clear_embeddings(langchain_mode1, db1s)
# save
for extra, error, save_dict, model_name in zip(extras, exceptions, save_dicts, all_models):
if 'extra_dict' not in save_dict:
save_dict['extra_dict'] = {}
if requests_state1:
save_dict['extra_dict'].update(requests_state1)
else:
save_dict['extra_dict'].update(dict(username='NO_REQUEST'))
save_dict['error'] = error
save_dict['extra'] = extra
save_dict['which_api'] = 'all_bot_%s' % model_name
save_dict['valid_key'] = valid_key
save_dict['h2ogpt_key'] = h2ogpt_key1
save_generate_output(**save_dict)
# NORMAL MODEL
user_args = dict(fn=functools.partial(user, sanitize_user_prompt=kwargs['sanitize_user_prompt']),
inputs=inputs_list + [text_output],
outputs=text_output,
)
bot_args = dict(fn=bot,
inputs=inputs_list + [model_state, my_db_state, selection_docs_state, requests_state] + [
text_output],
outputs=[text_output, chat_exception_text],
)
retry_bot_args = dict(fn=functools.partial(bot, retry=True),
inputs=inputs_list + [model_state, my_db_state, selection_docs_state, requests_state] + [
text_output],
outputs=[text_output, chat_exception_text],
)
retry_user_args = dict(fn=functools.partial(user, retry=True),
inputs=inputs_list + [text_output],
outputs=text_output,
)
undo_user_args = dict(fn=functools.partial(user, undo=True),
inputs=inputs_list + [text_output],
outputs=text_output,
)
# MODEL2
user_args2 = dict(fn=functools.partial(user, sanitize_user_prompt=kwargs['sanitize_user_prompt']),
inputs=inputs_list2 + [text_output2],
outputs=text_output2,
)
bot_args2 = dict(fn=bot,
inputs=inputs_list2 + [model_state2, my_db_state, selection_docs_state, requests_state] + [
text_output2],
outputs=[text_output2, chat_exception_text],
)
retry_bot_args2 = dict(fn=functools.partial(bot, retry=True),
inputs=inputs_list2 + [model_state2, my_db_state, selection_docs_state,
requests_state] + [text_output2],
outputs=[text_output2, chat_exception_text],
)
retry_user_args2 = dict(fn=functools.partial(user, retry=True),
inputs=inputs_list2 + [text_output2],
outputs=text_output2,
)
undo_user_args2 = dict(fn=functools.partial(user, undo=True),
inputs=inputs_list2 + [text_output2],
outputs=text_output2,
)
# MODEL N
all_user_args = dict(fn=functools.partial(all_user,
sanitize_user_prompt=kwargs['sanitize_user_prompt'],
num_model_lock=len(text_outputs),
all_models=kwargs['all_models']
),
inputs=inputs_list + text_outputs,
outputs=text_outputs,
)
all_bot_args = dict(fn=functools.partial(all_bot, model_states1=model_states,
all_models=kwargs['all_models']),
inputs=inputs_list + [my_db_state, selection_docs_state, requests_state] +
text_outputs,
outputs=text_outputs + [chat_exception_text],
)
all_retry_bot_args = dict(fn=functools.partial(all_bot, model_states1=model_states,
all_models=kwargs['all_models'],
retry=True),
inputs=inputs_list + [my_db_state, selection_docs_state, requests_state] +
text_outputs,
outputs=text_outputs + [chat_exception_text],
)
all_retry_user_args = dict(fn=functools.partial(all_user, retry=True,
sanitize_user_prompt=kwargs['sanitize_user_prompt'],
num_model_lock=len(text_outputs),
all_models=kwargs['all_models']
),
inputs=inputs_list + text_outputs,
outputs=text_outputs,
)
all_undo_user_args = dict(fn=functools.partial(all_user, undo=True,
sanitize_user_prompt=kwargs['sanitize_user_prompt'],
num_model_lock=len(text_outputs),
all_models=kwargs['all_models']
),
inputs=inputs_list + text_outputs,
outputs=text_outputs,
)
def clear_instruct():
return gr.Textbox(value='')
def deselect_radio_chats():
return gr.update(value=None)
def clear_all():
return gr.Textbox(value=''), gr.Textbox(value=''), gr.update(value=None), \
gr.Textbox(value=''), gr.Textbox(value='')
if kwargs['model_states']:
# separate list objects so extending submits1 does not also extend submits2/submits3
submits1, submits2, submits3, submits4 = [], [], [], []
triggers = [instruction, submit, retry_btn]
fun_source = [instruction.submit, submit.click, retry_btn.click]
fun_name = ['instruction', 'submit', 'retry']
user_args = [all_user_args, all_user_args, all_retry_user_args]
bot_args = [all_bot_args, all_bot_args, all_retry_bot_args]
for userargs1, botarg1, funn1, funs1, trigger1, in zip(user_args, bot_args, fun_name, fun_source, triggers):
submit_event11 = funs1(fn=user_state_setup,
inputs=[my_db_state, requests_state, trigger1, trigger1],
outputs=[my_db_state, requests_state, trigger1],
queue=queue)
submit_event1a = submit_event11.then(**userargs1, queue=queue,
api_name='%s' % funn1 if allow_api else None)
# if hit enter on new instruction for submitting new query, no longer the saved chat
submit_event1b = submit_event1a.then(clear_all, inputs=None,
outputs=[instruction, iinput, radio_chats, score_text,
score_text2],
queue=queue)
submit_event1c = submit_event1b.then(**botarg1,
api_name='%s_bot' % funn1 if allow_api else None,
queue=queue)
submit_event1d = submit_event1c.then(**all_score_args,
api_name='%s_bot_score' % funn1 if allow_api else None,
queue=queue)
submits1.extend([submit_event1a, submit_event1b, submit_event1c, submit_event1d])
# if undo, no longer the saved chat
submit_event4 = undo.click(fn=user_state_setup,
inputs=[my_db_state, requests_state, undo, undo],
outputs=[my_db_state, requests_state, undo],
queue=queue) \
.then(**all_undo_user_args, api_name='undo' if allow_api else None) \
.then(clear_all, inputs=None, outputs=[instruction, iinput, radio_chats, score_text,
score_text2], queue=queue) \
.then(**all_score_args, api_name='undo_score' if allow_api else None)
submits4 = [submit_event4]
else:
# in case 2nd model, consume instruction first, so can clear quickly
# bot doesn't consume the instruction itself, just the history from user(), which is why this works
submit_event11 = instruction.submit(fn=user_state_setup,
inputs=[my_db_state, requests_state, instruction, instruction],
outputs=[my_db_state, requests_state, instruction],
queue=queue)
submit_event1a = submit_event11.then(**user_args, queue=queue,
api_name='instruction' if allow_api else None)
# if hit enter on new instruction for submitting new query, no longer the saved chat
submit_event1a2 = submit_event1a.then(deselect_radio_chats, inputs=None, outputs=radio_chats, queue=queue)
submit_event1b = submit_event1a2.then(**user_args2, api_name='instruction2' if allow_api else None)
submit_event1c = submit_event1b.then(clear_instruct, None, instruction) \
.then(clear_instruct, None, iinput)
submit_event1d = submit_event1c.then(**bot_args, api_name='instruction_bot' if allow_api else None,
queue=queue)
submit_event1e = submit_event1d.then(**score_args,
api_name='instruction_bot_score' if allow_api else None,
queue=queue)
submit_event1f = submit_event1e.then(**bot_args2, api_name='instruction_bot2' if allow_api else None,
queue=queue)
submit_event1g = submit_event1f.then(**score_args2,
api_name='instruction_bot_score2' if allow_api else None, queue=queue)
submits1 = [submit_event1a, submit_event1a2, submit_event1b, submit_event1c, submit_event1d,
submit_event1e,
submit_event1f, submit_event1g]
submit_event21 = submit.click(fn=user_state_setup,
inputs=[my_db_state, requests_state, submit, submit],
outputs=[my_db_state, requests_state, submit],
queue=queue)
submit_event2a = submit_event21.then(**user_args, api_name='submit' if allow_api else None)
# if submit new query, no longer the saved chat
submit_event2a2 = submit_event2a.then(deselect_radio_chats, inputs=None, outputs=radio_chats, queue=queue)
submit_event2b = submit_event2a2.then(**user_args2, api_name='submit2' if allow_api else None)
submit_event2c = submit_event2b.then(clear_all, inputs=None,
outputs=[instruction, iinput, radio_chats, score_text, score_text2],
queue=queue)
submit_event2d = submit_event2c.then(**bot_args, api_name='submit_bot' if allow_api else None, queue=queue)
submit_event2e = submit_event2d.then(**score_args,
api_name='submit_bot_score' if allow_api else None,
queue=queue)
submit_event2f = submit_event2e.then(**bot_args2, api_name='submit_bot2' if allow_api else None,
queue=queue)
submit_event2g = submit_event2f.then(**score_args2,
api_name='submit_bot_score2' if allow_api else None,
queue=queue)
submits2 = [submit_event2a, submit_event2a2, submit_event2b, submit_event2c, submit_event2d,
submit_event2e,
submit_event2f, submit_event2g]
submit_event31 = retry_btn.click(fn=user_state_setup,
inputs=[my_db_state, requests_state, retry_btn, retry_btn],
outputs=[my_db_state, requests_state, retry_btn],
queue=queue)
submit_event3a = submit_event31.then(**user_args, api_name='retry' if allow_api else None)
# if retry, no longer the saved chat
submit_event3a2 = submit_event3a.then(deselect_radio_chats, inputs=None, outputs=radio_chats, queue=queue)
submit_event3b = submit_event3a2.then(**user_args2, api_name='retry2' if allow_api else None)
submit_event3c = submit_event3b.then(clear_instruct, None, instruction) \
.then(clear_instruct, None, iinput)
submit_event3d = submit_event3c.then(**retry_bot_args, api_name='retry_bot' if allow_api else None,
queue=queue)
submit_event3e = submit_event3d.then(**score_args,
api_name='retry_bot_score' if allow_api else None,
queue=queue)
submit_event3f = submit_event3e.then(**retry_bot_args2, api_name='retry_bot2' if allow_api else None,
queue=queue)
submit_event3g = submit_event3f.then(**score_args2,
api_name='retry_bot_score2' if allow_api else None,
queue=queue)
submits3 = [submit_event3a, submit_event3a2, submit_event3b, submit_event3c, submit_event3d,
submit_event3e,
submit_event3f, submit_event3g]
# if undo, no longer the saved chat
submit_event4 = undo.click(fn=user_state_setup,
inputs=[my_db_state, requests_state, undo, undo],
outputs=[my_db_state, requests_state, undo],
queue=queue) \
.then(**undo_user_args, api_name='undo' if allow_api else None) \
.then(**undo_user_args2, api_name='undo2' if allow_api else None) \
.then(clear_all, inputs=None, outputs=[instruction, iinput, radio_chats, score_text,
score_text2], queue=queue) \
.then(**score_args, api_name='undo_score' if allow_api else None) \
.then(**score_args2, api_name='undo_score2' if allow_api else None)
submits4 = [submit_event4]
# MANAGE CHATS
def dedup(short_chat, short_chats):
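# make the short chat name unique by appending _1, _2, ... (or a random suffix as a last resort)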
if short_chat not in short_chats:
return short_chat
for i in range(1, 1000):
short_chat_try = short_chat + "_" + str(i)
if short_chat_try not in short_chats:
return short_chat_try
# fallback and hope for best
short_chat = short_chat + "_" + str(random.random())
return short_chat
def get_short_chat(x, short_chats, short_len=20, words=4):
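# build a short radio label from the first question (or from the answer, e.g. for summarization), deduped against existing labels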
if x and len(x[0]) == 2 and x[0][0] is not None:
short_chat = ' '.join(x[0][0][:short_len].split(' ')[:words]).strip()
if not short_chat:
# e.g. summarization, so try using the answer instead
short_chat = ' '.join(x[0][1][:short_len].split(' ')[:words]).strip()
if not short_chat:
short_chat = 'Unk'
short_chat = dedup(short_chat, short_chats)
else:
short_chat = None
return short_chat
def is_chat_same(x, y):
# <p> etc. added in chat, try to remove some of that to help avoid dup entries when hit new conversation
is_same = True
# length of conversation has to be same
if len(x) != len(y):
return False
for stepx, stepy in zip(x, y):
if len(stepx) != len(stepy):
# something off with a conversation
return False
for stepxx, stepyy in zip(stepx, stepy):
if len(stepxx) != len(stepyy):
# something off with a conversation
return False
if len(stepxx) != 2:
# something off
return False
if len(stepyy) != 2:
# something off
return False
questionx = stepxx[0].replace('<p>', '').replace('</p>', '') if stepxx[0] is not None else None
answerx = stepxx[1].replace('<p>', '').replace('</p>', '') if stepxx[1] is not None else None
questiony = stepyy[0].replace('<p>', '').replace('</p>', '') if stepyy[0] is not None else None
answery = stepyy[1].replace('<p>', '').replace('</p>', '') if stepyy[1] is not None else None
if questionx != questiony or answerx != answery:
return False
return is_same
def save_chat(*args, chat_is_list=False, auth_filename=None, auth_freeze=None, raise_if_none=True):
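# store the current chatbot histories into chat_state keyed by a short name, skipping duplicates,
# and persist the chats to the auth file via save_auth_func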
args_list = list(args)
db1s = args_list[0]
requests_state1 = args_list[1]
args_list = args_list[2:]
if not chat_is_list:
# list of chatbot histories,
# can't pass in list with list of chatbot histories and state due to gradio limits
chat_list = args_list[:-1]
else:
assert len(args_list) == 2
chat_list = args_list[0]
# if old chat file with single chatbot, get into shape
if isinstance(chat_list, list) and len(chat_list) > 0 and isinstance(chat_list[0], list) and len(
chat_list[0]) == 2 and isinstance(chat_list[0][0], str) and isinstance(chat_list[0][1], str):
chat_list = [chat_list]
# remove None histories
chat_list_not_none = [x for x in chat_list if x and len(x) > 0 and len(x[0]) == 2 and x[0][1] is not None]
chat_list_none = [x for x in chat_list if x not in chat_list_not_none]
if len(chat_list_none) > 0 and len(chat_list_not_none) == 0:
if raise_if_none:
raise ValueError("Invalid chat file")
else:
chat_state1 = args_list[-1]
choices = list(chat_state1.keys()).copy()
return chat_state1, gr.update(choices=choices, value=None)
# dict with keys of short chat names, values of list of list of chatbot histories
chat_state1 = args_list[-1]
short_chats = list(chat_state1.keys())
if len(chat_list_not_none) > 0:
# make short_chat key from only first history, based upon question that is same anyways
chat_first = chat_list_not_none[0]
short_chat = get_short_chat(chat_first, short_chats)
if short_chat:
old_chat_lists = list(chat_state1.values())
already_exists = any([is_chat_same(chat_list, x) for x in old_chat_lists])
if not already_exists:
chat_state1[short_chat] = chat_list.copy()
# reverse so newest at top
choices = list(chat_state1.keys()).copy()
choices.reverse()
# save saved chats and chatbots to auth file
selection_docs_state1 = None
langchain_mode2 = None
text_output1 = chat_list[0]
text_output21 = chat_list[1]
text_outputs1 = chat_list[2:]
save_auth_func(selection_docs_state1, requests_state1,
chat_state1, langchain_mode2,
text_output1, text_output21, text_outputs1,
)
return chat_state1, gr.update(choices=choices, value=None)
def switch_chat(chat_key, chat_state1, num_model_lock=0):
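# restore a saved chat into all chatbot components, cycling if the saved list and current component count differ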
chosen_chat = chat_state1[chat_key]
# deal with possible different size of chat list vs. current list
ret_chat = [None] * (2 + num_model_lock)
for chati in range(0, 2 + num_model_lock):
ret_chat[chati % len(ret_chat)] = chosen_chat[chati % len(chosen_chat)]
return tuple(ret_chat)
def clear_texts(*args):
return tuple([gr.Textbox(value='')] * len(args))
def clear_scores():
return gr.Textbox(value=res_value), \
gr.Textbox(value='Response Score: NA'), \
gr.Textbox(value='Response Score: NA')
switch_chat_fun = functools.partial(switch_chat, num_model_lock=len(text_outputs))
radio_chats.input(switch_chat_fun,
inputs=[radio_chats, chat_state],
outputs=[text_output, text_output2] + text_outputs) \
.then(clear_scores, outputs=[score_text, score_text2, score_text_nochat])
def remove_chat(chat_key, chat_state1):
if isinstance(chat_key, str):
chat_state1.pop(chat_key, None)
return gr.update(choices=list(chat_state1.keys()), value=None), chat_state1
remove_chat_event = remove_chat_btn.click(remove_chat,
inputs=[radio_chats, chat_state],
outputs=[radio_chats, chat_state],
queue=False, api_name='remove_chat')
def get_chats1(chat_state1):
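# export all saved chats to a uniquely named JSON file under the chats directory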
base = 'chats'
base = makedirs(base, exist_ok=True, tmp_ok=True, use_base=True)
filename = os.path.join(base, 'chats_%s.json' % str(uuid.uuid4()))
with open(filename, "wt") as f:
f.write(json.dumps(chat_state1, indent=2))
return filename
export_chat_event = export_chats_btn.click(get_chats1, inputs=chat_state, outputs=chats_file, queue=False,
api_name='export_chats' if allow_api else None)
def add_chats_from_file(db1s, requests_state1, file, chat_state1, radio_chats1, chat_exception_text1,
auth_filename=None, auth_freeze=None):
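# import chats from uploaded JSON file(s), de-duplicating via save_chat, collecting per-file exceptions,
# and persisting the merged chat state to the auth file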
if not file:
return None, chat_state1, gr.update(choices=list(chat_state1.keys()), value=None), chat_exception_text1
if isinstance(file, str):
files = [file]
else:
files = file
if not files:
return None, chat_state1, gr.update(choices=list(chat_state1.keys()), value=None), chat_exception_text1
chat_exception_list = []
for file1 in files:
try:
if hasattr(file1, 'name'):
file1 = file1.name
with open(file1, "rt") as f:
new_chats = json.loads(f.read())
for chat1_k, chat1_v in new_chats.items():
# ignore chat1_k, regenerate and de-dup to avoid loss
chat_state1, _ = save_chat(db1s, requests_state1, chat1_v, chat_state1, chat_is_list=True,
raise_if_none=True)
except BaseException as e:
t, v, tb = sys.exc_info()
ex = ''.join(traceback.format_exception(t, v, tb))
ex_str = "File %s exception: %s" % (file1, str(e))
print(ex_str, flush=True)
chat_exception_list.append(ex_str)
chat_exception_text1 = '\n'.join(chat_exception_list)
# save chat to auth file
selection_docs_state1 = None
langchain_mode2 = None
text_output1, text_output21, text_outputs1 = None, None, None
save_auth_func(selection_docs_state1, requests_state1,
chat_state1, langchain_mode2,
text_output1, text_output21, text_outputs1,
)
return None, chat_state1, gr.update(choices=list(chat_state1.keys()), value=None), chat_exception_text1
# note for update_user_db_func output is ignored for db
chatup_change_eventa = chatsup_output.change(user_state_setup,
inputs=[my_db_state, requests_state, langchain_mode],
outputs=[my_db_state, requests_state, langchain_mode],
show_progress='minimal')
add_chats_from_file_func = functools.partial(add_chats_from_file,
auth_filename=kwargs['auth_filename'],
auth_freeze=kwargs['auth_freeze'],
)
chatup_change_event = chatup_change_eventa.then(add_chats_from_file_func,
inputs=[my_db_state, requests_state] +
[chatsup_output, chat_state, radio_chats,
chat_exception_text],
outputs=[chatsup_output, chat_state, radio_chats,
chat_exception_text],
queue=False,
api_name='add_to_chats' if allow_api else None)
clear_chat_event = clear_chat_btn.click(fn=clear_texts,
inputs=[text_output, text_output2] + text_outputs,
outputs=[text_output, text_output2] + text_outputs,
queue=False, api_name='clear' if allow_api else None) \
.then(deselect_radio_chats, inputs=None, outputs=radio_chats, queue=False) \
.then(clear_scores, outputs=[score_text, score_text2, score_text_nochat])
clear_eventa = save_chat_btn.click(user_state_setup,
inputs=[my_db_state, requests_state, langchain_mode],
outputs=[my_db_state, requests_state, langchain_mode],
show_progress='minimal')
save_chat_func = functools.partial(save_chat,
auth_filename=kwargs['auth_filename'],
auth_freeze=kwargs['auth_freeze'],
raise_if_none=False,
)
clear_event = clear_eventa.then(save_chat_func,
inputs=[my_db_state, requests_state] +
[text_output, text_output2] + text_outputs +
[chat_state],
outputs=[chat_state, radio_chats],
api_name='save_chat' if allow_api else None)
if kwargs['score_model']:
clear_event2 = clear_event.then(clear_scores, outputs=[score_text, score_text2, score_text_nochat])
# NOTE: clear of instruction/iinput for nochat has to come after score,
# because score for nochat consumes actual textbox, while chat consumes chat history filled by user()
no_chat_args = dict(fn=fun,
inputs=[model_state, my_db_state, selection_docs_state, requests_state] + inputs_list,
outputs=text_output_nochat,
queue=queue,
)
submit_event_nochat = submit_nochat.click(**no_chat_args, api_name='submit_nochat' if allow_api else None) \
.then(clear_torch_cache) \
.then(**score_args_nochat, api_name='instruction_bot_score_nochat' if allow_api else None, queue=queue) \
.then(clear_instruct, None, instruction_nochat) \
.then(clear_instruct, None, iinput_nochat) \
.then(clear_torch_cache)
# copy of above with text box submission
submit_event_nochat2 = instruction_nochat.submit(**no_chat_args) \
.then(clear_torch_cache) \
.then(**score_args_nochat, queue=queue) \
.then(clear_instruct, None, instruction_nochat) \
.then(clear_instruct, None, iinput_nochat) \
.then(clear_torch_cache)
submit_event_nochat_api = submit_nochat_api.click(fun_with_dict_str,
inputs=[model_state, my_db_state, selection_docs_state,
requests_state,
inputs_dict_str],
outputs=text_output_nochat_api,
queue=True, # required for generator
api_name='submit_nochat_api' if allow_api else None)
submit_event_nochat_api_plain = submit_nochat_api_plain.click(fun_with_dict_str_plain,
inputs=inputs_dict_str,
outputs=text_output_nochat_api,
queue=False,
api_name='submit_nochat_plain_api' if allow_api else None)
def load_model(model_name, lora_weights, server_name, model_state_old, prompt_type_old,
load_8bit, load_4bit, low_bit_mode,
load_gptq, load_awq, load_exllama, use_safetensors, revision,
use_gpu_id, gpu_id, max_seq_len1, rope_scaling1,
model_path_llama1, model_name_gptj1, model_name_gpt4all_llama1,
n_gpu_layers1, n_batch1, n_gqa1, llamacpp_dict_more1,
system_prompt1):
try:
llamacpp_dict = ast.literal_eval(llamacpp_dict_more1)
except:
print("Failed to use user input for llamacpp_dict_more1 dict", flush=True)
llamacpp_dict = {}
llamacpp_dict.update(dict(model_path_llama=model_path_llama1,
model_name_gptj=model_name_gptj1,
model_name_gpt4all_llama=model_name_gpt4all_llama1,
n_gpu_layers=n_gpu_layers1,
n_batch=n_batch1,
n_gqa=n_gqa1,
))
# ensure no API calls reach here
if is_public:
raise RuntimeError("Illegal access for %s" % model_name)
# ensure old model removed from GPU memory
if kwargs['debug']:
print("Pre-switch pre-del GPU memory: %s" % get_torch_allocated(), flush=True)
model0 = model_state0['model']
if isinstance(model_state_old['model'], str) and \
model0 is not None and \
hasattr(model0, 'cpu'):
# best can do, move model loaded at first to CPU
model0.cpu()
if model_state_old['model'] is not None and \
not isinstance(model_state_old['model'], str):
if hasattr(model_state_old['model'], 'cpu'):
try:
model_state_old['model'].cpu()
except Exception as e:
# sometimes hit NotImplementedError: Cannot copy out of meta tensor; no data!
print("Unable to put model on CPU: %s" % str(e), flush=True)
del model_state_old['model']
model_state_old['model'] = None
if model_state_old['tokenizer'] is not None and not isinstance(model_state_old['tokenizer'], str):
del model_state_old['tokenizer']
model_state_old['tokenizer'] = None
clear_torch_cache()
if kwargs['debug']:
print("Pre-switch post-del GPU memory: %s" % get_torch_allocated(), flush=True)
if not model_name:
model_name = no_model_str
if model_name == no_model_str:
# no-op if no model, just free memory
# no detranscribe needed for model, never go into evaluate
lora_weights = no_lora_str
server_name = no_server_str
return kwargs['model_state_none'].copy(), \
model_name, lora_weights, server_name, prompt_type_old, \
gr.Slider(maximum=256), \
gr.Slider(maximum=256)
# don't deepcopy, can contain model itself
all_kwargs1 = all_kwargs.copy()
all_kwargs1['base_model'] = model_name.strip()
all_kwargs1['load_8bit'] = load_8bit
all_kwargs1['load_4bit'] = load_4bit
all_kwargs1['low_bit_mode'] = low_bit_mode
all_kwargs1['load_gptq'] = load_gptq
all_kwargs1['load_awq'] = load_awq
all_kwargs1['load_exllama'] = load_exllama
all_kwargs1['use_safetensors'] = use_safetensors
all_kwargs1['revision'] = None if not revision else revision # transcribe, don't pass ''
all_kwargs1['use_gpu_id'] = use_gpu_id
all_kwargs1['gpu_id'] = int(gpu_id) if gpu_id not in [None, 'None'] else None # detranscribe
all_kwargs1['llamacpp_dict'] = llamacpp_dict
all_kwargs1['exllama_dict'] = kwargs['exllama_dict'] # no control in UI/API yet
all_kwargs1['gptq_dict'] = kwargs['gptq_dict'] # no control in UI/API yet
all_kwargs1['attention_sinks'] = kwargs['attention_sinks'] # no control in UI/API yet
all_kwargs1['sink_dict'] = kwargs['sink_dict'] # no control in UI/API yet
all_kwargs1['truncation_generation'] = kwargs['truncation_generation'] # no control in UI/API yet
all_kwargs1['hf_model_dict'] = kwargs['hf_model_dict'] # no control in UI/API yet
all_kwargs1['max_seq_len'] = max_seq_len1
try:
all_kwargs1['rope_scaling'] = str_to_dict(rope_scaling1) # transcribe
except:
print("Failed to use user input for rope_scaling dict", flush=True)
all_kwargs1['rope_scaling'] = {}
model_lower = model_name.strip().lower()
if model_lower in inv_prompt_type_to_model_lower:
prompt_type1 = inv_prompt_type_to_model_lower[model_lower]
else:
prompt_type1 = prompt_type_old
# detranscribe
if lora_weights == no_lora_str:
lora_weights = ''
all_kwargs1['lora_weights'] = lora_weights.strip()
if server_name == no_server_str:
server_name = ''
all_kwargs1['inference_server'] = server_name.strip()
model1, tokenizer1, device1 = get_model(reward_type=False,
**get_kwargs(get_model, exclude_names=['reward_type'],
**all_kwargs1))
clear_torch_cache()
tokenizer_base_model = model_name
prompt_dict1, error0 = get_prompt(prompt_type1, '',
chat=False, context='', reduced=False, making_context=False,
return_dict=True, system_prompt=system_prompt1)
model_state_new = dict(model=model1, tokenizer=tokenizer1, device=device1,
base_model=model_name, tokenizer_base_model=tokenizer_base_model,
lora_weights=lora_weights, inference_server=server_name,
prompt_type=prompt_type1, prompt_dict=prompt_dict1,
# FIXME: not typically required, unless want to expose adding h2ogpt endpoint in UI
visible_models=None, h2ogpt_key=None,
)
max_max_new_tokens1 = get_max_max_new_tokens(model_state_new, **kwargs)
if kwargs['debug']:
print("Post-switch GPU memory: %s" % get_torch_allocated(), flush=True)
return model_state_new, model_name, lora_weights, server_name, prompt_type1, \
gr.Slider(maximum=max_max_new_tokens1), \
gr.Slider(maximum=max_max_new_tokens1)
def get_prompt_str(prompt_type1, prompt_dict1, system_prompt1, which=0):
if prompt_type1 in ['', None]:
print("Got prompt_type %s: %s" % (which, prompt_type1), flush=True)
return str({})
prompt_dict1, prompt_dict_error = get_prompt(prompt_type1, prompt_dict1, chat=False, context='',
reduced=False, making_context=False, return_dict=True,
system_prompt=system_prompt1)
if prompt_dict_error:
return str(prompt_dict_error)
else:
# return so user can manipulate if want and use as custom
return str(prompt_dict1)
get_prompt_str_func1 = functools.partial(get_prompt_str, which=1)
get_prompt_str_func2 = functools.partial(get_prompt_str, which=2)
prompt_type.change(fn=get_prompt_str_func1, inputs=[prompt_type, prompt_dict, system_prompt],
outputs=prompt_dict, queue=False)
prompt_type2.change(fn=get_prompt_str_func2, inputs=[prompt_type2, prompt_dict2, system_prompt],
outputs=prompt_dict2,
queue=False)
def dropdown_prompt_type_list(x):
return gr.Dropdown(value=x)
def chatbot_list(x, model_used_in):
return gr.Textbox(label=f'h2oGPT [Model: {model_used_in}]')
load_model_args = dict(fn=load_model,
inputs=[model_choice, lora_choice, server_choice, model_state, prompt_type,
model_load8bit_checkbox, model_load4bit_checkbox, model_low_bit_mode,
model_load_gptq, model_load_awq, model_load_exllama_checkbox,
model_safetensors_checkbox, model_revision,
model_use_gpu_id_checkbox, model_gpu,
max_seq_len, rope_scaling,
model_path_llama, model_name_gptj, model_name_gpt4all_llama,
n_gpu_layers, n_batch, n_gqa, llamacpp_dict_more,
system_prompt],
outputs=[model_state, model_used, lora_used, server_used,
# if prompt_type changes, prompt_dict will change via change rule
prompt_type, max_new_tokens, min_new_tokens,
])
prompt_update_args = dict(fn=dropdown_prompt_type_list, inputs=prompt_type, outputs=prompt_type)
chatbot_update_args = dict(fn=chatbot_list, inputs=[text_output, model_used], outputs=text_output)
nochat_update_args = dict(fn=chatbot_list, inputs=[text_output_nochat, model_used], outputs=text_output_nochat)
load_model_event = load_model_button.click(**load_model_args,
api_name='load_model' if allow_api and not is_public else None) \
.then(**prompt_update_args) \
.then(**chatbot_update_args) \
.then(**nochat_update_args) \
.then(clear_torch_cache)
load_model_args2 = dict(fn=load_model,
inputs=[model_choice2, lora_choice2, server_choice2, model_state2, prompt_type2,
model_load8bit_checkbox2, model_load4bit_checkbox2, model_low_bit_mode2,
model_load_gptq2, model_load_awq2, model_load_exllama_checkbox2,
model_safetensors_checkbox2, model_revision2,
model_use_gpu_id_checkbox2, model_gpu2,
max_seq_len2, rope_scaling2,
model_path_llama2, model_name_gptj2, model_name_gpt4all_llama2,
n_gpu_layers2, n_batch2, n_gqa2, llamacpp_dict_more2,
system_prompt],
outputs=[model_state2, model_used2, lora_used2, server_used2,
# if prompt_type2 changes, prompt_dict2 will change via change rule
prompt_type2, max_new_tokens2, min_new_tokens2
])
prompt_update_args2 = dict(fn=dropdown_prompt_type_list, inputs=prompt_type2, outputs=prompt_type2)
chatbot_update_args2 = dict(fn=chatbot_list, inputs=[text_output2, model_used2], outputs=text_output2)
load_model_event2 = load_model_button2.click(**load_model_args2,
api_name='load_model2' if allow_api and not is_public else None) \
.then(**prompt_update_args2) \
.then(**chatbot_update_args2) \
.then(clear_torch_cache)
def dropdown_model_lora_server_list(model_list0, model_x,
lora_list0, lora_x,
server_list0, server_x,
model_used1, lora_used1, server_used1,
model_used2, lora_used2, server_used2,
):
model_new_state = [model_list0[0] + [model_x]]
model_new_options = [*model_new_state[0]]
if no_model_str in model_new_options:
model_new_options.remove(no_model_str)
model_new_options = [no_model_str] + sorted(model_new_options)
x1 = model_x if model_used1 == no_model_str else model_used1
x2 = model_x if model_used2 == no_model_str else model_used2
ret1 = [gr.Dropdown(value=x1, choices=model_new_options),
gr.Dropdown(value=x2, choices=model_new_options),
'', model_new_state]
lora_new_state = [lora_list0[0] + [lora_x]]
lora_new_options = [*lora_new_state[0]]
if no_lora_str in lora_new_options:
lora_new_options.remove(no_lora_str)
lora_new_options = [no_lora_str] + sorted(lora_new_options)
# don't switch drop-down to added lora if already have model loaded
x1 = lora_x if model_used1 == no_model_str else lora_used1
x2 = lora_x if model_used2 == no_model_str else lora_used2
ret2 = [gr.Dropdown(value=x1, choices=lora_new_options),
gr.Dropdown(value=x2, choices=lora_new_options),
'', lora_new_state]
server_new_state = [server_list0[0] + [server_x]]
server_new_options = [*server_new_state[0]]
if no_server_str in server_new_options:
server_new_options.remove(no_server_str)
server_new_options = [no_server_str] + sorted(server_new_options)
# don't switch drop-down to added server if already have model loaded
x1 = server_x if model_used1 == no_model_str else server_used1
x2 = server_x if model_used2 == no_model_str else server_used2
ret3 = [gr.Dropdown(value=x1, choices=server_new_options),
gr.Dropdown(value=x2, choices=server_new_options),
'', server_new_state]
return tuple(ret1 + ret2 + ret3)
add_model_lora_server_event = \
add_model_lora_server_button.click(fn=dropdown_model_lora_server_list,
inputs=[model_options_state, new_model] +
[lora_options_state, new_lora] +
[server_options_state, new_server] +
[model_used, lora_used, server_used] +
[model_used2, lora_used2, server_used2],
outputs=[model_choice, model_choice2, new_model, model_options_state] +
[lora_choice, lora_choice2, new_lora, lora_options_state] +
[server_choice, server_choice2, new_server,
server_options_state],
queue=False)
go_event = go_btn.click(lambda: gr.update(visible=False), None, go_btn, api_name="go" if allow_api else None,
queue=False) \
.then(lambda: gr.update(visible=True), None, normal_block, queue=False) \
.then(**load_model_args, queue=False).then(**prompt_update_args, queue=False)
def compare_textbox_fun(x):
return gr.Textbox(visible=x)
def compare_column_fun(x):
return gr.Column.update(visible=x)
def compare_prompt_fun(x):
return gr.Dropdown(visible=x)
def slider_fun(x):
return gr.Slider(visible=x)
compare_checkbox.select(compare_textbox_fun, compare_checkbox, text_output2,
api_name="compare_checkbox" if allow_api else None) \
.then(compare_column_fun, compare_checkbox, col_model2) \
.then(compare_prompt_fun, compare_checkbox, prompt_type2) \
.then(compare_textbox_fun, compare_checkbox, score_text2) \
.then(slider_fun, compare_checkbox, max_new_tokens2) \
.then(slider_fun, compare_checkbox, min_new_tokens2)
# FIXME: add score_res2 in condition, but do better
# callback for logging flagged input/output
callback.setup(inputs_list + [text_output, text_output2] + text_outputs, "flagged_data_points")
flag_btn.click(lambda *args: callback.flag(args), inputs_list + [text_output, text_output2] + text_outputs,
None,
preprocess=False,
api_name='flag' if allow_api else None, queue=False)
flag_btn_nochat.click(lambda *args: callback.flag(args), inputs_list + [text_output_nochat], None,
preprocess=False,
api_name='flag_nochat' if allow_api else None, queue=False)
def get_system_info():
if is_public:
time.sleep(10) # delay to avoid spam since queue=False
return gr.Textbox(value=system_info_print())
system_event = system_btn.click(get_system_info, outputs=system_text,
api_name='system_info' if allow_api else None, queue=False)
def get_system_info_dict(system_input1, **kwargs1):
if system_input1 != os.getenv("ADMIN_PASS", ""):
return json.dumps({})
exclude_list = ['admin_pass', 'examples']
sys_dict = {k: v for k, v in kwargs1.items() if
isinstance(v, (str, int, bool, float)) and k not in exclude_list}
try:
sys_dict.update(system_info())
except Exception as e:
# protection
print("Exception: %s" % str(e), flush=True)
return json.dumps(sys_dict)
system_kwargs = all_kwargs.copy()
system_kwargs.update(dict(command=str(' '.join(sys.argv))))
        get_system_info_dict_func = functools.partial(get_system_info_dict, **system_kwargs)
system_dict_event = system_btn2.click(get_system_info_dict_func,
inputs=system_input,
outputs=system_text2,
api_name='system_info_dict' if allow_api else None,
queue=False, # queue to avoid spam
)
def get_hash():
return kwargs['git_hash']
system_event = system_btn3.click(get_hash,
outputs=system_text3,
api_name='system_hash' if allow_api else None,
queue=False,
)
def get_model_names():
key_list = ['base_model', 'prompt_type', 'prompt_dict'] + list(kwargs['other_model_state_defaults'].keys())
# don't want to expose backend inference server IP etc.
# key_list += ['inference_server']
if len(model_states) >= 1:
local_model_states = model_states
elif model_state0 is not None:
local_model_states = [model_state0]
else:
local_model_states = []
return [{k: x[k] for k in key_list if k in x} for x in local_model_states]
models_list_event = system_btn4.click(get_model_names,
outputs=system_text4,
api_name='model_names' if allow_api else None,
queue=False,
)
def count_chat_tokens(model_state1, chat1, prompt_type1, prompt_dict1,
system_prompt1, chat_conversation1,
memory_restriction_level1=0,
keep_sources_in_context1=False,
):
if model_state1 and not isinstance(model_state1['tokenizer'], str):
tokenizer = model_state1['tokenizer']
elif model_state0 and not isinstance(model_state0['tokenizer'], str):
tokenizer = model_state0['tokenizer']
else:
tokenizer = None
if tokenizer is not None:
langchain_mode1 = 'LLM'
add_chat_history_to_context1 = True
# fake user message to mimic bot()
chat1 = copy.deepcopy(chat1)
chat1 = chat1 + [['user_message1', None]]
model_max_length1 = tokenizer.model_max_length
context1 = history_to_context(chat1,
langchain_mode=langchain_mode1,
add_chat_history_to_context=add_chat_history_to_context1,
prompt_type=prompt_type1,
prompt_dict=prompt_dict1,
chat=True,
model_max_length=model_max_length1,
memory_restriction_level=memory_restriction_level1,
keep_sources_in_context=keep_sources_in_context1,
system_prompt=system_prompt1,
chat_conversation=chat_conversation1)
tokens = tokenizer(context1, return_tensors="pt")['input_ids']
if len(tokens.shape) == 1:
return str(tokens.shape[0])
elif len(tokens.shape) == 2:
return str(tokens.shape[1])
else:
return "N/A"
else:
return "N/A"
count_chat_tokens_func = functools.partial(count_chat_tokens,
memory_restriction_level1=memory_restriction_level,
keep_sources_in_context1=kwargs['keep_sources_in_context'])
count_tokens_event = count_chat_tokens_btn.click(fn=count_chat_tokens_func,
inputs=[model_state, text_output, prompt_type, prompt_dict,
system_prompt, chat_conversation],
outputs=chat_token_count,
api_name='count_tokens' if allow_api else None)
# don't pass text_output, don't want to clear output, just stop it
# cancel only stops outer generation, not inner generation or non-generation
stop_btn.click(lambda: None, None, None,
cancels=submits1 + submits2 + submits3 + submits4 +
[submit_event_nochat, submit_event_nochat2] +
[eventdb1, eventdb2, eventdb3] +
[eventdb7a, eventdb7, eventdb8a, eventdb8, eventdb9a, eventdb9, eventdb12a, eventdb12] +
db_events +
[eventdbloadla, eventdbloadlb] +
[clear_event] +
[submit_event_nochat_api, submit_event_nochat] +
[load_model_event, load_model_event2] +
[count_tokens_event]
,
queue=False, api_name='stop' if allow_api else None).then(clear_torch_cache, queue=False)
if kwargs['auth'] is not None:
auth = authf
load_func = user_state_setup
load_inputs = [my_db_state, requests_state, login_btn, login_btn]
load_outputs = [my_db_state, requests_state, login_btn]
else:
auth = None
load_func, load_inputs, load_outputs = None, None, None
app_js = wrap_js_to_lambda(
len(load_inputs) if load_inputs else 0,
get_dark_js() if kwargs['dark'] else None,
get_heap_js(heap_app_id) if is_heap_analytics_enabled else None)
load_event = demo.load(fn=load_func, inputs=load_inputs, outputs=load_outputs, _js=app_js)
if load_func:
load_event2 = load_event.then(load_login_func,
inputs=login_inputs,
outputs=login_outputs)
if not kwargs['large_file_count_mode']:
load_event3 = load_event2.then(**get_sources_kwargs)
load_event4 = load_event3.then(fn=update_dropdown, inputs=docs_state, outputs=document_choice)
load_event5 = load_event4.then(**show_sources_kwargs)
load_event6 = load_event5.then(**get_viewable_sources_args)
load_event7 = load_event6.then(**viewable_kwargs)
demo.queue(concurrency_count=kwargs['concurrency_count'], api_open=kwargs['api_open'])
favicon_file = "h2o-logo.svg"
favicon_path = favicon_file
if not os.path.isfile(favicon_file):
print("favicon_path1=%s not found" % favicon_file, flush=True)
alt_path = os.path.dirname(os.path.abspath(__file__))
favicon_path = os.path.join(alt_path, favicon_file)
if not os.path.isfile(favicon_path):
print("favicon_path2: %s not found in %s" % (favicon_file, alt_path), flush=True)
alt_path = os.path.dirname(alt_path)
favicon_path = os.path.join(alt_path, favicon_file)
if not os.path.isfile(favicon_path):
print("favicon_path3: %s not found in %s" % (favicon_file, alt_path), flush=True)
favicon_path = None
if kwargs['prepare_offline_level'] > 0:
from src.prepare_offline import go_prepare_offline
go_prepare_offline(**locals())
return
scheduler = BackgroundScheduler()
scheduler.add_job(func=clear_torch_cache, trigger="interval", seconds=20)
if is_public and \
kwargs['base_model'] not in non_hf_types:
# FIXME: disable for gptj, langchain or gpt4all modify print itself
# FIXME: and any multi-threaded/async print will enter model output!
scheduler.add_job(func=ping, trigger="interval", seconds=60)
if is_public or os.getenv('PING_GPU'):
scheduler.add_job(func=ping_gpu, trigger="interval", seconds=60 * 10)
scheduler.start()
# import control
if kwargs['langchain_mode'] == 'Disabled' and \
os.environ.get("TEST_LANGCHAIN_IMPORT") and \
kwargs['base_model'] not in non_hf_types:
assert 'gpt_langchain' not in sys.modules, "Dev bug, import of langchain when should not have"
assert 'langchain' not in sys.modules, "Dev bug, import of langchain when should not have"
# set port in case GRADIO_SERVER_PORT was already set in prior main() call,
# gradio does not listen if change after import
# Keep None if not set so can find an open port above used ports
server_port = os.getenv('GRADIO_SERVER_PORT')
if server_port is not None:
server_port = int(server_port)
demo.launch(share=kwargs['share'],
server_name=kwargs['server_name'],
show_error=True,
server_port=server_port,
favicon_path=favicon_path,
prevent_thread_lock=True,
auth=auth,
auth_message=auth_message,
root_path=kwargs['root_path'],
ssl_keyfile=kwargs['ssl_keyfile'],
ssl_verify=kwargs['ssl_verify'],
ssl_certfile=kwargs['ssl_certfile'],
ssl_keyfile_password=kwargs['ssl_keyfile_password'],
)
if kwargs['verbose'] or not (kwargs['base_model'] in ['gptj', 'gpt4all_llama']):
print("Started Gradio Server and/or GUI: server_name: %s port: %s" % (kwargs['server_name'], server_port),
flush=True)
if kwargs['open_browser']:
# Open URL in a new tab, if a browser window is already open.
if server_port is None:
server_port = '7860'
import webbrowser
webbrowser.open_new_tab('http://localhost:%s' % server_port)
if kwargs['block_gradio_exit']:
demo.block_thread()
def show_doc(db1s, selection_docs_state1, requests_state1,
langchain_mode1,
single_document_choice1,
view_raw_text_checkbox1,
text_context_list1,
dbs1=None,
load_db_if_exists1=None,
db_type1=None,
use_openai_embedding1=None,
hf_embedding_model1=None,
migrate_embedding_model_or_db1=None,
auto_migrate_db1=None,
verbose1=False,
get_userid_auth1=None,
max_raw_chunks=1000000,
api=False,
n_jobs=-1):
file = single_document_choice1
document_choice1 = [single_document_choice1]
content = None
db_documents = []
db_metadatas = []
if db_type1 in ['chroma', 'chroma_old']:
assert langchain_mode1 is not None
langchain_mode_paths = selection_docs_state1['langchain_mode_paths']
langchain_mode_types = selection_docs_state1['langchain_mode_types']
from src.gpt_langchain import set_userid, get_any_db, get_docs_and_meta
set_userid(db1s, requests_state1, get_userid_auth1)
top_k_docs = -1
db = get_any_db(db1s, langchain_mode1, langchain_mode_paths, langchain_mode_types,
dbs=dbs1,
load_db_if_exists=load_db_if_exists1,
db_type=db_type1,
use_openai_embedding=use_openai_embedding1,
hf_embedding_model=hf_embedding_model1,
migrate_embedding_model=migrate_embedding_model_or_db1,
auto_migrate_db=auto_migrate_db1,
for_sources_list=True,
verbose=verbose1,
n_jobs=n_jobs,
)
query_action = False # long chunks like would be used for summarize
        # the filter below acts like an OR filter, so it will show the whole doc or its chunks, unrestricted
        from langchain.vectorstores import Chroma
        if isinstance(db, Chroma):
            # chroma >= 0.4
            # chunk_id >= 0 selects query-sized chunks, chunk_id == -1 selects the whole-document entry
            if query_action:
                chunk_id_filter = {"$gte": 0}
            elif view_raw_text_checkbox1:
                chunk_id_filter = {"$gte": -1}
            else:
                chunk_id_filter = {"$eq": -1}
            one_filter = [{"source": {"$eq": x}, "chunk_id": chunk_id_filter} for x in document_choice1][0]
            filter_kwargs = dict(filter={"$and": [dict(source=one_filter['source']),
                                                  dict(chunk_id=one_filter['chunk_id'])]})
        else:
            # migration for chroma < 0.4
            chunk_id_filter = {"$gte": 0} if query_action else {"$eq": -1}
            one_filter = [{"source": {"$eq": x}, "chunk_id": chunk_id_filter} for x in document_choice1][0]
            if view_raw_text_checkbox1:
                # like an OR: full raw text, all chunk types
                filter_kwargs = dict(filter=one_filter)
            else:
                filter_kwargs = dict(filter={"$and": [dict(source=one_filter['source']),
                                                      dict(chunk_id=one_filter['chunk_id'])]})
db_documents, db_metadatas = get_docs_and_meta(db, top_k_docs, filter_kwargs=filter_kwargs,
text_context_list=text_context_list1)
# order documents
from langchain.docstore.document import Document
docs_with_score = [(Document(page_content=result[0], metadata=result[1] or {}), 0)
for result in zip(db_documents, db_metadatas)]
doc_chunk_ids = [x.get('chunk_id', -1) for x in db_metadatas]
doc_page_ids = [x.get('page', 0) for x in db_metadatas]
doc_hashes = [x.get('doc_hash', 'None') for x in db_metadatas]
docs_with_score = [x for hx, px, cx, x in
sorted(zip(doc_hashes, doc_page_ids, doc_chunk_ids, docs_with_score),
key=lambda x: (x[0], x[1], x[2]))
# if cx == -1
]
db_metadatas = [x[0].metadata for x in docs_with_score][:max_raw_chunks]
db_documents = [x[0].page_content for x in docs_with_score][:max_raw_chunks]
# done reordering
if view_raw_text_checkbox1:
content = [dict_to_html(x) + '\n' + text_to_html(y) for x, y in zip(db_metadatas, db_documents)]
else:
content = [text_to_html(y) for x, y in zip(db_metadatas, db_documents)]
content = '\n'.join(content)
content = f"""<!DOCTYPE html>
<html>
<head>
<title>{file}</title>
</head>
<body>
{content}
</body>
</html>"""
if api:
if view_raw_text_checkbox1:
return dict(contents=db_documents, metadatas=db_metadatas)
else:
contents = [text_to_html(y, api=api) for y in db_documents]
metadatas = [dict_to_html(x, api=api) for x in db_metadatas]
return dict(contents=contents, metadatas=metadatas)
else:
assert not api, "API mode for get_document only supported for chroma"
dummy1 = gr.update(visible=False, value=None)
# backup is text dump of db version
if content:
dummy_ret = dummy1, dummy1, dummy1, dummy1, gr.update(visible=True, value=content)
if view_raw_text_checkbox1:
return dummy_ret
else:
dummy_ret = dummy1, dummy1, dummy1, dummy1, dummy1
if not isinstance(file, str):
return dummy_ret
if file.lower().endswith('.html') or file.lower().endswith('.mhtml') or file.lower().endswith('.htm') or \
file.lower().endswith('.xml'):
try:
with open(file, 'rt') as f:
content = f.read()
return gr.update(visible=True, value=content), dummy1, dummy1, dummy1, dummy1
except:
return dummy_ret
if file.lower().endswith('.md'):
try:
with open(file, 'rt') as f:
content = f.read()
return dummy1, dummy1, dummy1, gr.update(visible=True, value=content), dummy1
except:
return dummy_ret
if file.lower().endswith('.py'):
try:
with open(file, 'rt') as f:
content = f.read()
content = f"```python\n{content}\n```"
return dummy1, dummy1, dummy1, gr.update(visible=True, value=content), dummy1
except:
return dummy_ret
if file.lower().endswith('.txt') or file.lower().endswith('.rst') or file.lower().endswith(
'.rtf') or file.lower().endswith('.toml'):
try:
with open(file, 'rt') as f:
content = f.read()
content = f"```text\n{content}\n```"
return dummy1, dummy1, dummy1, gr.update(visible=True, value=content), dummy1
except:
return dummy_ret
func = None
if file.lower().endswith(".csv"):
func = pd.read_csv
elif file.lower().endswith(".pickle"):
func = pd.read_pickle
elif file.lower().endswith(".xls") or file.lower().endswith("xlsx"):
func = pd.read_excel
elif file.lower().endswith('.json'):
func = pd.read_json
# pandas doesn't show full thing, even if html view shows broken things still better
# elif file.lower().endswith('.xml'):
# func = pd.read_xml
if func is not None:
try:
df = func(file).head(100)
except:
return dummy_ret
return dummy1, gr.update(visible=True, value=df), dummy1, dummy1, dummy1
port = int(os.getenv('GRADIO_SERVER_PORT', '7860'))
import pathlib
absolute_path_string = os.path.abspath(file)
url_path = pathlib.Path(absolute_path_string).as_uri()
url = get_url(absolute_path_string, from_str=True)
img_url = url.replace("""<a href=""", """<img src=""")
if file.lower().endswith('.png') or file.lower().endswith('.jpg') or file.lower().endswith('.jpeg'):
return gr.update(visible=True, value=img_url), dummy1, dummy1, dummy1, dummy1
elif file.lower().endswith('.pdf') or 'arxiv.org/pdf' in file:
# account for when use `wget -b -m -k -o wget.log -e robots=off`
if url_alive('http://' + file):
file = 'http://' + file
        elif url_alive('https://' + file):
            file = 'https://' + file
if file.lower().startswith('http') or file.lower().startswith('https'):
# if file is online, then might as well use google(?)
document1 = file
return gr.update(visible=True,
value=f"""<iframe width="1000" height="800" src="https://docs.google.com/viewerng/viewer?url={document1}&embedded=true" frameborder="0" height="100%" width="100%">
</iframe>
"""), dummy1, dummy1, dummy1, dummy1
else:
# FIXME: This doesn't work yet, just return dummy result for now
if False:
ip = get_local_ip()
document1 = url_path.replace('file://', f'http://{ip}:{port}/')
# document1 = url
return gr.update(visible=True, value=f"""<object data="{document1}" type="application/pdf">
<iframe src="https://docs.google.com/viewer?url={document1}&embedded=true"></iframe>
</object>"""), dummy1, dummy1, dummy1, dummy1
else:
return dummy_ret
else:
return dummy_ret
def get_inputs_list(inputs_dict, model_lower, model_id=1):
"""
map gradio objects in locals() to inputs for evaluate().
:param inputs_dict:
:param model_lower:
:param model_id: Which model (1 or 2) of 2
:return:
"""
inputs_list_names = list(inspect.signature(evaluate).parameters)
inputs_list = []
inputs_dict_out = {}
for k in inputs_list_names:
if k == 'kwargs':
continue
if k in input_args_list + inputs_kwargs_list:
# these are added at use time for args or partial for kwargs, not taken as input
continue
if 'mbart-' not in model_lower and k in ['src_lang', 'tgt_lang']:
continue
if model_id == 2:
if k == 'prompt_type':
k = 'prompt_type2'
if k == 'prompt_used':
k = 'prompt_used2'
if k == 'max_new_tokens':
k = 'max_new_tokens2'
if k == 'min_new_tokens':
k = 'min_new_tokens2'
inputs_list.append(inputs_dict[k])
inputs_dict_out[k] = inputs_dict[k]
return inputs_list, inputs_dict_out
def update_user_db_gr(file, db1s, selection_docs_state1, requests_state1,
langchain_mode, chunk, chunk_size, embed,
image_loaders,
pdf_loaders,
url_loaders,
jq_schema,
h2ogpt_key,
captions_model=None,
caption_loader=None,
doctr_loader=None,
dbs=None,
get_userid_auth=None,
**kwargs):
valid_key = is_valid_key(kwargs.pop('enforce_h2ogpt_api_key', None),
kwargs.pop('enforce_h2ogpt_ui_key', None),
kwargs.pop('h2ogpt_api_keys', []), h2ogpt_key,
requests_state1=requests_state1)
if not valid_key:
raise ValueError(invalid_key_msg)
loaders_dict, captions_model = gr_to_lg(image_loaders,
pdf_loaders,
url_loaders,
captions_model=captions_model,
**kwargs,
)
if jq_schema is None:
jq_schema = kwargs['jq_schema0']
loaders_dict.update(dict(captions_model=captions_model,
caption_loader=caption_loader,
doctr_loader=doctr_loader,
jq_schema=jq_schema,
))
kwargs.pop('image_loaders_options0', None)
kwargs.pop('pdf_loaders_options0', None)
kwargs.pop('url_loaders_options0', None)
kwargs.pop('jq_schema0', None)
if not embed:
kwargs['use_openai_embedding'] = False
kwargs['hf_embedding_model'] = 'fake'
kwargs['migrate_embedding_model'] = False
from src.gpt_langchain import update_user_db
return update_user_db(file, db1s, selection_docs_state1, requests_state1,
langchain_mode=langchain_mode, chunk=chunk, chunk_size=chunk_size,
**loaders_dict,
dbs=dbs,
get_userid_auth=get_userid_auth,
**kwargs)
def get_sources_gr(db1s, selection_docs_state1, requests_state1, langchain_mode, dbs=None, docs_state0=None,
load_db_if_exists=None,
db_type=None,
use_openai_embedding=None,
hf_embedding_model=None,
migrate_embedding_model=None,
auto_migrate_db=None,
verbose=False,
get_userid_auth=None,
api=False,
n_jobs=-1):
from src.gpt_langchain import get_sources
sources_file, source_list, num_chunks, num_sources_str, db = \
get_sources(db1s, selection_docs_state1, requests_state1, langchain_mode,
dbs=dbs, docs_state0=docs_state0,
load_db_if_exists=load_db_if_exists,
db_type=db_type,
use_openai_embedding=use_openai_embedding,
hf_embedding_model=hf_embedding_model,
migrate_embedding_model=migrate_embedding_model,
auto_migrate_db=auto_migrate_db,
verbose=verbose,
get_userid_auth=get_userid_auth,
n_jobs=n_jobs,
)
if api:
return source_list
if langchain_mode in langchain_modes_non_db:
doc_counts_str = "LLM Mode\nNo Collection"
else:
doc_counts_str = "Collection: %s\nDocs: %s\nChunks: %d" % (langchain_mode, num_sources_str, num_chunks)
return sources_file, source_list, doc_counts_str
def get_source_files_given_langchain_mode_gr(db1s, selection_docs_state1, requests_state1,
langchain_mode,
dbs=None,
load_db_if_exists=None,
db_type=None,
use_openai_embedding=None,
hf_embedding_model=None,
migrate_embedding_model=None,
auto_migrate_db=None,
verbose=False,
get_userid_auth=None,
n_jobs=-1):
from src.gpt_langchain import get_source_files_given_langchain_mode
return get_source_files_given_langchain_mode(db1s, selection_docs_state1, requests_state1, None,
langchain_mode,
dbs=dbs,
load_db_if_exists=load_db_if_exists,
db_type=db_type,
use_openai_embedding=use_openai_embedding,
hf_embedding_model=hf_embedding_model,
migrate_embedding_model=migrate_embedding_model,
auto_migrate_db=auto_migrate_db,
verbose=verbose,
get_userid_auth=get_userid_auth,
delete_sources=False,
n_jobs=n_jobs)
def del_source_files_given_langchain_mode_gr(db1s, selection_docs_state1, requests_state1, document_choice1,
langchain_mode,
dbs=None,
load_db_if_exists=None,
db_type=None,
use_openai_embedding=None,
hf_embedding_model=None,
migrate_embedding_model=None,
auto_migrate_db=None,
verbose=False,
get_userid_auth=None,
n_jobs=-1):
from src.gpt_langchain import get_source_files_given_langchain_mode
return get_source_files_given_langchain_mode(db1s, selection_docs_state1, requests_state1, document_choice1,
langchain_mode,
dbs=dbs,
load_db_if_exists=load_db_if_exists,
db_type=db_type,
use_openai_embedding=use_openai_embedding,
hf_embedding_model=hf_embedding_model,
migrate_embedding_model=migrate_embedding_model,
auto_migrate_db=auto_migrate_db,
verbose=verbose,
get_userid_auth=get_userid_auth,
delete_sources=True,
n_jobs=n_jobs)
def update_and_get_source_files_given_langchain_mode_gr(db1s,
selection_docs_state,
requests_state,
langchain_mode, chunk, chunk_size,
image_loaders,
pdf_loaders,
url_loaders,
jq_schema,
captions_model=None,
caption_loader=None,
doctr_loader=None,
dbs=None, first_para=None,
hf_embedding_model=None,
use_openai_embedding=None,
migrate_embedding_model=None,
auto_migrate_db=None,
text_limit=None,
db_type=None, load_db_if_exists=None,
n_jobs=None, verbose=None, get_userid_auth=None,
image_loaders_options0=None,
pdf_loaders_options0=None,
url_loaders_options0=None,
jq_schema0=None):
from src.gpt_langchain import update_and_get_source_files_given_langchain_mode
loaders_dict, captions_model = gr_to_lg(image_loaders,
pdf_loaders,
url_loaders,
image_loaders_options0=image_loaders_options0,
pdf_loaders_options0=pdf_loaders_options0,
url_loaders_options0=url_loaders_options0,
captions_model=captions_model,
)
if jq_schema is None:
jq_schema = jq_schema0
loaders_dict.update(dict(captions_model=captions_model,
caption_loader=caption_loader,
doctr_loader=doctr_loader,
jq_schema=jq_schema,
))
return update_and_get_source_files_given_langchain_mode(db1s,
selection_docs_state,
requests_state,
langchain_mode, chunk, chunk_size,
**loaders_dict,
dbs=dbs, first_para=first_para,
hf_embedding_model=hf_embedding_model,
use_openai_embedding=use_openai_embedding,
migrate_embedding_model=migrate_embedding_model,
auto_migrate_db=auto_migrate_db,
text_limit=text_limit,
db_type=db_type, load_db_if_exists=load_db_if_exists,
n_jobs=n_jobs, verbose=verbose,
get_userid_auth=get_userid_auth)
def set_userid_gr(db1s, requests_state1, get_userid_auth):
from src.gpt_langchain import set_userid
return set_userid(db1s, requests_state1, get_userid_auth)
def set_dbid_gr(db1):
from src.gpt_langchain import set_dbid
return set_dbid(db1)
def set_userid_direct_gr(db1s, userid, username):
from src.gpt_langchain import set_userid_direct
return set_userid_direct(db1s, userid, username)
| [
"Query Pre-Prompt",
"Added after documents",
"Prompt (or Custom)",
"prompt_query",
". For summarization, no query required, just click submit",
"Added before documents",
"Prompt (or Custom) (Model 2)",
"{'fn': PLACEHOLDER, 'inputs': PLACEHOLDER, 'outputs': PLACEHOLDER}",
"prompt_summary",
"prompt_type",
"If 'auto', then uses model's system prompt,",
" If empty, no system message is used",
"system_prompt",
"Added after documents (if query given, 'Focusing on {query}, ' is pre-appended)",
"pre_prompt_summary",
"Query Prompt",
"prompt_dict",
"HYDE approach for LLM getting answer to embed ('auto' means automatic, else enter template like '{query}'",
"model_lock",
"Prompt Type Model 2",
"Prompt Type",
"HYDE Embedding Template",
"Summary Pre-Prompt",
"System Prompt",
" else use this message.",
"pre_prompt_query",
"Summary Prompt"
] |
2024-01-10 | Royce-Geospatial-Consultants/h2ogpt_rg | src~image_captions.py | """
Based upon ImageCaptionLoader in LangChain version: langchain/document_loaders/image_captions.py
But accepts a preloaded model to avoid slowness in use and CUDA forking issues
Loader that loads image captions
By default, the loader utilizes the pre-trained BLIP image captioning model.
https://huggingface.co/Salesforce/blip-image-captioning-base
"""
from typing import List, Union, Any, Tuple
import requests
from langchain.docstore.document import Document
from langchain.document_loaders import ImageCaptionLoader
from utils import get_device, NullContext, clear_torch_cache
from importlib.metadata import distribution, PackageNotFoundError
try:
assert distribution('bitsandbytes') is not None
have_bitsandbytes = True
except (PackageNotFoundError, AssertionError):
have_bitsandbytes = False
class H2OImageCaptionLoader(ImageCaptionLoader):
"""Loader that loads the captions of an image"""
def __init__(self, path_images: Union[str, List[str]] = None,
blip_processor: str = None,
blip_model: str = None,
caption_gpu=True,
load_in_8bit=True,
# True doesn't seem to work, even though https://huggingface.co/Salesforce/blip2-flan-t5-xxl#in-8-bit-precision-int8
load_half=False,
load_gptq='',
load_awq='',
load_exllama=False,
use_safetensors=False,
revision=None,
min_new_tokens=20,
max_tokens=50,
gpu_id='auto'):
        if blip_processor is None or blip_model is None:
blip_processor = "Salesforce/blip-image-captioning-base"
blip_model = "Salesforce/blip-image-captioning-base"
super().__init__(path_images, blip_processor, blip_model)
self.blip_processor = blip_processor
self.blip_model = blip_model
self.processor = None
self.model = None
self.caption_gpu = caption_gpu
self.context_class = NullContext
self.load_in_8bit = load_in_8bit and have_bitsandbytes # only for blip2
self.load_half = load_half
self.load_gptq = load_gptq
self.load_awq = load_awq
self.load_exllama = load_exllama
self.use_safetensors = use_safetensors
self.revision = revision
self.gpu_id = gpu_id
# default prompt
self.prompt = "image of"
self.min_new_tokens = min_new_tokens
self.max_tokens = max_tokens
self.device = 'cpu'
self.set_context()
def set_context(self):
if get_device() == 'cuda' and self.caption_gpu:
import torch
            n_gpus = torch.cuda.device_count() if torch.cuda.is_available() else 0
if n_gpus > 0:
self.context_class = torch.device
self.device = 'cuda'
else:
self.device = 'cpu'
else:
self.device = 'cpu'
def load_model(self):
try:
import transformers
except ImportError:
raise ValueError(
"`transformers` package not found, please install with "
"`pip install transformers`."
)
self.set_context()
if self.model:
if not self.load_in_8bit and self.model.device != self.device:
self.model.to(self.device)
return self
if self.caption_gpu:
if self.gpu_id == 'auto':
# blip2 has issues with multi-GPU. Error says need to somehow set language model in device map
# device_map = 'auto'
device_map = {"": 0}
else:
if self.device == 'cuda':
device_map = {"": self.gpu_id}
else:
device_map = {"": 'cpu'}
else:
device_map = {"": 'cpu'}
import torch
with torch.no_grad():
with self.context_class(self.device):
context_class_cast = NullContext if self.device == 'cpu' else torch.autocast
with context_class_cast(self.device):
if 'blip2' in self.blip_processor.lower():
from transformers import Blip2Processor, Blip2ForConditionalGeneration
if self.load_half and not self.load_in_8bit:
self.processor = Blip2Processor.from_pretrained(self.blip_processor,
device_map=device_map).half()
self.model = Blip2ForConditionalGeneration.from_pretrained(self.blip_model,
device_map=device_map).half()
else:
self.processor = Blip2Processor.from_pretrained(self.blip_processor,
load_in_8bit=self.load_in_8bit,
device_map=device_map,
)
self.model = Blip2ForConditionalGeneration.from_pretrained(self.blip_model,
load_in_8bit=self.load_in_8bit,
device_map=device_map)
else:
from transformers import BlipForConditionalGeneration, BlipProcessor
self.load_half = False # not supported
if self.caption_gpu:
if device_map == 'auto':
# Blip doesn't support device_map='auto'
if self.device == 'cuda':
if self.gpu_id == 'auto':
device_map = {"": 0}
else:
device_map = {"": self.gpu_id}
else:
device_map = {"": 'cpu'}
else:
device_map = {"": 'cpu'}
self.processor = BlipProcessor.from_pretrained(self.blip_processor, device_map=device_map)
self.model = BlipForConditionalGeneration.from_pretrained(self.blip_model,
device_map=device_map)
return self
def set_image_paths(self, path_images: Union[str, List[str]]):
"""
Load from a list of image files
"""
if isinstance(path_images, str):
self.image_paths = [path_images]
else:
self.image_paths = path_images
def load(self, prompt=None) -> List[Document]:
if self.processor is None or self.model is None:
self.load_model()
results = []
for path_image in self.image_paths:
caption, metadata = self._get_captions_and_metadata(
model=self.model, processor=self.processor, path_image=path_image,
prompt=prompt,
)
doc = Document(page_content=caption, metadata=metadata)
results.append(doc)
return results
def unload_model(self):
if hasattr(self, 'model') and hasattr(self.model, 'cpu'):
self.model.cpu()
clear_torch_cache()
def _get_captions_and_metadata(
self, model: Any, processor: Any, path_image: str,
prompt=None) -> Tuple[str, dict]:
"""
Helper function for getting the captions and metadata of an image
"""
if prompt is None:
prompt = self.prompt
try:
from PIL import Image
except ImportError:
raise ValueError(
"`PIL` package not found, please install with `pip install pillow`"
)
try:
if path_image.startswith("http://") or path_image.startswith("https://"):
image = Image.open(requests.get(path_image, stream=True).raw).convert(
"RGB"
)
else:
image = Image.open(path_image).convert("RGB")
except Exception:
raise ValueError(f"Could not get image data for {path_image}")
import torch
with torch.no_grad():
with self.context_class(self.device):
context_class_cast = NullContext if self.device == 'cpu' else torch.autocast
with context_class_cast(self.device):
if self.load_half:
# FIXME: RuntimeError: "slow_conv2d_cpu" not implemented for 'Half'
inputs = processor(image, prompt, return_tensors="pt") # .half()
else:
inputs = processor(image, prompt, return_tensors="pt")
min_length = len(prompt) // 4 + self.min_new_tokens
self.max_tokens = max(self.max_tokens, min_length)
inputs.to(model.device)
output = model.generate(**inputs, min_length=min_length, max_length=self.max_tokens)
caption: str = processor.decode(output[0], skip_special_tokens=True)
prompti = caption.find(prompt)
if prompti >= 0:
caption = caption[prompti + len(prompt):]
metadata: dict = {"image_path": path_image}
return caption, metadata
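

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of how this loader might be driven end-to-end:
# construct it, point it at image files with set_image_paths(), then call load()
# to get LangChain Documents whose page_content is the generated caption.
# The image path below is a hypothetical placeholder, and caption_gpu=False is
# chosen only to keep the sketch environment-independent.
if __name__ == '__main__':
    loader = H2OImageCaptionLoader(caption_gpu=False)
    loader.set_image_paths(['example.jpg'])  # hypothetical local image file
    for doc in loader.load():
        print(doc.metadata['image_path'], '->', doc.page_content)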
| [] |
2024-01-10 | Royce-Geospatial-Consultants/h2ogpt_rg | src~gpt_langchain.py | import ast
import asyncio
import copy
import functools
import glob
import gzip
import inspect
import json
import os
import pathlib
import pickle
import shutil
import subprocess
import sys
import tempfile
import time
import traceback
import types
import typing
import urllib.error
import uuid
import zipfile
from collections import defaultdict
from datetime import datetime
from functools import reduce
from operator import concat
import filelock
import tabulate
import yaml
from joblib import delayed
from langchain.callbacks import streaming_stdout
from langchain.callbacks.base import Callbacks
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.llms.huggingface_pipeline import VALID_TASKS
from langchain.llms.utils import enforce_stop_tokens
from langchain.prompts.chat import ChatPromptValue
from langchain.schema import LLMResult, Generation, PromptValue
from langchain.schema.output import GenerationChunk
from langchain.tools import PythonREPLTool
from langchain.tools.json.tool import JsonSpec
from tqdm import tqdm
from src.db_utils import length_db1, set_dbid, set_userid, get_dbid, get_userid_direct, get_username_direct, \
set_userid_direct
from utils import wrapped_partial, EThread, import_matplotlib, sanitize_filename, makedirs, get_url, flatten_list, \
get_device, ProgressParallel, remove, hash_file, clear_torch_cache, NullContext, get_hf_server, FakeTokenizer, \
have_libreoffice, have_arxiv, have_playwright, have_selenium, have_tesseract, have_doctr, have_pymupdf, set_openai, \
get_list_or_str, have_pillow, only_selenium, only_playwright, only_unstructured_urls, get_sha, get_short_name, \
get_accordion, have_jq, get_doc, get_source, have_chromamigdb, get_token_count, reverse_ucurve_list, get_size, \
get_test_name_core
from enums import DocumentSubset, no_lora_str, model_token_mapping, source_prefix, source_postfix, non_query_commands, \
LangChainAction, LangChainMode, DocumentChoice, LangChainTypes, font_size, head_acc, super_source_prefix, \
super_source_postfix, langchain_modes_intrinsic, get_langchain_prompts, LangChainAgent, docs_joiner_default, \
docs_ordering_types_default, langchain_modes_non_db, \
does_support_functiontools, auto_choices
from evaluate_params import gen_hyper, gen_hyper0
from gen import get_model, SEED, get_limited_prompt, get_docs_tokens, get_relaxed_max_new_tokens
from prompter import non_hf_types, PromptType, Prompter
from src.serpapi import H2OSerpAPIWrapper
from utils_langchain import StreamingGradioCallbackHandler, _chunk_sources, _add_meta, add_parser, fix_json_meta, \
load_general_summarization_chain
import_matplotlib()
import numpy as np
import pandas as pd
import requests
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
# , GCSDirectoryLoader, GCSFileLoader
# , OutlookMessageLoader # GPL3
# ImageCaptionLoader, # use our own wrapper
# ReadTheDocsLoader, # no special file, some path, so have to give as special option
from langchain.document_loaders import PyPDFLoader, TextLoader, CSVLoader, PythonLoader, TomlLoader, \
UnstructuredURLLoader, UnstructuredHTMLLoader, UnstructuredWordDocumentLoader, UnstructuredMarkdownLoader, \
EverNoteLoader, UnstructuredEmailLoader, UnstructuredODTLoader, UnstructuredPowerPointLoader, \
UnstructuredEPubLoader, UnstructuredImageLoader, UnstructuredRTFLoader, ArxivLoader, UnstructuredPDFLoader, \
UnstructuredExcelLoader, JSONLoader
from langchain.text_splitter import Language, RecursiveCharacterTextSplitter, TextSplitter, CharacterTextSplitter
from langchain.chains.question_answering import load_qa_chain
from langchain.docstore.document import Document
from langchain.prompts import PromptTemplate
from langchain.llms import HuggingFaceTextGenInference, HuggingFacePipeline
from langchain.vectorstores import Chroma
from chromamig import ChromaMig
def split_list(input_list, split_size):
for i in range(0, len(input_list), split_size):
yield input_list[i:i + split_size]
def get_db(sources, use_openai_embedding=False, db_type='faiss',
persist_directory=None, load_db_if_exists=True,
langchain_mode='notset',
langchain_mode_paths={},
langchain_mode_types={},
collection_name=None,
hf_embedding_model=None,
migrate_embedding_model=False,
auto_migrate_db=False,
n_jobs=-1):
if not sources:
return None
user_path = langchain_mode_paths.get(langchain_mode)
if persist_directory is None:
langchain_type = langchain_mode_types.get(langchain_mode, LangChainTypes.EITHER.value)
persist_directory, langchain_type = get_persist_directory(langchain_mode, langchain_type=langchain_type)
langchain_mode_types[langchain_mode] = langchain_type
assert hf_embedding_model is not None
# get freshly-determined embedding model
embedding = get_embedding(use_openai_embedding, hf_embedding_model=hf_embedding_model)
assert collection_name is not None or langchain_mode != 'notset'
if collection_name is None:
collection_name = langchain_mode.replace(' ', '_')
# Create vector database
if db_type == 'faiss':
from langchain.vectorstores import FAISS
db = FAISS.from_documents(sources, embedding)
elif db_type == 'weaviate':
import weaviate
from weaviate.embedded import EmbeddedOptions
from langchain.vectorstores import Weaviate
if os.getenv('WEAVIATE_URL', None):
client = _create_local_weaviate_client()
else:
client = weaviate.Client(
embedded_options=EmbeddedOptions(persistence_data_path=persist_directory)
)
index_name = collection_name.capitalize()
db = Weaviate.from_documents(documents=sources, embedding=embedding, client=client, by_text=False,
index_name=index_name)
elif db_type in ['chroma', 'chroma_old']:
assert persist_directory is not None
# use_base already handled when making persist_directory, unless was passed into get_db()
makedirs(persist_directory, exist_ok=True)
# see if already actually have persistent db, and deal with possible changes in embedding
db, use_openai_embedding, hf_embedding_model = \
get_existing_db(None, persist_directory, load_db_if_exists, db_type,
use_openai_embedding,
langchain_mode, langchain_mode_paths, langchain_mode_types,
hf_embedding_model, migrate_embedding_model, auto_migrate_db,
verbose=False,
n_jobs=n_jobs)
if db is None:
import logging
logging.getLogger("chromadb").setLevel(logging.ERROR)
if db_type == 'chroma':
from chromadb.config import Settings
settings_extra_kwargs = dict(is_persistent=True)
else:
from chromamigdb.config import Settings
settings_extra_kwargs = dict(chroma_db_impl="duckdb+parquet")
client_settings = Settings(anonymized_telemetry=False,
persist_directory=persist_directory,
**settings_extra_kwargs)
if n_jobs in [None, -1]:
n_jobs = int(os.getenv('OMP_NUM_THREADS', str(os.cpu_count() // 2)))
num_threads = max(1, min(n_jobs, 8))
else:
num_threads = max(1, n_jobs)
collection_metadata = {"hnsw:num_threads": num_threads}
from_kwargs = dict(embedding=embedding,
persist_directory=persist_directory,
collection_name=collection_name,
client_settings=client_settings,
collection_metadata=collection_metadata)
if db_type == 'chroma':
import chromadb
api = chromadb.PersistentClient(path=persist_directory)
if hasattr(api._producer, 'max_batch_size'):
max_batch_size = api._producer.max_batch_size
else:
max_batch_size = 1000
sources_batches = split_list(sources, max_batch_size)
for sources_batch in sources_batches:
db = Chroma.from_documents(documents=sources_batch, **from_kwargs)
db.persist()
else:
db = ChromaMig.from_documents(documents=sources, **from_kwargs)
clear_embedding(db)
save_embed(db, use_openai_embedding, hf_embedding_model)
else:
# then just add
# doesn't check or change embedding, just saves it in case not saved yet, after persisting
db, num_new_sources, new_sources_metadata = add_to_db(db, sources, db_type=db_type,
use_openai_embedding=use_openai_embedding,
hf_embedding_model=hf_embedding_model)
else:
raise RuntimeError("No such db_type=%s" % db_type)
# once here, db is not changing and embedding choices in calling functions does not matter
return db
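

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of driving get_db() directly: wrap raw text in LangChain
# Documents and build an in-memory FAISS index (faiss needs no persist_directory).
# The mode name, collection name, and embedding model below are assumptions made
# only for this sketch; real callers pass values wired up elsewhere in h2oGPT.
def _example_get_db_sketch():
    sources = [Document(page_content="hello world", metadata={"source": "example.txt"})]
    return get_db(sources,
                  use_openai_embedding=False,
                  db_type='faiss',
                  langchain_mode='UserData',
                  collection_name='example_collection',
                  hf_embedding_model='hkunlp/instructor-large')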
def _get_unique_sources_in_weaviate(db):
batch_size = 100
id_source_list = []
result = db._client.data_object.get(class_name=db._index_name, limit=batch_size)
while result['objects']:
id_source_list += [(obj['id'], obj['properties']['source']) for obj in result['objects']]
last_id = id_source_list[-1][0]
result = db._client.data_object.get(class_name=db._index_name, limit=batch_size, after=last_id)
unique_sources = {source for _, source in id_source_list}
return unique_sources
def del_from_db(db, sources, db_type=None):
if db_type in ['chroma', 'chroma_old'] and db is not None:
# sources should be list of x.metadata['source'] from document metadatas
if isinstance(sources, str):
sources = [sources]
else:
assert isinstance(sources, (list, tuple, types.GeneratorType))
metadatas = set(sources)
client_collection = db._client.get_collection(name=db._collection.name,
embedding_function=db._collection._embedding_function)
for source in metadatas:
meta = dict(source=source)
try:
client_collection.delete(where=meta)
except KeyError:
pass
def add_to_db(db, sources, db_type='faiss',
avoid_dup_by_file=False,
avoid_dup_by_content=True,
use_openai_embedding=False,
hf_embedding_model=None):
assert hf_embedding_model is not None
num_new_sources = len(sources)
if not sources:
return db, num_new_sources, []
if db_type == 'faiss':
db.add_documents(sources)
elif db_type == 'weaviate':
# FIXME: only control by file name, not hash yet
if avoid_dup_by_file or avoid_dup_by_content:
unique_sources = _get_unique_sources_in_weaviate(db)
sources = [x for x in sources if x.metadata['source'] not in unique_sources]
num_new_sources = len(sources)
if num_new_sources == 0:
return db, num_new_sources, []
db.add_documents(documents=sources)
elif db_type in ['chroma', 'chroma_old']:
collection = get_documents(db)
# files we already have:
metadata_files = set([x['source'] for x in collection['metadatas']])
if avoid_dup_by_file:
# Too weak in case file changed content, assume parent shouldn't pass true for this for now
raise RuntimeError("Not desired code path")
if avoid_dup_by_content:
# look at hash, instead of page_content
# migration: If no hash previously, avoid updating,
# since don't know if need to update and may be expensive to redo all unhashed files
metadata_hash_ids = set(
[x['hashid'] for x in collection['metadatas'] if 'hashid' in x and x['hashid'] not in ["None", None]])
# avoid sources with same hash
sources = [x for x in sources if x.metadata.get('hashid') not in metadata_hash_ids]
num_nohash = len([x for x in sources if not x.metadata.get('hashid')])
print("Found %s new sources (%d have no hash in original source,"
" so have to reprocess for migration to sources with hash)" % (len(sources), num_nohash), flush=True)
# get new file names that match existing file names. delete existing files we are overridding
dup_metadata_files = set([x.metadata['source'] for x in sources if x.metadata['source'] in metadata_files])
print("Removing %s duplicate files from db because ingesting those as new documents" % len(
dup_metadata_files), flush=True)
client_collection = db._client.get_collection(name=db._collection.name,
embedding_function=db._collection._embedding_function)
for dup_file in dup_metadata_files:
dup_file_meta = dict(source=dup_file)
try:
client_collection.delete(where=dup_file_meta)
except KeyError:
pass
num_new_sources = len(sources)
if num_new_sources == 0:
return db, num_new_sources, []
if hasattr(db, '_persist_directory'):
print("Existing db, adding to %s" % db._persist_directory, flush=True)
# chroma only
lock_file = get_db_lock_file(db)
context = filelock.FileLock
else:
lock_file = None
context = NullContext
with context(lock_file):
            # this is the place where we add to the db, but other processes may be accessing it, so lock access,
            # else may see RuntimeError: Index seems to be corrupted or unsupported
import chromadb
api = chromadb.PersistentClient(path=db._persist_directory)
if hasattr(api._producer, 'max_batch_size'):
max_batch_size = api._producer.max_batch_size
else:
max_batch_size = 1000
sources_batches = split_list(sources, max_batch_size)
for sources_batch in sources_batches:
db.add_documents(documents=sources_batch)
db.persist()
clear_embedding(db)
# save here is for migration, in case old db directory without embedding saved
save_embed(db, use_openai_embedding, hf_embedding_model)
else:
raise RuntimeError("No such db_type=%s" % db_type)
new_sources_metadata = [x.metadata for x in sources]
return db, num_new_sources, new_sources_metadata
def create_or_update_db(db_type, persist_directory, collection_name,
user_path, langchain_type,
sources, use_openai_embedding, add_if_exists, verbose,
hf_embedding_model, migrate_embedding_model, auto_migrate_db,
n_jobs=-1):
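    """Create a fresh vector db in `persist_directory` (removing any existing one unless
    `add_if_exists`), or load the existing db and update it with `sources`."""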
if not os.path.isdir(persist_directory) or not add_if_exists:
if os.path.isdir(persist_directory):
if verbose:
print("Removing %s" % persist_directory, flush=True)
remove(persist_directory)
if verbose:
print("Generating db", flush=True)
if db_type == 'weaviate':
import weaviate
from weaviate.embedded import EmbeddedOptions
if os.getenv('WEAVIATE_URL', None):
client = _create_local_weaviate_client()
else:
client = weaviate.Client(
embedded_options=EmbeddedOptions(persistence_data_path=persist_directory)
)
index_name = collection_name.replace(' ', '_').capitalize()
if client.schema.exists(index_name) and not add_if_exists:
client.schema.delete_class(index_name)
if verbose:
print("Removing %s" % index_name, flush=True)
elif db_type in ['chroma', 'chroma_old']:
pass
if not add_if_exists:
if verbose:
print("Generating db", flush=True)
else:
if verbose:
print("Loading and updating db", flush=True)
db = get_db(sources,
use_openai_embedding=use_openai_embedding,
db_type=db_type,
persist_directory=persist_directory,
langchain_mode=collection_name,
langchain_mode_paths={collection_name: user_path},
langchain_mode_types={collection_name: langchain_type},
hf_embedding_model=hf_embedding_model,
migrate_embedding_model=migrate_embedding_model,
auto_migrate_db=auto_migrate_db,
n_jobs=n_jobs)
return db
from langchain.embeddings import FakeEmbeddings
class H2OFakeEmbeddings(FakeEmbeddings):
"""Fake embedding model, but constant instead of random"""
size: int
"""The size of the embedding vector."""
def _get_embedding(self) -> typing.List[float]:
return [1] * self.size
def embed_documents(self, texts: typing.List[str]) -> typing.List[typing.List[float]]:
return [self._get_embedding() for _ in texts]
def embed_query(self, text: str) -> typing.List[float]:
return self._get_embedding()
def get_embedding(use_openai_embedding, hf_embedding_model=None, preload=False):
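    """Return the embedding object to use: OpenAI embeddings, the constant fake embedding, a
    preloaded model passed in as dict/object, or a HuggingFace (instructor or sentence-transformers)
    model loaded by name."""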
assert hf_embedding_model is not None
# Get embedding model
if use_openai_embedding:
assert os.getenv("OPENAI_API_KEY") is not None, "Set ENV OPENAI_API_KEY"
from langchain.embeddings import OpenAIEmbeddings
embedding = OpenAIEmbeddings(disallowed_special=())
elif hf_embedding_model == 'fake':
embedding = H2OFakeEmbeddings(size=1)
else:
if isinstance(hf_embedding_model, str):
pass
elif isinstance(hf_embedding_model, dict):
# embedding itself preloaded globally
return hf_embedding_model['model']
else:
# object
return hf_embedding_model
# to ensure can fork without deadlock
        from langchain.embeddings import HuggingFaceEmbeddings, HuggingFaceInstructEmbeddings
device, torch_dtype, context_class = get_device_dtype()
model_kwargs = dict(device=device)
if 'instructor' in hf_embedding_model:
encode_kwargs = {'normalize_embeddings': True}
embedding = HuggingFaceInstructEmbeddings(model_name=hf_embedding_model,
model_kwargs=model_kwargs,
encode_kwargs=encode_kwargs)
else:
embedding = HuggingFaceEmbeddings(model_name=hf_embedding_model, model_kwargs=model_kwargs)
embedding.client.preload = preload
return embedding
def get_answer_from_sources(chain, sources, question):
return chain(
{
"input_documents": sources,
"question": question,
},
return_only_outputs=True,
)["output_text"]
"""Wrapper around Huggingface text generation inference API."""
from functools import partial
from typing import Any, Dict, List, Optional, Set, Iterable
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun
from langchain.llms.base import LLM
class H2Oagenerate:
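    """Mixin providing async generation: each prompt is dispatched to `_acall` as a separate task,
    optionally rate-limited by an asyncio semaphore, while input/output token counts are accumulated."""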
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
if self.verbose:
print("_agenerate H2O", flush=True)
generations = []
new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager")
self.count_input_tokens += sum([self.get_num_tokens(prompt) for prompt in prompts])
tasks = [
asyncio.ensure_future(self._agenerate_one(prompt, stop=stop, run_manager=run_manager,
new_arg_supported=new_arg_supported, **kwargs))
for prompt in prompts
]
texts = await asyncio.gather(*tasks)
self.count_output_tokens += sum([self.get_num_tokens(text) for text in texts])
        for text in texts:
            generations.append([Generation(text=text)])
if self.verbose:
print("done _agenerate H2O", flush=True)
return LLMResult(generations=generations)
async def _agenerate_one(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
new_arg_supported=None,
**kwargs: Any,
) -> str:
async_sem = NullContext() if self.async_sem is None else self.async_sem
async with async_sem: # semaphore limits num of simultaneous downloads
return await self._acall(prompt, stop=stop, run_manager=run_manager, **kwargs) \
if new_arg_supported else \
await self._acall(prompt, stop=stop, **kwargs)
class GradioInference(H2Oagenerate, LLM):
"""
Gradio generation inference API.
"""
inference_server_url: str = ""
temperature: float = 0.8
top_p: Optional[float] = 0.95
top_k: Optional[int] = None
penalty_alpha: Optional[float] = 0.0
num_beams: Optional[int] = 1
max_new_tokens: int = 512
max_new_tokens0: int = 512
min_new_tokens: int = 1
early_stopping: bool = False
max_time: int = 180
repetition_penalty: Optional[float] = None
num_return_sequences: Optional[int] = 1
do_sample: bool = False
chat_client: bool = False
return_full_text: bool = False
stream_output: bool = False
sanitize_bot_response: bool = False
prompter: Any = None
context: Any = ''
iinput: Any = ''
client: Any = None
tokenizer: Any = None
system_prompt: Any = None
visible_models: Any = None
h2ogpt_key: Any = None
async_sem: Any = None
count_input_tokens: Any = 0
count_output_tokens: Any = 0
min_max_new_tokens: Any = 256
max_input_tokens: Any = -1
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that python package exists in environment."""
try:
if values['client'] is None:
from gradio_utils.grclient import GradioClient
values["client"] = GradioClient(
values["inference_server_url"]
).setup()
except ImportError:
raise ImportError(
"Could not import gradio_client python package. "
"Please install it with `pip install gradio_client`."
)
return values
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "gradio_inference"
def setup_call(self, prompt):
# NOTE: prompt here has no prompt_type (e.g. human: bot:) prompt injection,
# so server should get prompt_type or '', not plain
# This is good, so gradio server can also handle stopping.py conditions
# this is different than TGI server that uses prompter to inject prompt_type prompting
stream_output = self.stream_output
client_langchain_mode = 'Disabled'
client_add_chat_history_to_context = True
client_add_search_to_context = False
client_chat_conversation = []
client_langchain_action = LangChainAction.QUERY.value
client_langchain_agents = []
top_k_docs = 1
chunk = True
chunk_size = 512
client_kwargs = dict(instruction=prompt if self.chat_client else '', # only for chat=True
iinput=self.iinput if self.chat_client else '', # only for chat=True
context=self.context,
# streaming output is supported, loops over and outputs each generation in streaming mode
# but leave stream_output=False for simple input/output mode
stream_output=stream_output,
prompt_type=self.prompter.prompt_type,
prompt_dict='',
temperature=self.temperature,
top_p=self.top_p,
top_k=self.top_k,
penalty_alpha=self.penalty_alpha,
num_beams=self.num_beams,
max_new_tokens=self.max_new_tokens,
min_new_tokens=self.min_new_tokens,
early_stopping=self.early_stopping,
max_time=self.max_time,
repetition_penalty=self.repetition_penalty,
num_return_sequences=self.num_return_sequences,
do_sample=self.do_sample,
chat=self.chat_client,
instruction_nochat=prompt if not self.chat_client else '',
iinput_nochat=self.iinput if not self.chat_client else '',
langchain_mode=client_langchain_mode,
add_chat_history_to_context=client_add_chat_history_to_context,
langchain_action=client_langchain_action,
langchain_agents=client_langchain_agents,
top_k_docs=top_k_docs,
chunk=chunk,
chunk_size=chunk_size,
document_subset=DocumentSubset.Relevant.name,
document_choice=[DocumentChoice.ALL.value],
pre_prompt_query=None,
prompt_query=None,
pre_prompt_summary=None,
prompt_summary=None,
system_prompt=self.system_prompt,
image_loaders=None, # don't need to further do doc specific things
pdf_loaders=None, # don't need to further do doc specific things
url_loaders=None, # don't need to further do doc specific things
jq_schema=None, # don't need to further do doc specific things
visible_models=self.visible_models,
h2ogpt_key=self.h2ogpt_key,
add_search_to_context=client_add_search_to_context,
chat_conversation=client_chat_conversation,
text_context_list=None,
docs_ordering_type=None,
min_max_new_tokens=self.min_max_new_tokens,
max_input_tokens=self.max_input_tokens,
docs_token_handling=None,
docs_joiner=None,
hyde_level=None,
hyde_template=None,
)
api_name = '/submit_nochat_api' # NOTE: like submit_nochat but stable API for string dict passing
self.count_input_tokens += self.get_num_tokens(prompt)
return client_kwargs, api_name
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
if self.verbose:
print("_call", flush=True)
client_kwargs, api_name = self.setup_call(prompt)
max_new_tokens = get_relaxed_max_new_tokens(prompt, tokenizer=self.tokenizer,
max_new_tokens=self.max_new_tokens,
max_new_tokens0=self.max_new_tokens0)
        client_kwargs.update(dict(max_new_tokens=max_new_tokens))
# new client for each call
client = self.client.clone()
from gradio_utils.grclient import check_job
if not self.stream_output:
res = client.predict(str(dict(client_kwargs)), api_name=api_name)
res_dict = ast.literal_eval(res)
text = res_dict['response']
ret = self.prompter.get_response(prompt + text, prompt=prompt,
sanitize_bot_response=self.sanitize_bot_response)
self.count_output_tokens += self.get_num_tokens(ret)
if self.verbose:
print("end _call", flush=True)
return ret
else:
text_callback = None
if run_manager:
text_callback = partial(
run_manager.on_llm_new_token, verbose=self.verbose
)
job = client.submit(str(dict(client_kwargs)), api_name=api_name)
text0 = ''
while not job.done():
if job.communicator.job.latest_status.code.name == 'FINISHED':
break
e = check_job(job, timeout=0, raise_exception=False)
if e is not None:
break
outputs_list = job.communicator.job.outputs
if outputs_list:
res = job.communicator.job.outputs[-1]
res_dict = ast.literal_eval(res)
text = res_dict['response']
text = self.prompter.get_response(prompt + text, prompt=prompt,
sanitize_bot_response=self.sanitize_bot_response)
# FIXME: derive chunk from full for now
text_chunk = text[len(text0):]
if not text_chunk:
# just need some sleep for threads to switch
time.sleep(0.001)
continue
# save old
text0 = text
if text_callback:
text_callback(text_chunk)
time.sleep(0.01)
# ensure get last output to avoid race
res_all = job.outputs()
if len(res_all) > 0:
# don't raise unless nochat API for now
# set below to True for now, not not self.chat_client, since not handling exception otherwise
# in some return of strex
check_job(job, timeout=0.02, raise_exception=True)
res = res_all[-1]
res_dict = ast.literal_eval(res)
text = res_dict['response']
# FIXME: derive chunk from full for now
else:
# if got no answer at all, probably something bad, always raise exception
# UI will still put exception in Chat History under chat exceptions
check_job(job, timeout=0.3, raise_exception=True)
# go with old if failure
text = text0
text_chunk = text[len(text0):]
if text_callback:
text_callback(text_chunk)
ret = self.prompter.get_response(prompt + text, prompt=prompt,
sanitize_bot_response=self.sanitize_bot_response)
self.count_output_tokens += self.get_num_tokens(ret)
if self.verbose:
print("end _call", flush=True)
return ret
# copy-paste of streaming part of _call() with asyncio.sleep instead
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
if self.verbose:
print("_acall", flush=True)
client_kwargs, api_name = self.setup_call(prompt)
text_callback = None
if run_manager:
text_callback = partial(
run_manager.on_llm_new_token, verbose=self.verbose
)
# new client for each acall
client = self.client.clone()
job = client.submit(str(dict(client_kwargs)), api_name=api_name)
text0 = ''
while not job.done():
if job.communicator.job.latest_status.code.name == 'FINISHED':
break
e = job.future._exception
if e is not None:
break
outputs_list = job.communicator.job.outputs
if outputs_list:
res = job.communicator.job.outputs[-1]
res_dict = ast.literal_eval(res)
text = res_dict['response']
text = self.prompter.get_response(prompt + text, prompt=prompt,
sanitize_bot_response=self.sanitize_bot_response)
# FIXME: derive chunk from full for now
text_chunk = text[len(text0):]
if not text_chunk:
# just need some sleep for threads to switch
await asyncio.sleep(0.001)
continue
# save old
text0 = text
if text_callback:
await text_callback(text_chunk)
await asyncio.sleep(0.01)
# ensure get last output to avoid race
res_all = job.outputs()
if len(res_all) > 0:
res = res_all[-1]
res_dict = ast.literal_eval(res)
text = res_dict['response']
# FIXME: derive chunk from full for now
else:
# go with old if failure
text = text0
text_chunk = text[len(text0):]
if text_callback:
await text_callback(text_chunk)
ret = self.prompter.get_response(prompt + text, prompt=prompt,
sanitize_bot_response=self.sanitize_bot_response)
self.count_output_tokens += self.get_num_tokens(ret)
if self.verbose:
print("end _acall", flush=True)
return ret
def get_token_ids(self, text: str) -> List[int]:
return self.tokenizer.encode(text)
# avoid base method that is not aware of how to properly tokenize (uses GPT2)
# return _get_token_ids_default_method(text)
class H2OHuggingFaceTextGenInference(H2Oagenerate, HuggingFaceTextGenInference):
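    """Text-generation-inference (TGI) client that applies the local prompt template, truncates
    prompts with the tokenizer, strips stop sequences from generations, and supports async batched
    generation via H2Oagenerate."""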
max_new_tokens: int = 512
do_sample: bool = False
top_p: Optional[float] = 0.95
top_k: Optional[int] = None
penalty_alpha: Optional[float] = 0.0
typical_p: Optional[float] = 0.95
temperature: float = 0.8
repetition_penalty: Optional[float] = None
return_full_text: bool = False
stop_sequences: List[str] = Field(default_factory=list)
seed: Optional[int] = None
inference_server_url: str = ""
timeout: int = 300
headers: dict = None
stream_output: bool = False
sanitize_bot_response: bool = False
prompter: Any = None
context: Any = ''
iinput: Any = ''
tokenizer: Any = None
async_sem: Any = None
count_input_tokens: Any = 0
count_output_tokens: Any = 0
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
if stop is None:
stop = self.stop_sequences.copy()
else:
stop += self.stop_sequences.copy()
stop_tmp = stop.copy()
stop = []
        for x in stop_tmp:
            if x not in stop:
                stop.append(x)
# HF inference server needs control over input tokens
assert self.tokenizer is not None
from h2oai_pipeline import H2OTextGenerationPipeline
prompt, num_prompt_tokens = H2OTextGenerationPipeline.limit_prompt(prompt, self.tokenizer)
# NOTE: TGI server does not add prompting, so must do here
data_point = dict(context=self.context, instruction=prompt, input=self.iinput)
prompt = self.prompter.generate_prompt(data_point)
self.count_input_tokens += self.get_num_tokens(prompt)
gen_server_kwargs = dict(do_sample=self.do_sample,
stop_sequences=stop,
max_new_tokens=self.max_new_tokens,
top_p=self.top_p,
top_k=self.top_k,
typical_p=self.typical_p,
# penalty_alpha=self.penalty_alpha,
temperature=self.temperature,
repetition_penalty=self.repetition_penalty,
return_full_text=self.return_full_text,
seed=self.seed,
)
gen_server_kwargs.update(kwargs)
# lower bound because client is re-used if multi-threading
self.client.timeout = max(300, self.timeout)
if not self.stream_output:
res = self.client.generate(
prompt,
**gen_server_kwargs,
)
if self.return_full_text:
gen_text = res.generated_text[len(prompt):]
else:
gen_text = res.generated_text
# remove stop sequences from the end of the generated text
for stop_seq in stop:
if stop_seq in gen_text:
gen_text = gen_text[:gen_text.index(stop_seq)]
text = prompt + gen_text
text = self.prompter.get_response(text, prompt=prompt,
sanitize_bot_response=self.sanitize_bot_response)
else:
text_callback = None
if run_manager:
text_callback = partial(
run_manager.on_llm_new_token, verbose=self.verbose
)
text = ""
# Note: Streaming ignores return_full_text=True
for response in self.client.generate_stream(prompt, **gen_server_kwargs):
text_chunk = response.token.text
text += text_chunk
text = self.prompter.get_response(prompt + text, prompt=prompt,
sanitize_bot_response=self.sanitize_bot_response)
# stream part
is_stop = False
for stop_seq in stop:
if stop_seq in text_chunk:
is_stop = True
break
if is_stop:
break
if not response.token.special:
if text_callback:
text_callback(text_chunk)
self.count_output_tokens += self.get_num_tokens(text)
return text
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
if self.verbose:
print("acall", flush=True)
if stop is None:
stop = self.stop_sequences.copy()
else:
stop += self.stop_sequences.copy()
stop_tmp = stop.copy()
stop = []
        for x in stop_tmp:
            if x not in stop:
                stop.append(x)
# HF inference server needs control over input tokens
assert self.tokenizer is not None
from h2oai_pipeline import H2OTextGenerationPipeline
prompt, num_prompt_tokens = H2OTextGenerationPipeline.limit_prompt(prompt, self.tokenizer)
# NOTE: TGI server does not add prompting, so must do here
data_point = dict(context=self.context, instruction=prompt, input=self.iinput)
prompt = self.prompter.generate_prompt(data_point)
gen_text = await super()._acall(prompt, stop=stop, run_manager=run_manager, **kwargs)
# remove stop sequences from the end of the generated text
for stop_seq in stop:
if stop_seq in gen_text:
gen_text = gen_text[:gen_text.index(stop_seq)]
text = prompt + gen_text
text = self.prompter.get_response(text, prompt=prompt,
sanitize_bot_response=self.sanitize_bot_response)
if self.verbose:
print("acall done", flush=True)
return text
def get_token_ids(self, text: str) -> List[int]:
return self.tokenizer.encode(text)
# avoid base method that is not aware of how to properly tokenize (uses GPT2)
# return _get_token_ids_default_method(text)
from langchain.chat_models import ChatOpenAI, AzureChatOpenAI
from langchain.llms import OpenAI, AzureOpenAI, Replicate
class H2OOpenAI(OpenAI):
"""
New class to handle vLLM's use of OpenAI, no vllm_chat supported, so only need here
Handles prompting that OpenAI doesn't need, stopping as well
assume stop is used to keep out trailing text, and only generate new text,
so don't use self.prompter.get_response as becomes too complex
"""
stop_sequences: Any = None
sanitize_bot_response: bool = False
prompter: Any = None
context: Any = ''
iinput: Any = ''
tokenizer: Any = None
async_sem: Any = None
count_input_tokens: Any = 0
count_output_tokens: Any = 0
max_new_tokens0: Any = None
def update_prompts_and_stops(self, prompts, stop, **kwargs):
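        """De-duplicate stop sequences, truncate each prompt to fit the model context via the
        tokenizer, apply the local prompt template, and refresh kwargs before sending to the
        OpenAI/vLLM server."""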
stop_tmp = self.stop_sequences if not stop else self.stop_sequences + stop
stop = []
        for x in stop_tmp:
            if x not in stop:
                stop.append(x)
# HF inference server needs control over input tokens
assert self.tokenizer is not None
from h2oai_pipeline import H2OTextGenerationPipeline
for prompti, prompt in enumerate(prompts):
prompt, num_prompt_tokens = H2OTextGenerationPipeline.limit_prompt(prompt, self.tokenizer)
# NOTE: OpenAI/vLLM server does not add prompting, so must do here
data_point = dict(context=self.context, instruction=prompt, input=self.iinput)
prompt = self.prompter.generate_prompt(data_point)
prompts[prompti] = prompt
kwargs = self.update_kwargs(prompts, kwargs)
return prompts, stop, kwargs
def update_kwargs(self, prompts, kwargs):
# update kwargs per llm use, for when llm re-used for multiple prompts like summarization/extraction
# relax max_new_tokens if can
if self.max_new_tokens0 is not None and \
self.max_new_tokens0 > self.max_tokens and \
len(prompts) == 1 and \
'max_tokens' not in kwargs:
kwargs.update(dict(max_tokens=self.max_tokens_for_prompt(prompts[0])))
return kwargs
def max_tokens_for_prompt(self, prompt: str) -> int:
# like super() OpenAI version but added limit
num_tokens = self.get_num_tokens(prompt)
if self.max_new_tokens0 is not None:
return min(self.max_new_tokens0, self.tokenizer.model_max_length - num_tokens)
else:
return self.max_context_size - num_tokens
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
if self.verbose:
print("Hit _generate", flush=True)
prompts, stop, kwargs = self.update_prompts_and_stops(prompts, stop, **kwargs)
return super()._generate(prompts, stop=stop, run_manager=run_manager, **kwargs)
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> typing.Iterator[GenerationChunk]:
kwargs = self.update_kwargs([prompt], kwargs)
return super()._stream(prompt, stop=stop, run_manager=run_manager, **kwargs)
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> typing.AsyncIterator[GenerationChunk]:
kwargs = self.update_kwargs([prompt], kwargs)
return await super()._astream(prompt, stop=stop, run_manager=run_manager, **kwargs)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
prompts, stop, kwargs = self.update_prompts_and_stops(prompts, stop, **kwargs)
if self.batch_size > 1 or self.streaming:
return await super()._agenerate(prompts, stop=stop, run_manager=run_manager, **kwargs)
else:
self.count_input_tokens += sum([self.get_num_tokens(prompt) for prompt in prompts])
tasks = [
asyncio.ensure_future(self._agenerate_one(prompt, stop=stop, run_manager=run_manager, **kwargs))
for prompt in prompts]
llm_results = await asyncio.gather(*tasks)
generations = [x.generations[0] for x in llm_results]
def reducer(accumulator, element):
for key, value in element.items():
accumulator[key] = accumulator.get(key, 0) + value
return accumulator
collection = [x.llm_output['token_usage'] for x in llm_results]
token_usage = reduce(reducer, collection, {})
llm_output = {"token_usage": token_usage, "model_name": self.model_name}
self.count_output_tokens += token_usage.get('completion_tokens', 0)
return LLMResult(generations=generations, llm_output=llm_output)
async def _agenerate_one(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
async_sem = NullContext() if self.async_sem is None else self.async_sem
async with async_sem: # semaphore limits num of simultaneous downloads
prompts = [prompt]
# update for each async call
kwargs = self.update_kwargs(prompts, kwargs)
return await super(H2OOpenAI, self)._agenerate(prompts, stop=stop, run_manager=run_manager, **kwargs)
def get_token_ids(self, text: str) -> List[int]:
if self.tokenizer is not None:
return self.tokenizer.encode(text)
else:
# OpenAI uses tiktoken
return super().get_token_ids(text)
class H2OReplicate(Replicate):
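    """Replicate LLM wrapper that de-duplicates stop sequences, truncates the prompt with the local
    tokenizer, and applies the local prompt template before calling the Replicate endpoint."""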
stop_sequences: Any = None
sanitize_bot_response: bool = False
prompter: Any = None
context: Any = ''
iinput: Any = ''
tokenizer: Any = None
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to replicate endpoint."""
stop_tmp = self.stop_sequences if not stop else self.stop_sequences + stop
stop = []
        for x in stop_tmp:
            if x not in stop:
                stop.append(x)
# HF inference server needs control over input tokens
assert self.tokenizer is not None
from h2oai_pipeline import H2OTextGenerationPipeline
prompt, num_prompt_tokens = H2OTextGenerationPipeline.limit_prompt(prompt, self.tokenizer)
# Note Replicate handles the prompting of the specific model, but not if history, so just do it all on our side
data_point = dict(context=self.context, instruction=prompt, input=self.iinput)
prompt = self.prompter.generate_prompt(data_point)
response = super()._call(prompt, stop=stop, run_manager=run_manager, **kwargs)
return response
def get_token_ids(self, text: str) -> List[int]:
return self.tokenizer.encode(text)
# avoid base method that is not aware of how to properly tokenize (uses GPT2)
# return _get_token_ids_default_method(text)
class ExtraChat:
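    """Mixin that converts prompts into chat messages, prepending the system prompt and any prior
    chat_conversation turns."""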
def get_messages(self, prompts):
from langchain.schema import AIMessage, SystemMessage, HumanMessage
messages = []
if self.system_prompt:
messages.append(SystemMessage(content=self.system_prompt))
if self.chat_conversation:
for messages1 in self.chat_conversation:
messages.append(HumanMessage(content=messages1[0] if messages1[0] is not None else ''))
messages.append(AIMessage(content=messages1[1] if messages1[1] is not None else ''))
prompt_messages = []
for prompt in prompts:
if isinstance(prompt, ChatPromptValue):
prompt_message = messages + prompt.messages
else:
prompt_message = HumanMessage(content=prompt.text if prompt.text is not None else '')
prompt_message = messages + [prompt_message]
prompt_messages.append(prompt_message)
return prompt_messages
class H2OChatOpenAI(ChatOpenAI, ExtraChat):
tokenizer: Any = None # for vllm_chat
system_prompt: Any = None
chat_conversation: Any = []
max_new_tokens0: Any = None # FIXME: Doesn't seem to have same max_tokens == -1 for prompts==1
def get_token_ids(self, text: str) -> List[int]:
if self.tokenizer is not None:
return self.tokenizer.encode(text)
else:
# OpenAI uses tiktoken
return super().get_token_ids(text)
def generate_prompt(
self,
prompts: List[PromptValue],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> LLMResult:
prompt_messages = self.get_messages(prompts)
# prompt_messages = [p.to_messages() for p in prompts]
return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)
async def agenerate_prompt(
self,
prompts: List[PromptValue],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> LLMResult:
prompt_messages = self.get_messages(prompts)
# prompt_messages = [p.to_messages() for p in prompts]
return await self.agenerate(
prompt_messages, stop=stop, callbacks=callbacks, **kwargs
)
class H2OAzureChatOpenAI(AzureChatOpenAI, ExtraChat):
system_prompt: Any = None
chat_conversation: Any = []
max_new_tokens0: Any = None # FIXME: Doesn't seem to have same max_tokens == -1 for prompts==1
def generate_prompt(
self,
prompts: List[PromptValue],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> LLMResult:
prompt_messages = self.get_messages(prompts)
# prompt_messages = [p.to_messages() for p in prompts]
return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)
async def agenerate_prompt(
self,
prompts: List[PromptValue],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> LLMResult:
prompt_messages = self.get_messages(prompts)
# prompt_messages = [p.to_messages() for p in prompts]
return await self.agenerate(
prompt_messages, stop=stop, callbacks=callbacks, **kwargs
)
class H2OAzureOpenAI(AzureOpenAI):
max_new_tokens0: Any = None # FIXME: Doesn't seem to have same max_tokens == -1 for prompts==1
class H2OHuggingFacePipeline(HuggingFacePipeline):
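    """HuggingFacePipeline subclass that tracks input/output token counts and tokenizes with the
    pipeline's own tokenizer (falling back to FakeTokenizer if none is set)."""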
count_input_tokens: Any = 0
count_output_tokens: Any = 0
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
self.count_input_tokens += self.get_num_tokens(prompt)
response = self.pipeline(prompt, stop=stop)
if self.pipeline.task == "text-generation":
# Text generation return includes the starter text.
text = response[0]["generated_text"][len(prompt):]
elif self.pipeline.task == "text2text-generation":
text = response[0]["generated_text"]
elif self.pipeline.task == "summarization":
text = response[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {self.pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
self.count_output_tokens += self.get_num_tokens(text)
return text
def get_token_ids(self, text: str) -> List[int]:
tokenizer = self.pipeline.tokenizer
if tokenizer is not None:
return tokenizer.encode(text)
else:
return FakeTokenizer().encode(text)['input_ids']
def get_llm(use_openai_model=False,
model_name=None,
model=None,
tokenizer=None,
inference_server=None,
langchain_only_model=None,
stream_output=False,
async_output=True,
num_async=3,
do_sample=False,
temperature=0.1,
top_p=0.7,
top_k=40,
penalty_alpha=0.0,
num_beams=1,
max_new_tokens=512,
max_new_tokens0=512,
min_new_tokens=1,
early_stopping=False,
max_time=180,
repetition_penalty=1.0,
num_return_sequences=1,
prompt_type=None,
prompt_dict=None,
prompter=None,
context=None,
iinput=None,
chat_conversation=None,
sanitize_bot_response=False,
system_prompt='',
visible_models=0,
h2ogpt_key=None,
min_max_new_tokens=None,
max_input_tokens=None,
attention_sinks=None,
truncation_generation=None,
n_jobs=None,
cli=False,
llamacpp_dict=None,
exllama_dict=None,
verbose=False,
):
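    """Construct the LangChain LLM wrapper for the configured inference backend (Replicate,
    OpenAI/vLLM/Azure, SageMaker, Gradio or TGI servers, GPT4All/llama.cpp, exllama, or a local
    HF pipeline).

    Returns (llm, model_name, streamer, prompt_type, async_output, only_new_text, gradio_server).
    """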
# make all return only new text, so other uses work as expected, like summarization
only_new_text = True
gradio_server = False
if chat_conversation is None:
chat_conversation = []
fake_for_tests = ['test_qa', 'test_make_add_db', 'test_many_text', 'test_chroma_filtering']
if os.getenv('HARD_ASSERTS') and tokenizer is None and any([x in get_test_name_core() for x in fake_for_tests]):
# allow certain tests to use fake one
tokenizer = FakeTokenizer()
max_input_tokens = 1024
min_max_new_tokens = 256
model_max_length = tokenizer.model_max_length
if max_input_tokens >= 0:
max_input_tokens = min(model_max_length - min_max_new_tokens, max_input_tokens)
else:
max_input_tokens = model_max_length - min_max_new_tokens
if n_jobs in [None, -1]:
n_jobs = int(os.getenv('OMP_NUM_THREADS', str(os.cpu_count() // 2)))
if inference_server is None:
inference_server = ''
if inference_server.startswith('replicate'):
model_string = ':'.join(inference_server.split(':')[1:])
if 'meta/llama' in model_string:
temperature = max(0.01, temperature if do_sample else 0)
else:
temperature = temperature if do_sample else 0
gen_kwargs = dict(temperature=temperature,
seed=1234,
max_length=max_new_tokens, # langchain
max_new_tokens=max_new_tokens, # replicate docs
top_p=top_p if do_sample else 1,
top_k=top_k, # not always supported
repetition_penalty=repetition_penalty)
if system_prompt in auto_choices:
if prompter.system_prompt:
system_prompt = prompter.system_prompt
else:
system_prompt = ''
if system_prompt:
gen_kwargs.update(dict(system_prompt=system_prompt))
# replicate handles prompting if no conversation, but in general has no chat API, so do all handling of prompting in h2oGPT
if stream_output:
callbacks = [StreamingGradioCallbackHandler(max_time=max_time, verbose=verbose)]
streamer = callbacks[0] if stream_output else None
llm = H2OReplicate(
streaming=True,
callbacks=callbacks,
model=model_string,
input=gen_kwargs,
stop=prompter.stop_sequences,
stop_sequences=prompter.stop_sequences,
sanitize_bot_response=sanitize_bot_response,
prompter=prompter,
context=context,
iinput=iinput,
tokenizer=tokenizer,
verbose=verbose,
)
else:
streamer = None
llm = H2OReplicate(
model=model_string,
input=gen_kwargs,
stop=prompter.stop_sequences,
stop_sequences=prompter.stop_sequences,
sanitize_bot_response=sanitize_bot_response,
prompter=prompter,
context=context,
iinput=iinput,
tokenizer=tokenizer,
verbose=verbose,
)
elif use_openai_model or inference_server.startswith('openai') or inference_server.startswith('vllm'):
# supports async_output=True if chosen
if use_openai_model and model_name is None:
model_name = "gpt-3.5-turbo"
# FIXME: Will later import be ignored? I think so, so should be fine
openai, inf_type, deployment_name, base_url, api_version, api_key = set_openai(inference_server)
# Langchain oddly passes some things directly and rest via model_kwargs
model_kwargs = dict(top_p=top_p if do_sample else 1,
frequency_penalty=0,
presence_penalty=1.07 - repetition_penalty + 0.6,
logit_bias=None if inf_type == 'vllm' else {},
)
kwargs_extra = {}
if inf_type == 'openai_chat' or inf_type == 'vllm_chat':
kwargs_extra.update(dict(system_prompt=system_prompt, chat_conversation=chat_conversation))
cls = H2OChatOpenAI
# FIXME: Support context, iinput
if inf_type == 'vllm_chat':
async_sem = asyncio.Semaphore(num_async) if async_output else NullContext()
kwargs_extra.update(dict(tokenizer=tokenizer,
batch_size=1, # https://github.com/h2oai/h2ogpt/issues/928
async_sem=async_sem,
))
openai_api_key = openai.api_key
elif inf_type == 'openai_azure_chat':
cls = H2OAzureChatOpenAI
kwargs_extra.update(
dict(openai_api_type='azure', system_prompt=system_prompt, chat_conversation=chat_conversation))
# FIXME: Support context, iinput
openai_api_key = openai.api_key
elif inf_type == 'openai_azure':
cls = H2OAzureOpenAI
kwargs_extra.update(dict(openai_api_type='azure'))
kwargs_extra.update(model_kwargs)
model_kwargs = {}
# FIXME: Support context, iinput
openai_api_key = openai.api_key
else:
cls = H2OOpenAI
if inf_type == 'vllm':
async_sem = asyncio.Semaphore(num_async) if async_output else NullContext()
kwargs_extra.update(dict(stop_sequences=prompter.stop_sequences,
sanitize_bot_response=sanitize_bot_response,
prompter=prompter,
context=context,
iinput=iinput,
tokenizer=tokenizer,
openai_api_base=openai.api_base,
batch_size=1, # https://github.com/h2oai/h2ogpt/issues/928
client=None,
async_sem=async_sem,
))
kwargs_extra.update(model_kwargs)
model_kwargs = {}
else:
assert inf_type == 'openai' or use_openai_model, inf_type
openai_api_key = openai.api_key
if deployment_name:
kwargs_extra.update(dict(deployment_name=deployment_name))
if api_version:
kwargs_extra.update(dict(openai_api_version=api_version))
elif openai.api_version:
kwargs_extra.update(dict(openai_api_version=openai.api_version))
elif inf_type in ['openai_azure', 'openai_azure_chat']:
# https://github.com/Azure/azure-rest-api-specs/tree/main/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-10-01-preview
kwargs_extra.update(dict(openai_api_version="2023-10-01-preview"))
if base_url:
kwargs_extra.update(dict(openai_api_base=base_url))
else:
kwargs_extra.update(dict(openai_api_base=openai.api_base))
callbacks = [StreamingGradioCallbackHandler(max_time=max_time, verbose=verbose)]
llm = cls(model_name=model_name,
temperature=temperature if do_sample else 0,
# FIXME: Need to count tokens and reduce max_new_tokens to fit like in generate.py
max_tokens=max_new_tokens,
max_new_tokens0=max_new_tokens0,
model_kwargs=model_kwargs,
callbacks=callbacks if stream_output else None,
openai_api_key=openai_api_key,
max_retries=6,
streaming=stream_output,
verbose=verbose,
**kwargs_extra
)
streamer = callbacks[0] if stream_output else None
if inf_type in ['openai', 'openai_chat', 'openai_azure', 'openai_azure_chat']:
prompt_type = inference_server
else:
# vllm goes here
prompt_type = prompt_type or 'plain'
elif inference_server and inference_server.startswith('sagemaker'):
callbacks = [StreamingGradioCallbackHandler(max_time=max_time, verbose=verbose)] # FIXME
streamer = None
endpoint_name = ':'.join(inference_server.split(':')[1:2])
region_name = ':'.join(inference_server.split(':')[2:])
from sagemaker import H2OSagemakerEndpoint, ChatContentHandler, BaseContentHandler
if inference_server.startswith('sagemaker_chat'):
content_handler = ChatContentHandler()
else:
content_handler = BaseContentHandler()
model_kwargs = dict(temperature=temperature if do_sample else 1E-10,
return_full_text=False, top_p=top_p, max_new_tokens=max_new_tokens)
llm = H2OSagemakerEndpoint(
endpoint_name=endpoint_name,
region_name=region_name,
aws_access_key_id=os.environ.get('AWS_ACCESS_KEY_ID'),
aws_secret_access_key=os.environ.get('AWS_SECRET_ACCESS_KEY'),
model_kwargs=model_kwargs,
content_handler=content_handler,
endpoint_kwargs={'CustomAttributes': 'accept_eula=true'},
tokenizer=tokenizer, # for summarization and token counting
verbose=verbose,
)
elif inference_server:
assert inference_server.startswith(
'http'), "Malformed inference_server=%s. Did you add http:// in front?" % inference_server
from gradio_utils.grclient import GradioClient
from text_generation import Client as HFClient
if isinstance(model, GradioClient):
gradio_server = True
gr_client = model.clone()
hf_client = None
else:
gr_client = None
hf_client = model
assert isinstance(hf_client, HFClient)
inference_server, headers = get_hf_server(inference_server)
# quick sanity check to avoid long timeouts, just see if can reach server
requests.get(inference_server, timeout=int(os.getenv('REQUEST_TIMEOUT_FAST', '10')))
callbacks = [StreamingGradioCallbackHandler(max_time=max_time, verbose=verbose)]
async_sem = asyncio.Semaphore(num_async) if async_output else NullContext()
if gr_client:
chat_client = False
llm = GradioInference(
inference_server_url=inference_server,
return_full_text=False,
temperature=temperature,
top_p=top_p,
top_k=top_k,
penalty_alpha=penalty_alpha,
num_beams=num_beams,
max_new_tokens=max_new_tokens,
min_new_tokens=min_new_tokens,
early_stopping=early_stopping,
max_time=max_time,
repetition_penalty=repetition_penalty,
num_return_sequences=num_return_sequences,
do_sample=do_sample,
chat_client=chat_client,
callbacks=callbacks if stream_output else None,
stream_output=stream_output,
prompter=prompter,
context=context,
iinput=iinput,
client=gr_client,
sanitize_bot_response=sanitize_bot_response,
tokenizer=tokenizer,
system_prompt=system_prompt,
visible_models=visible_models,
h2ogpt_key=h2ogpt_key,
min_max_new_tokens=min_max_new_tokens,
max_input_tokens=max_input_tokens,
async_sem=async_sem,
verbose=verbose,
)
elif hf_client:
# no need to pass original client, no state and fast, so can use same validate_environment from base class
llm = H2OHuggingFaceTextGenInference(
inference_server_url=inference_server,
do_sample=do_sample,
max_new_tokens=max_new_tokens,
repetition_penalty=repetition_penalty,
return_full_text=False, # this only controls internal behavior, still returns processed text
seed=SEED,
stop_sequences=prompter.stop_sequences,
temperature=temperature,
top_k=top_k,
top_p=top_p,
# typical_p=top_p,
callbacks=callbacks if stream_output else None,
stream_output=stream_output,
prompter=prompter,
context=context,
iinput=iinput,
tokenizer=tokenizer,
timeout=max_time,
sanitize_bot_response=sanitize_bot_response,
async_sem=async_sem,
verbose=verbose,
)
else:
raise RuntimeError("No defined client")
streamer = callbacks[0] if stream_output else None
elif model_name in non_hf_types:
async_output = False # FIXME: not implemented yet
assert langchain_only_model
if model_name == 'llama':
callbacks = [StreamingGradioCallbackHandler(max_time=max_time, verbose=verbose)]
streamer = callbacks[0] if stream_output else None
else:
# stream_output = False
# doesn't stream properly as generator, but at least
callbacks = [streaming_stdout.StreamingStdOutCallbackHandler()]
streamer = None
if prompter:
prompt_type = prompter.prompt_type
else:
prompter = Prompter(prompt_type, prompt_dict, debug=False, chat=False, stream_output=stream_output)
            # assume inputted prompt_type is correct
from gpt4all_llm import get_llm_gpt4all
llm = get_llm_gpt4all(model_name,
model=model,
max_new_tokens=max_new_tokens,
temperature=temperature,
repetition_penalty=repetition_penalty,
top_k=top_k,
top_p=top_p,
callbacks=callbacks,
n_jobs=n_jobs,
verbose=verbose,
streaming=stream_output,
prompter=prompter,
context=context,
iinput=iinput,
max_seq_len=model_max_length,
llamacpp_dict=llamacpp_dict,
)
elif hasattr(model, 'is_exlama') and model.is_exlama():
async_output = False # FIXME: not implemented yet
assert langchain_only_model
callbacks = [StreamingGradioCallbackHandler(max_time=max_time, verbose=verbose)]
streamer = callbacks[0] if stream_output else None
if exllama_dict is None:
exllama_dict = {}
from src.llm_exllama import Exllama
llm = Exllama(streaming=stream_output,
model_path=None,
model=model,
lora_path=None,
temperature=temperature,
top_k=top_k,
top_p=top_p,
typical=.7,
beams=1,
# beam_length = 40,
stop_sequences=prompter.stop_sequences,
callbacks=callbacks,
verbose=verbose,
max_seq_len=model_max_length,
fused_attn=False,
**exllama_dict,
# alpha_value = 1.0, #For use with any models
# compress_pos_emb = 4.0, #For use with superhot
# set_auto_map = "3, 2" #Gpu split, this will split 3gigs/2gigs
prompter=prompter,
context=context,
iinput=iinput,
)
else:
async_output = False # FIXME: not implemented yet
if model is None:
# only used if didn't pass model in
assert tokenizer is None or isinstance(tokenizer, FakeTokenizer)
prompt_type = 'human_bot'
if model_name is None:
model_name = 'h2oai/h2ogpt-oasst1-512-12b'
# model_name = 'h2oai/h2ogpt-oig-oasst1-512-6_9b'
# model_name = 'h2oai/h2ogpt-oasst1-512-20b'
inference_server = ''
model, tokenizer, device = get_model(load_8bit=True, base_model=model_name,
inference_server=inference_server, gpu_id=0)
gen_kwargs = dict(do_sample=do_sample,
num_beams=num_beams,
max_new_tokens=max_new_tokens,
min_new_tokens=min_new_tokens,
early_stopping=early_stopping,
max_time=max_time,
repetition_penalty=repetition_penalty,
num_return_sequences=num_return_sequences,
return_full_text=not only_new_text,
handle_long_generation=None)
if do_sample:
gen_kwargs.update(dict(temperature=temperature,
top_k=top_k,
top_p=top_p,
penalty_alpha=penalty_alpha))
assert len(set(gen_hyper).difference(gen_kwargs.keys())) == 0
else:
gen_kwargs.update(dict(penalty_alpha=penalty_alpha))
assert len(set(gen_hyper0).difference(gen_kwargs.keys())) == 0
if stream_output:
skip_prompt = only_new_text
from gen import H2OTextIteratorStreamer
decoder_kwargs = {}
streamer = H2OTextIteratorStreamer(tokenizer, skip_prompt=skip_prompt, block=False, **decoder_kwargs)
gen_kwargs.update(dict(streamer=streamer))
else:
streamer = None
from h2oai_pipeline import H2OTextGenerationPipeline
pipe = H2OTextGenerationPipeline(model=model, use_prompter=True,
prompter=prompter,
context=context,
iinput=iinput,
prompt_type=prompt_type,
prompt_dict=prompt_dict,
sanitize_bot_response=sanitize_bot_response,
chat=False, stream_output=stream_output,
tokenizer=tokenizer,
max_input_tokens=max_input_tokens,
base_model=model_name,
verbose=verbose,
truncation_generation=truncation_generation,
**gen_kwargs)
# pipe.task = "text-generation"
# below makes it listen only to our prompt removal,
# not built in prompt removal that is less general and not specific for our model
pipe.task = "text2text-generation"
llm = H2OHuggingFacePipeline(pipeline=pipe)
return llm, model_name, streamer, prompt_type, async_output, only_new_text, gradio_server
def get_device_dtype():
# torch.device("cuda") leads to cuda:x cuda:y mismatches for multi-GPU consistently
import torch
    n_gpus = torch.cuda.device_count() if torch.cuda.is_available() else 0
device = 'cpu' if n_gpus == 0 else 'cuda'
# from utils import NullContext
# context_class = NullContext if n_gpus > 1 or n_gpus == 0 else context_class
context_class = torch.device
torch_dtype = torch.float16 if device == 'cuda' else torch.float32
return device, torch_dtype, context_class
def get_wiki_data(title, first_paragraph_only, text_limit=None, take_head=True):
"""
Get wikipedia data from online
:param title:
:param first_paragraph_only:
:param text_limit:
:param take_head:
:return:
"""
filename = 'wiki_%s_%s_%s_%s.data' % (first_paragraph_only, title, text_limit, take_head)
url = f"https://en.wikipedia.org/w/api.php?format=json&action=query&prop=extracts&explaintext=1&titles={title}"
if first_paragraph_only:
url += "&exintro=1"
import json
if not os.path.isfile(filename):
data = requests.get(url).json()
json.dump(data, open(filename, 'wt'))
else:
data = json.load(open(filename, "rt"))
page_content = list(data["query"]["pages"].values())[0]["extract"]
if take_head is not None and text_limit is not None:
page_content = page_content[:text_limit] if take_head else page_content[-text_limit:]
title_url = str(title).replace(' ', '_')
return Document(
page_content=str(page_content),
metadata={"source": f"https://en.wikipedia.org/wiki/{title_url}"},
)
def get_wiki_sources(first_para=True, text_limit=None):
"""
Get specific named sources from wikipedia
:param first_para:
:param text_limit:
:return:
"""
default_wiki_sources = ['Unix', 'Microsoft_Windows', 'Linux']
    env_wiki_sources = os.getenv('WIKI_SOURCES')
    # NOTE: assumes WIKI_SOURCES, if set, is a comma-separated list of page titles
    wiki_sources = env_wiki_sources.split(',') if env_wiki_sources else list(default_wiki_sources)
return [get_wiki_data(x, first_para, text_limit=text_limit) for x in wiki_sources]
def get_github_docs(repo_owner, repo_name):
"""
Access github from specific repo
:param repo_owner:
:param repo_name:
:return:
"""
with tempfile.TemporaryDirectory() as d:
subprocess.check_call(
f"git clone --depth 1 https://github.com/{repo_owner}/{repo_name}.git .",
cwd=d,
shell=True,
)
git_sha = (
subprocess.check_output("git rev-parse HEAD", shell=True, cwd=d)
.decode("utf-8")
.strip()
)
repo_path = pathlib.Path(d)
markdown_files = list(repo_path.glob("*/*.md")) + list(
repo_path.glob("*/*.mdx")
)
for markdown_file in markdown_files:
with open(markdown_file, "r") as f:
relative_path = markdown_file.relative_to(repo_path)
github_url = f"https://github.com/{repo_owner}/{repo_name}/blob/{git_sha}/{relative_path}"
yield Document(page_content=str(f.read()), metadata={"source": github_url})
def get_dai_pickle(dest="."):
from huggingface_hub import hf_hub_download
# True for case when locally already logged in with correct token, so don't have to set key
token = os.getenv('HUGGING_FACE_HUB_TOKEN', True)
path_to_zip_file = hf_hub_download('h2oai/dai_docs', 'dai_docs.pickle', token=token, repo_type='dataset')
shutil.copy(path_to_zip_file, dest)
def get_dai_docs(from_hf=False, get_pickle=True):
"""
Consume DAI documentation, or consume from public pickle
:param from_hf: get DAI docs from HF, then generate pickle for later use by LangChain
:param get_pickle: Avoid raw DAI docs, just get pickle directly from HF
:return:
"""
import pickle
if get_pickle:
get_dai_pickle()
dai_store = 'dai_docs.pickle'
dst = "working_dir_docs"
if not os.path.isfile(dai_store):
from create_data import setup_dai_docs
dst = setup_dai_docs(dst=dst, from_hf=from_hf)
import glob
files = list(glob.glob(os.path.join(dst, '*rst'), recursive=True))
basedir = os.path.abspath(os.getcwd())
from create_data import rst_to_outputs
new_outputs = rst_to_outputs(files)
os.chdir(basedir)
pickle.dump(new_outputs, open(dai_store, 'wb'))
else:
new_outputs = pickle.load(open(dai_store, 'rb'))
sources = []
for line, file in new_outputs:
# gradio requires any linked file to be with app.py
sym_src = os.path.abspath(os.path.join(dst, file))
sym_dst = os.path.abspath(os.path.join(os.getcwd(), file))
if os.path.lexists(sym_dst):
os.remove(sym_dst)
os.symlink(sym_src, sym_dst)
itm = Document(page_content=str(line), metadata={"source": file})
# NOTE: yield has issues when going into db, loses metadata
# yield itm
sources.append(itm)
return sources
def get_supported_types():
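    """Return the lists of supported non-image, image, and video file extensions; image types are
    taken from Pillow's registered extensions when Pillow is available, else empty."""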
non_image_types0 = ["pdf", "txt", "csv", "toml", "py", "rst", "xml", "rtf",
"md",
"html", "mhtml", "htm",
"enex", "eml", "epub", "odt", "pptx", "ppt",
"zip",
"gz",
"gzip",
"urls",
]
# "msg", GPL3
video_types0 = ['WEBM',
'MPG', 'MP2', 'MPEG', 'MPE', '.PV',
'OGG',
'MP4', 'M4P', 'M4V',
'AVI', 'WMV',
'MOV', 'QT',
'FLV', 'SWF',
'AVCHD']
video_types0 = [x.lower() for x in video_types0]
if have_pillow:
from PIL import Image
exts = Image.registered_extensions()
image_types0 = {ex for ex, f in exts.items() if f in Image.OPEN if ex not in video_types0 + non_image_types0}
image_types0 = sorted(image_types0)
image_types0 = [x[1:] if x.startswith('.') else x for x in image_types0]
else:
image_types0 = []
return non_image_types0, image_types0, video_types0
non_image_types, image_types, video_types = get_supported_types()
set_image_types = set(image_types)
if have_libreoffice or True:
# or True so it tries to load, e.g. on MAC/Windows, even if don't have libreoffice since works without that
non_image_types.extend(["docx", "doc", "xls", "xlsx"])
if have_jq:
non_image_types.extend(["json", "jsonl"])
file_types = non_image_types + image_types
def try_as_html(file):
# try treating as html as occurs when scraping websites
from bs4 import BeautifulSoup
with open(file, "rt") as f:
try:
is_html = bool(BeautifulSoup(f.read(), "html.parser").find())
except: # FIXME
is_html = False
if is_html:
file_url = 'file://' + file
doc1 = UnstructuredURLLoader(urls=[file_url]).load()
doc1 = [x for x in doc1 if x.page_content]
else:
doc1 = []
return doc1
def json_metadata_func(record: dict, metadata: dict) -> dict:
# Define the metadata extraction function.
if isinstance(record, dict):
metadata["sender_name"] = record.get("sender_name")
metadata["timestamp_ms"] = record.get("timestamp_ms")
if "source" in metadata:
metadata["source_json"] = metadata['source']
if "seq_num" in metadata:
metadata["seq_num_json"] = metadata['seq_num']
return metadata
def get_num_pages(file):
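    """Return the number of pages in a PDF using PyMuPDF, or None if the file cannot be opened."""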
try:
import fitz
src = fitz.open(file)
return len(src)
    except Exception:
return None
def get_each_page(file):
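    """Split a PDF into one single-page PDF per page (written next to the original file) and
    return the list of per-page file paths."""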
import fitz
pages = []
src = fitz.open(file)
for page in src:
tar = fitz.open() # output PDF for 1 page
# copy over current page
tar.insert_pdf(src, from_page=page.number, to_page=page.number)
page = f"{file}-page-{page.number}.pdf"
tar.save(page)
tar.close()
pages.append(page)
return pages
def file_to_doc(file,
filei=0,
base_path=None, verbose=False, fail_any_exception=False,
chunk=True, chunk_size=512, n_jobs=-1,
is_url=False, is_txt=False,
# urls
use_unstructured=True,
use_playwright=False,
use_selenium=False,
# pdfs
use_pymupdf='auto',
use_unstructured_pdf='auto',
use_pypdf='auto',
enable_pdf_ocr='auto',
try_pdf_as_html='auto',
enable_pdf_doctr='auto',
# images
enable_ocr=False,
enable_doctr=False,
enable_pix2struct=False,
enable_captions=True,
captions_model=None,
model_loaders=None,
# json
jq_schema='.[]',
headsize=50, # see also H2OSerpAPIWrapper
db_type=None,
selected_file_types=None):
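    """Convert a single input (file path, URL, or pasted text) into a list of LangChain Documents,
    choosing a loader based on the file type, attaching metadata, and chunking to `chunk_size`."""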
assert isinstance(model_loaders, dict)
if selected_file_types is not None:
set_image_types1 = set_image_types.intersection(set(selected_file_types))
else:
set_image_types1 = set_image_types
assert db_type is not None
chunk_sources = functools.partial(_chunk_sources, chunk=chunk, chunk_size=chunk_size, db_type=db_type)
add_meta = functools.partial(_add_meta, headsize=headsize, filei=filei)
# FIXME: if zip, file index order will not be correct if other files involved
path_to_docs_func = functools.partial(path_to_docs,
verbose=verbose,
fail_any_exception=fail_any_exception,
n_jobs=n_jobs,
chunk=chunk, chunk_size=chunk_size,
# url=file if is_url else None,
# text=file if is_txt else None,
# urls
use_unstructured=use_unstructured,
use_playwright=use_playwright,
use_selenium=use_selenium,
# pdfs
use_pymupdf=use_pymupdf,
use_unstructured_pdf=use_unstructured_pdf,
use_pypdf=use_pypdf,
enable_pdf_ocr=enable_pdf_ocr,
enable_pdf_doctr=enable_pdf_doctr,
try_pdf_as_html=try_pdf_as_html,
# images
enable_ocr=enable_ocr,
enable_doctr=enable_doctr,
enable_pix2struct=enable_pix2struct,
enable_captions=enable_captions,
captions_model=captions_model,
caption_loader=model_loaders['caption'],
doctr_loader=model_loaders['doctr'],
pix2struct_loader=model_loaders['pix2struct'],
# json
jq_schema=jq_schema,
db_type=db_type,
)
if file is None:
if fail_any_exception:
raise RuntimeError("Unexpected None file")
else:
return []
doc1 = [] # in case no support, or disabled support
if base_path is None and not is_txt and not is_url:
# then assume want to persist but don't care which path used
# can't be in base_path
dir_name = os.path.dirname(file)
base_name = os.path.basename(file)
# if from gradio, will have its own temp uuid too, but that's ok
base_name = sanitize_filename(base_name) + "_" + str(uuid.uuid4())[:10]
base_path = os.path.join(dir_name, base_name)
if is_url:
file = file.strip() # in case accidental spaces in front or at end
file_lower = file.lower()
case1 = file_lower.startswith('arxiv:') and len(file_lower.split('arxiv:')) == 2
case2 = file_lower.startswith('https://arxiv.org/abs') and len(file_lower.split('https://arxiv.org/abs')) == 2
case3 = file_lower.startswith('http://arxiv.org/abs') and len(file_lower.split('http://arxiv.org/abs')) == 2
case4 = file_lower.startswith('arxiv.org/abs/') and len(file_lower.split('arxiv.org/abs/')) == 2
if case1 or case2 or case3 or case4:
if case1:
query = file.lower().split('arxiv:')[1].strip()
elif case2:
query = file.lower().split('https://arxiv.org/abs/')[1].strip()
            elif case3:
                query = file.lower().split('http://arxiv.org/abs/')[1].strip()
            elif case4:
                query = file.lower().split('arxiv.org/abs/')[1].strip()
else:
raise RuntimeError("Unexpected arxiv error for %s" % file)
if have_arxiv:
trials = 3
docs1 = []
for trial in range(trials):
try:
docs1 = ArxivLoader(query=query, load_max_docs=20, load_all_available_meta=True).load()
break
except urllib.error.URLError:
pass
if not docs1:
print("Failed to get arxiv %s" % query, flush=True)
# ensure string, sometimes None
[[x.metadata.update({k: str(v)}) for k, v in x.metadata.items()] for x in docs1]
query_url = f"https://arxiv.org/abs/{query}"
[x.metadata.update(
dict(source=x.metadata.get('entry_id', query_url), query=query_url,
                         input_type='arxiv', head=x.metadata.get('Title', ''), date=str(datetime.now()))) for x in
docs1]
else:
docs1 = []
else:
if not (file.startswith("http://") or file.startswith("file://") or file.startswith("https://")):
file = 'http://' + file
docs1 = []
do_unstructured = only_unstructured_urls or use_unstructured
if only_selenium or only_playwright:
do_unstructured = False
do_playwright = have_playwright and (use_playwright or only_playwright)
if only_unstructured_urls or only_selenium:
do_playwright = False
do_selenium = have_selenium and (use_selenium or only_selenium)
if only_unstructured_urls or only_playwright:
do_selenium = False
if do_unstructured or use_unstructured:
docs1a = UnstructuredURLLoader(urls=[file]).load()
docs1a = [x for x in docs1a if x.page_content]
add_parser(docs1a, 'UnstructuredURLLoader')
docs1.extend(docs1a)
if len(docs1) == 0 and have_playwright or do_playwright:
# then something went wrong, try another loader:
from langchain.document_loaders import PlaywrightURLLoader
docs1a = asyncio.run(PlaywrightURLLoader(urls=[file]).aload())
# docs1 = PlaywrightURLLoader(urls=[file]).load()
docs1a = [x for x in docs1a if x.page_content]
add_parser(docs1a, 'PlaywrightURLLoader')
docs1.extend(docs1a)
            if (len(docs1) == 0 and have_selenium) or do_selenium:
# then something went wrong, try another loader:
# but requires Chrome binary, else get: selenium.common.exceptions.WebDriverException:
# Message: unknown error: cannot find Chrome binary
from langchain.document_loaders import SeleniumURLLoader
from selenium.common.exceptions import WebDriverException
try:
docs1a = SeleniumURLLoader(urls=[file]).load()
docs1a = [x for x in docs1a if x.page_content]
add_parser(docs1a, 'SeleniumURLLoader')
docs1.extend(docs1a)
except WebDriverException as e:
print("No web driver: %s" % str(e), flush=True)
            [x.metadata.update(dict(input_type='url', date=str(datetime.now()))) for x in docs1]
add_meta(docs1, file, parser="is_url")
docs1 = clean_doc(docs1)
doc1 = chunk_sources(docs1)
elif is_txt:
base_path = "user_paste"
base_path = makedirs(base_path, exist_ok=True, tmp_ok=True, use_base=True)
source_file = os.path.join(base_path, "_%s.txt" % str(uuid.uuid4())[:10])
with open(source_file, "wt") as f:
f.write(file)
metadata = dict(source=source_file, date=str(datetime.now()), input_type='pasted txt')
doc1 = Document(page_content=str(file), metadata=metadata)
add_meta(doc1, file, parser="f.write")
# Bit odd to change if was original text
# doc1 = clean_doc(doc1)
elif file.lower().endswith('.html') or file.lower().endswith('.mhtml') or file.lower().endswith('.htm'):
docs1 = UnstructuredHTMLLoader(file_path=file).load()
add_meta(docs1, file, parser='UnstructuredHTMLLoader')
docs1 = clean_doc(docs1)
doc1 = chunk_sources(docs1, language=Language.HTML)
elif (file.lower().endswith('.docx') or file.lower().endswith('.doc')) and (have_libreoffice or True):
docs1 = UnstructuredWordDocumentLoader(file_path=file).load()
add_meta(docs1, file, parser='UnstructuredWordDocumentLoader')
doc1 = chunk_sources(docs1)
elif (file.lower().endswith('.xlsx') or file.lower().endswith('.xls')) and (have_libreoffice or True):
docs1 = UnstructuredExcelLoader(file_path=file).load()
add_meta(docs1, file, parser='UnstructuredExcelLoader')
doc1 = chunk_sources(docs1)
elif file.lower().endswith('.odt'):
docs1 = UnstructuredODTLoader(file_path=file).load()
add_meta(docs1, file, parser='UnstructuredODTLoader')
doc1 = chunk_sources(docs1)
elif file.lower().endswith('pptx') or file.lower().endswith('ppt'):
docs1 = UnstructuredPowerPointLoader(file_path=file).load()
add_meta(docs1, file, parser='UnstructuredPowerPointLoader')
docs1 = clean_doc(docs1)
doc1 = chunk_sources(docs1)
elif file.lower().endswith('.txt'):
# use UnstructuredFileLoader ?
docs1 = TextLoader(file, encoding="utf8", autodetect_encoding=True).load()
# makes just one, but big one
doc1 = chunk_sources(docs1)
# Bit odd to change if was original text
# doc1 = clean_doc(doc1)
add_meta(doc1, file, parser='TextLoader')
elif file.lower().endswith('.rtf'):
docs1 = UnstructuredRTFLoader(file).load()
add_meta(docs1, file, parser='UnstructuredRTFLoader')
doc1 = chunk_sources(docs1)
elif file.lower().endswith('.md'):
docs1 = UnstructuredMarkdownLoader(file).load()
add_meta(docs1, file, parser='UnstructuredMarkdownLoader')
docs1 = clean_doc(docs1)
doc1 = chunk_sources(docs1, language=Language.MARKDOWN)
elif file.lower().endswith('.enex'):
docs1 = EverNoteLoader(file).load()
        add_meta(docs1, file, parser='EverNoteLoader')
doc1 = chunk_sources(docs1)
elif file.lower().endswith('.epub'):
docs1 = UnstructuredEPubLoader(file).load()
add_meta(docs1, file, parser='UnstructuredEPubLoader')
doc1 = chunk_sources(docs1)
elif any(file.lower().endswith(x) for x in set_image_types1):
docs1 = []
if verbose:
print("BEGIN: Tesseract", flush=True)
if have_tesseract and enable_ocr:
# OCR, somewhat works, but not great
docs1a = UnstructuredImageLoader(file, strategy='ocr_only').load()
# docs1a = UnstructuredImageLoader(file, strategy='hi_res').load()
docs1a = [x for x in docs1a if x.page_content]
add_meta(docs1a, file, parser='UnstructuredImageLoader')
docs1.extend(docs1a)
if verbose:
print("END: Tesseract", flush=True)
if have_doctr and enable_doctr:
if verbose:
print("BEGIN: DocTR", flush=True)
if model_loaders['doctr'] is not None and not isinstance(model_loaders['doctr'], (str, bool)):
if verbose:
print("Reuse DocTR", flush=True)
model_loaders['doctr'].load_model()
else:
if verbose:
print("Fresh DocTR", flush=True)
from image_doctr import H2OOCRLoader
model_loaders['doctr'] = H2OOCRLoader(layout_aware=True)
model_loaders['doctr'].set_document_paths([file])
docs1c = model_loaders['doctr'].load()
docs1c = [x for x in docs1c if x.page_content]
add_meta(docs1c, file, parser='H2OOCRLoader: %s' % 'DocTR')
# caption didn't set source, so fix-up meta
hash_of_file = hash_file(file)
[doci.metadata.update(source=file, hashid=hash_of_file) for doci in docs1c]
docs1.extend(docs1c)
if verbose:
print("END: DocTR", flush=True)
if enable_captions:
# BLIP
if verbose:
print("BEGIN: BLIP", flush=True)
if model_loaders['caption'] is not None and not isinstance(model_loaders['caption'], (str, bool)):
# assumes didn't fork into this process with joblib, else can deadlock
if verbose:
print("Reuse BLIP", flush=True)
model_loaders['caption'].load_model()
else:
if verbose:
print("Fresh BLIP", flush=True)
from image_captions import H2OImageCaptionLoader
model_loaders['caption'] = H2OImageCaptionLoader(caption_gpu=model_loaders['caption'] == 'gpu',
blip_model=captions_model,
blip_processor=captions_model)
model_loaders['caption'].set_image_paths([file])
docs1c = model_loaders['caption'].load()
docs1c = [x for x in docs1c if x.page_content]
add_meta(docs1c, file, parser='H2OImageCaptionLoader: %s' % captions_model)
# caption didn't set source, so fix-up meta
hash_of_file = hash_file(file)
[doci.metadata.update(source=file, hashid=hash_of_file) for doci in docs1c]
docs1.extend(docs1c)
if verbose:
print("END: BLIP", flush=True)
if enable_pix2struct:
            # Pix2Struct
if verbose:
print("BEGIN: Pix2Struct", flush=True)
if model_loaders['pix2struct'] is not None and not isinstance(model_loaders['pix2struct'], (str, bool)):
if verbose:
print("Reuse pix2struct", flush=True)
model_loaders['pix2struct'].load_model()
else:
if verbose:
print("Fresh pix2struct", flush=True)
from image_pix2struct import H2OPix2StructLoader
model_loaders['pix2struct'] = H2OPix2StructLoader()
model_loaders['pix2struct'].set_image_paths([file])
docs1c = model_loaders['pix2struct'].load()
docs1c = [x for x in docs1c if x.page_content]
add_meta(docs1c, file, parser='H2OPix2StructLoader: %s' % model_loaders['pix2struct'])
# caption didn't set source, so fix-up meta
hash_of_file = hash_file(file)
[doci.metadata.update(source=file, hashid=hash_of_file) for doci in docs1c]
docs1.extend(docs1c)
if verbose:
print("END: Pix2Struct", flush=True)
doc1 = chunk_sources(docs1)
elif file.lower().endswith('.msg'):
raise RuntimeError("Not supported, GPL3 license")
# docs1 = OutlookMessageLoader(file).load()
# docs1[0].metadata['source'] = file
elif file.lower().endswith('.eml'):
try:
docs1 = UnstructuredEmailLoader(file).load()
add_meta(docs1, file, parser='UnstructuredEmailLoader')
doc1 = chunk_sources(docs1)
except ValueError as e:
if 'text/html content not found in email' in str(e):
pass
else:
raise
doc1 = [x for x in doc1 if x.page_content]
if len(doc1) == 0:
            # e.g. the text/plain part exists, but no text/html content was found
# doc1 = TextLoader(file, encoding="utf8").load()
docs1 = UnstructuredEmailLoader(file, content_source="text/plain").load()
docs1 = [x for x in docs1 if x.page_content]
add_meta(docs1, file, parser='UnstructuredEmailLoader text/plain')
doc1 = chunk_sources(docs1)
# elif file.lower().endswith('.gcsdir'):
# doc1 = GCSDirectoryLoader(project_name, bucket, prefix).load()
# elif file.lower().endswith('.gcsfile'):
# doc1 = GCSFileLoader(project_name, bucket, blob).load()
elif file.lower().endswith('.rst'):
with open(file, "r") as f:
doc1 = Document(page_content=str(f.read()), metadata={"source": file})
add_meta(doc1, file, parser='f.read()')
doc1 = chunk_sources(doc1, language=Language.RST)
elif file.lower().endswith('.json'):
# 10k rows, 100 columns-like parts 4 bytes each
JSON_SIZE_LIMIT = int(os.getenv('JSON_SIZE_LIMIT', str(10 * 10 * 1024 * 10 * 4)))
if os.path.getsize(file) > JSON_SIZE_LIMIT:
raise ValueError(
"JSON file sizes > %s not supported for naive parsing and embedding, requires Agents enabled" % JSON_SIZE_LIMIT)
loader = JSONLoader(
file_path=file,
# jq_schema='.messages[].content',
jq_schema=jq_schema,
text_content=False,
metadata_func=json_metadata_func)
doc1 = loader.load()
add_meta(doc1, file, parser='JSONLoader: %s' % jq_schema)
fix_json_meta(doc1)
elif file.lower().endswith('.jsonl'):
loader = JSONLoader(
file_path=file,
# jq_schema='.messages[].content',
jq_schema=jq_schema,
json_lines=True,
text_content=False,
metadata_func=json_metadata_func)
doc1 = loader.load()
add_meta(doc1, file, parser='JSONLoader: %s' % jq_schema)
fix_json_meta(doc1)
elif file.lower().endswith('.pdf'):
# migration
        if isinstance(use_pymupdf, bool):
            use_pymupdf = 'on' if use_pymupdf else 'off'
        if isinstance(use_unstructured_pdf, bool):
            use_unstructured_pdf = 'on' if use_unstructured_pdf else 'off'
        if isinstance(use_pypdf, bool):
            use_pypdf = 'on' if use_pypdf else 'off'
        if isinstance(enable_pdf_ocr, bool):
            enable_pdf_ocr = 'on' if enable_pdf_ocr else 'off'
        if isinstance(try_pdf_as_html, bool):
            try_pdf_as_html = 'on' if try_pdf_as_html else 'off'
num_pages = get_num_pages(file)
doc1 = []
tried_others = False
handled = False
did_pymupdf = False
did_unstructured = False
e = None
        if have_pymupdf and ((len(doc1) == 0 and use_pymupdf == 'auto') or use_pymupdf == 'on'):
# GPL, only use if installed
from langchain.document_loaders import PyMuPDFLoader
# load() still chunks by pages, but every page has title at start to help
try:
doc1a = PyMuPDFLoader(file).load()
did_pymupdf = True
except BaseException as e0:
doc1a = []
print("PyMuPDFLoader: %s" % str(e0), flush=True)
e = e0
# remove empty documents
handled |= len(doc1a) > 0
doc1a = [x for x in doc1a if x.page_content]
doc1a = clean_doc(doc1a)
add_parser(doc1a, 'PyMuPDFLoader')
doc1.extend(doc1a)
        # if auto, only fall back to unstructured (OCR-capable) when PyMuPDF got nothing or doc has fewer than 2 pages,
        # since DocTR is superior and faster
        if ((len(doc1) == 0 or (num_pages is not None and num_pages < 2)) and use_unstructured_pdf == 'auto') \
                or use_unstructured_pdf == 'on':
tried_others = True
try:
doc1a = UnstructuredPDFLoader(file).load()
did_unstructured = True
except BaseException as e0:
doc1a = []
print("UnstructuredPDFLoader: %s" % str(e0), flush=True)
e = e0
handled |= len(doc1a) > 0
# remove empty documents
doc1a = [x for x in doc1a if x.page_content]
add_parser(doc1a, 'UnstructuredPDFLoader')
# seems to not need cleaning in most cases
doc1.extend(doc1a)
        if (len(doc1) == 0 and use_pypdf == 'auto') or use_pypdf == 'on':
tried_others = True
# open-source fallback
# load() still chunks by pages, but every page has title at start to help
try:
doc1a = PyPDFLoader(file).load()
except BaseException as e0:
doc1a = []
print("PyPDFLoader: %s" % str(e0), flush=True)
e = e0
handled |= len(doc1a) > 0
# remove empty documents
doc1a = [x for x in doc1a if x.page_content]
doc1a = clean_doc(doc1a)
add_parser(doc1a, 'PyPDFLoader')
doc1.extend(doc1a)
if not did_pymupdf and ((have_pymupdf and len(doc1) == 0) and tried_others):
# try again in case only others used, but only if didn't already try (2nd part of and)
# GPL, only use if installed
from langchain.document_loaders import PyMuPDFLoader
# load() still chunks by pages, but every page has title at start to help
try:
doc1a = PyMuPDFLoader(file).load()
except BaseException as e0:
doc1a = []
print("PyMuPDFLoader: %s" % str(e0), flush=True)
e = e0
handled |= len(doc1a) > 0
# remove empty documents
doc1a = [x for x in doc1a if x.page_content]
doc1a = clean_doc(doc1a)
add_parser(doc1a, 'PyMuPDFLoader2')
doc1.extend(doc1a)
did_pdf_ocr = False
        if (len(doc1) == 0 and enable_pdf_ocr == 'auto' and enable_pdf_doctr != 'on') or enable_pdf_ocr == 'on':
did_pdf_ocr = True
# no did_unstructured condition here because here we do OCR, and before we did not
# try OCR in end since slowest, but works on pure image pages well
doc1a = UnstructuredPDFLoader(file, strategy='ocr_only').load()
handled |= len(doc1a) > 0
# remove empty documents
doc1a = [x for x in doc1a if x.page_content]
add_parser(doc1a, 'UnstructuredPDFLoader ocr_only')
# seems to not need cleaning in most cases
doc1.extend(doc1a)
# Some PDFs return nothing or junk from PDFMinerLoader
# if auto, do doctr pdf if not too many pages, else can be slow/expensive
        if ((len(doc1) == 0 or (num_pages is not None and num_pages < 100)) and enable_pdf_doctr == 'auto') or \
                enable_pdf_doctr == 'on':
if verbose:
print("BEGIN: DocTR", flush=True)
if model_loaders['doctr'] is not None and not isinstance(model_loaders['doctr'], (str, bool)):
model_loaders['doctr'].load_model()
else:
from image_doctr import H2OOCRLoader
model_loaders['doctr'] = H2OOCRLoader(layout_aware=True)
# avoid having all pages in memory at same time, for large PDFs leads to system OOM
try:
pages = get_each_page(file)
got_pages = True
except:
# FIXME: protection for now, unsure how generally will work
pages = [file]
got_pages = False
model_loaders['doctr'].set_document_paths(pages)
doc1a = model_loaders['doctr'].load()
doc1a = [x for x in doc1a if x.page_content]
add_meta(doc1a, file, parser='H2OOCRLoader: %s' % 'DocTR')
handled |= len(doc1a) > 0
if got_pages:
for page in pages:
remove(page)
# caption didn't set source, so fix-up meta
hash_of_file = hash_file(file)
[doci.metadata.update(source=file, hashid=hash_of_file) for doci in doc1a]
doc1.extend(doc1a)
if verbose:
print("END: DocTR", flush=True)
if try_pdf_as_html in ['auto', 'on']:
doc1a = try_as_html(file)
add_parser(doc1a, 'try_as_html')
doc1.extend(doc1a)
if len(doc1) == 0:
# if literally nothing, show failed to parse so user knows, since unlikely nothing in PDF at all.
if handled:
raise ValueError("%s had no valid text, but meta data was parsed" % file)
else:
raise ValueError("%s had no valid text and no meta data was parsed: %s" % (file, str(e)))
add_meta(doc1, file, parser='pdf')
doc1 = chunk_sources(doc1)
elif file.lower().endswith('.csv'):
CSV_SIZE_LIMIT = int(os.getenv('CSV_SIZE_LIMIT', str(10 * 1024 * 10 * 4)))
if os.path.getsize(file) > CSV_SIZE_LIMIT:
raise ValueError(
"CSV file sizes > %s not supported for naive parsing and embedding, requires Agents enabled" % CSV_SIZE_LIMIT)
doc1 = CSVLoader(file).load()
add_meta(doc1, file, parser='CSVLoader')
if isinstance(doc1, list):
# each row is a Document, identify
[x.metadata.update(dict(chunk_id=chunk_id)) for chunk_id, x in enumerate(doc1)]
if db_type in ['chroma', 'chroma_old']:
# then separate summarize list
sdoc1 = clone_documents(doc1)
                [x.metadata.update(dict(chunk_id=-1)) for x in sdoc1]
doc1 = sdoc1 + doc1
elif file.lower().endswith('.py'):
doc1 = PythonLoader(file).load()
add_meta(doc1, file, parser='PythonLoader')
doc1 = chunk_sources(doc1, language=Language.PYTHON)
elif file.lower().endswith('.toml'):
doc1 = TomlLoader(file).load()
add_meta(doc1, file, parser='TomlLoader')
doc1 = chunk_sources(doc1)
elif file.lower().endswith('.xml'):
from langchain.document_loaders import UnstructuredXMLLoader
loader = UnstructuredXMLLoader(file_path=file)
doc1 = loader.load()
add_meta(doc1, file, parser='UnstructuredXMLLoader')
elif file.lower().endswith('.urls'):
with open(file, "r") as f:
urls = f.readlines()
# recurse
doc1 = path_to_docs_func(None, url=urls)
elif file.lower().endswith('.zip'):
with zipfile.ZipFile(file, 'r') as zip_ref:
# don't put into temporary path, since want to keep references to docs inside zip
            # so just extract into base_path (a new directory next to the zip)
zip_ref.extractall(base_path)
# recurse
doc1 = path_to_docs_func(base_path)
elif file.lower().endswith('.gz') or file.lower().endswith('.gzip'):
if file.lower().endswith('.gz'):
de_file = file.lower().replace('.gz', '')
else:
de_file = file.lower().replace('.gzip', '')
with gzip.open(file, 'rb') as f_in:
with open(de_file, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
# recurse
doc1 = file_to_doc(de_file,
filei=filei, # single file, same file index as outside caller
base_path=base_path, verbose=verbose, fail_any_exception=fail_any_exception,
chunk=chunk, chunk_size=chunk_size, n_jobs=n_jobs,
is_url=is_url, is_txt=is_txt,
# urls
use_unstructured=use_unstructured,
use_playwright=use_playwright,
use_selenium=use_selenium,
# pdfs
use_pymupdf=use_pymupdf,
use_unstructured_pdf=use_unstructured_pdf,
use_pypdf=use_pypdf,
enable_pdf_ocr=enable_pdf_ocr,
enable_pdf_doctr=enable_pdf_doctr,
try_pdf_as_html=try_pdf_as_html,
# images
enable_ocr=enable_ocr,
enable_doctr=enable_doctr,
enable_pix2struct=enable_pix2struct,
enable_captions=enable_captions,
captions_model=captions_model,
model_loaders=model_loaders,
# json
jq_schema=jq_schema,
headsize=headsize,
db_type=db_type,
selected_file_types=selected_file_types)
else:
raise RuntimeError("No file handler for %s" % os.path.basename(file))
# allow doc1 to be list or not.
if not isinstance(doc1, list):
# If not list, did not chunk yet, so chunk now
docs = chunk_sources([doc1])
elif isinstance(doc1, list) and len(doc1) == 1:
# if list of length one, don't trust and chunk it, chunk_id's will still be correct if repeat
docs = chunk_sources(doc1)
else:
docs = doc1
assert isinstance(docs, list)
return docs
def path_to_doc1(file,
filei=0,
verbose=False, fail_any_exception=False, return_file=True,
chunk=True, chunk_size=512,
n_jobs=-1,
is_url=False, is_txt=False,
# urls
use_unstructured=True,
use_playwright=False,
use_selenium=False,
# pdfs
use_pymupdf='auto',
use_unstructured_pdf='auto',
use_pypdf='auto',
enable_pdf_ocr='auto',
enable_pdf_doctr='auto',
try_pdf_as_html='auto',
# images
enable_ocr=False,
enable_doctr=False,
enable_pix2struct=False,
enable_captions=True,
captions_model=None,
model_loaders=None,
# json
jq_schema='.[]',
db_type=None,
selected_file_types=None):
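    """Ingest a single file/URL/text snippet into a list of Documents.

    Thin wrapper around file_to_doc() that catches exceptions (returning a
    placeholder Document carrying the exception and traceback in its metadata
    when fail_any_exception is False) and, when return_file is True, pickles the
    result to a temp file and returns that filename instead of the documents,
    which keeps multiprocessing payloads small.
    """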
assert db_type is not None
if verbose:
if is_url:
print("Ingesting URL: %s" % file, flush=True)
elif is_txt:
print("Ingesting Text: %s" % file, flush=True)
else:
print("Ingesting file: %s" % file, flush=True)
res = None
try:
# don't pass base_path=path, would infinitely recurse
res = file_to_doc(file,
filei=filei,
base_path=None, verbose=verbose, fail_any_exception=fail_any_exception,
chunk=chunk, chunk_size=chunk_size,
n_jobs=n_jobs,
is_url=is_url, is_txt=is_txt,
# urls
use_unstructured=use_unstructured,
use_playwright=use_playwright,
use_selenium=use_selenium,
# pdfs
use_pymupdf=use_pymupdf,
use_unstructured_pdf=use_unstructured_pdf,
use_pypdf=use_pypdf,
enable_pdf_ocr=enable_pdf_ocr,
enable_pdf_doctr=enable_pdf_doctr,
try_pdf_as_html=try_pdf_as_html,
# images
enable_ocr=enable_ocr,
enable_doctr=enable_doctr,
enable_pix2struct=enable_pix2struct,
enable_captions=enable_captions,
captions_model=captions_model,
model_loaders=model_loaders,
# json
jq_schema=jq_schema,
db_type=db_type,
selected_file_types=selected_file_types)
except BaseException as e:
print("Failed to ingest %s due to %s" % (file, traceback.format_exc()))
if fail_any_exception:
raise
else:
exception_doc = Document(
page_content='',
metadata={"source": file, "exception": '%s Exception: %s' % (file, str(e)),
"traceback": traceback.format_exc()})
res = [exception_doc]
if verbose:
if is_url:
print("DONE Ingesting URL: %s" % file, flush=True)
elif is_txt:
print("DONE Ingesting Text: %s" % file, flush=True)
else:
print("DONE Ingesting file: %s" % file, flush=True)
if return_file:
base_tmp = "temp_path_to_doc1"
if not os.path.isdir(base_tmp):
base_tmp = makedirs(base_tmp, exist_ok=True, tmp_ok=True, use_base=True)
filename = os.path.join(base_tmp, str(uuid.uuid4()) + ".tmp.pickle")
with open(filename, 'wb') as f:
pickle.dump(res, f)
return filename
return res
def path_to_docs(path_or_paths, verbose=False, fail_any_exception=False, n_jobs=-1,
chunk=True, chunk_size=512,
url=None, text=None,
# urls
use_unstructured=True,
use_playwright=False,
use_selenium=False,
# pdfs
use_pymupdf='auto',
use_unstructured_pdf='auto',
use_pypdf='auto',
enable_pdf_ocr='auto',
enable_pdf_doctr='auto',
try_pdf_as_html='auto',
# images
enable_ocr=False,
enable_doctr=False,
enable_pix2struct=False,
enable_captions=True,
captions_model=None,
caption_loader=None,
doctr_loader=None,
pix2struct_loader=None,
# json
jq_schema='.[]',
existing_files=[],
existing_hash_ids={},
db_type=None,
selected_file_types=None,
):
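    """Convert a path (or list of paths), URL(s), or pasted text into a list of Documents.

    Globs supported file types under a directory, optionally skips files whose
    hash already matches existing_hash_ids, and ingests non-image and image
    files separately (image work may be forced to a single job to avoid forking
    after CUDA is initialized). Any caption/DocTR/Pix2Struct loaders that were
    freshly loaded here are unloaded from GPU at the end.
    """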
if verbose:
print("BEGIN Consuming path_or_paths=%s url=%s text=%s" % (path_or_paths, url, text), flush=True)
if selected_file_types is not None:
non_image_types1 = [x for x in non_image_types if x in selected_file_types]
image_types1 = [x for x in image_types if x in selected_file_types]
else:
non_image_types1 = non_image_types.copy()
image_types1 = image_types.copy()
assert db_type is not None
# path_or_paths could be str, list, tuple, generator
globs_image_types = []
globs_non_image_types = []
if not path_or_paths and not url and not text:
return []
elif url:
url = get_list_or_str(url)
globs_non_image_types = url if isinstance(url, (list, tuple, types.GeneratorType)) else [url]
elif text:
globs_non_image_types = text if isinstance(text, (list, tuple, types.GeneratorType)) else [text]
elif isinstance(path_or_paths, str) and os.path.isdir(path_or_paths):
# single path, only consume allowed files
path = path_or_paths
# Below globs should match patterns in file_to_doc()
[globs_image_types.extend(glob.glob(os.path.join(path, "./**/*.%s" % ftype), recursive=True))
for ftype in image_types1]
globs_image_types = [os.path.normpath(x) for x in globs_image_types]
[globs_non_image_types.extend(glob.glob(os.path.join(path, "./**/*.%s" % ftype), recursive=True))
for ftype in non_image_types1]
globs_non_image_types = [os.path.normpath(x) for x in globs_non_image_types]
else:
if isinstance(path_or_paths, str):
if os.path.isfile(path_or_paths) or os.path.isdir(path_or_paths):
path_or_paths = [path_or_paths]
else:
# path was deleted etc.
return []
# list/tuple of files (consume what can, and exception those that selected but cannot consume so user knows)
assert isinstance(path_or_paths, (list, tuple, types.GeneratorType)), \
"Wrong type for path_or_paths: %s %s" % (path_or_paths, type(path_or_paths))
# reform out of allowed types
globs_image_types.extend(
flatten_list([[os.path.normpath(x) for x in path_or_paths if x.endswith(y)] for y in image_types1]))
# could do below:
# globs_non_image_types = flatten_list([[x for x in path_or_paths if x.endswith(y)] for y in non_image_types1])
# But instead, allow fail so can collect unsupported too
set_globs_image_types = set(globs_image_types)
globs_non_image_types.extend([os.path.normpath(x) for x in path_or_paths if x not in set_globs_image_types])
# filter out any files to skip (e.g. if already processed them)
# this is easy, but too aggressive in case a file changed, so parent probably passed existing_files=[]
assert not existing_files, "DEV: assume not using this approach"
if existing_files:
set_skip_files = set(existing_files)
globs_image_types = [x for x in globs_image_types if x not in set_skip_files]
globs_non_image_types = [x for x in globs_non_image_types if x not in set_skip_files]
if existing_hash_ids:
# assume consistent with add_meta() use of hash_file(file)
# also assume consistent with get_existing_hash_ids for dict creation
# assume hashable values
existing_hash_ids_set = set(existing_hash_ids.items())
hash_ids_all_image = set({x: hash_file(x) for x in globs_image_types}.items())
hash_ids_all_non_image = set({x: hash_file(x) for x in globs_non_image_types}.items())
        # don't use symmetric diff; if a file is gone, just ignore it rather than removing anything
# just consider existing files (key) having new hash or not (value)
new_files_image = set(dict(hash_ids_all_image - existing_hash_ids_set).keys())
new_files_non_image = set(dict(hash_ids_all_non_image - existing_hash_ids_set).keys())
globs_image_types = [x for x in globs_image_types if x in new_files_image]
globs_non_image_types = [x for x in globs_non_image_types if x in new_files_non_image]
# could use generator, but messes up metadata handling in recursive case
    if (caption_loader and not isinstance(caption_loader, (bool, str)) and caption_loader.device != 'cpu') or \
            get_device() == 'cuda':
# to avoid deadlocks, presume was preloaded and so can't fork due to cuda context
# get_device() == 'cuda' because presume faster to process image from (temporarily) preloaded model
n_jobs_image = 1
else:
n_jobs_image = n_jobs
if enable_doctr or enable_pdf_doctr in [True, 'auto', 'on']:
if doctr_loader and not isinstance(doctr_loader, (bool, str)) and doctr_loader.device != 'cpu':
# can't fork cuda context
n_jobs = 1
return_file = True # local choice
is_url = url is not None
is_txt = text is not None
model_loaders = dict(caption=caption_loader,
doctr=doctr_loader,
pix2struct=pix2struct_loader)
model_loaders0 = model_loaders.copy()
kwargs = dict(verbose=verbose, fail_any_exception=fail_any_exception,
return_file=return_file,
chunk=chunk, chunk_size=chunk_size,
n_jobs=n_jobs,
is_url=is_url,
is_txt=is_txt,
# urls
use_unstructured=use_unstructured,
use_playwright=use_playwright,
use_selenium=use_selenium,
# pdfs
use_pymupdf=use_pymupdf,
use_unstructured_pdf=use_unstructured_pdf,
use_pypdf=use_pypdf,
enable_pdf_ocr=enable_pdf_ocr,
enable_pdf_doctr=enable_pdf_doctr,
try_pdf_as_html=try_pdf_as_html,
# images
enable_ocr=enable_ocr,
enable_doctr=enable_doctr,
enable_pix2struct=enable_pix2struct,
enable_captions=enable_captions,
captions_model=captions_model,
model_loaders=model_loaders,
# json
jq_schema=jq_schema,
db_type=db_type,
selected_file_types=selected_file_types,
)
if n_jobs != 1 and len(globs_non_image_types) > 1:
# avoid nesting, e.g. upload 1 zip and then inside many files
# harder to handle if upload many zips with many files, inner parallel one will be disabled by joblib
documents = ProgressParallel(n_jobs=n_jobs, verbose=10 if verbose else 0, backend='multiprocessing')(
delayed(path_to_doc1)(file, filei=filei, **kwargs) for filei, file in enumerate(globs_non_image_types)
)
else:
documents = [path_to_doc1(file, filei=filei, **kwargs) for filei, file in
enumerate(tqdm(globs_non_image_types))]
# do images separately since can't fork after cuda in parent, so can't be parallel
if n_jobs_image != 1 and len(globs_image_types) > 1:
# avoid nesting, e.g. upload 1 zip and then inside many files
# harder to handle if upload many zips with many files, inner parallel one will be disabled by joblib
        image_documents = ProgressParallel(n_jobs=n_jobs_image, verbose=10 if verbose else 0, backend='multiprocessing')(
delayed(path_to_doc1)(file, filei=filei, **kwargs) for filei, file in enumerate(globs_image_types)
)
else:
image_documents = [path_to_doc1(file, filei=filei, **kwargs) for filei, file in
enumerate(tqdm(globs_image_types))]
# unload loaders (image loaders, includes enable_pdf_doctr that uses same loader)
for name, loader in model_loaders.items():
loader0 = model_loaders0[name]
real_model_initial = loader0 is not None and not isinstance(loader0, (str, bool))
real_model_final = model_loaders[name] is not None and not isinstance(model_loaders[name], (str, bool))
if not real_model_initial and real_model_final:
# clear off GPU newly added model
model_loaders[name].unload_model()
# add image docs in
documents += image_documents
if return_file:
# then documents really are files
files = documents.copy()
documents = []
for fil in files:
with open(fil, 'rb') as f:
documents.extend(pickle.load(f))
# remove temp pickle
remove(fil)
else:
documents = reduce(concat, documents)
if verbose:
print("END consuming path_or_paths=%s url=%s text=%s" % (path_or_paths, url, text), flush=True)
return documents
def prep_langchain(persist_directory,
load_db_if_exists,
db_type, use_openai_embedding,
langchain_mode, langchain_mode_paths, langchain_mode_types,
hf_embedding_model,
migrate_embedding_model,
auto_migrate_db,
n_jobs=-1, kwargs_make_db={},
verbose=False):
"""
do prep first time, involving downloads
# FIXME: Add github caching then add here
:return:
"""
if os.getenv("HARD_ASSERTS"):
assert langchain_mode not in ['MyData'], "Should not prep scratch/personal data"
if langchain_mode in langchain_modes_intrinsic:
return None
db_dir_exists = os.path.isdir(persist_directory)
user_path = langchain_mode_paths.get(langchain_mode)
if db_dir_exists and user_path is None:
if verbose:
print("Prep: persist_directory=%s exists, using" % persist_directory, flush=True)
db, use_openai_embedding, hf_embedding_model = \
get_existing_db(None, persist_directory, load_db_if_exists,
db_type, use_openai_embedding,
langchain_mode, langchain_mode_paths, langchain_mode_types,
hf_embedding_model, migrate_embedding_model, auto_migrate_db,
n_jobs=n_jobs)
else:
if db_dir_exists and user_path is not None:
if verbose:
print("Prep: persist_directory=%s exists, user_path=%s passed, adding any changed or new documents" % (
persist_directory, user_path), flush=True)
elif not db_dir_exists:
if verbose:
print("Prep: persist_directory=%s does not exist, regenerating" % persist_directory, flush=True)
db = None
if langchain_mode in ['DriverlessAI docs']:
# FIXME: Could also just use dai_docs.pickle directly and upload that
get_dai_docs(from_hf=True)
if langchain_mode in ['wiki']:
get_wiki_sources(first_para=kwargs_make_db['first_para'], text_limit=kwargs_make_db['text_limit'])
langchain_kwargs = kwargs_make_db.copy()
langchain_kwargs.update(locals())
db, num_new_sources, new_sources_metadata = make_db(**langchain_kwargs)
return db
import posthog
posthog.disabled = True
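# Stub out posthog's background consumer (pulled in by chromadb) so no telemetry is ever queued or uploaded.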
class FakeConsumer(object):
def __init__(self, *args, **kwargs):
pass
def run(self):
pass
def pause(self):
pass
def upload(self):
pass
def next(self):
pass
def request(self, batch):
pass
posthog.Consumer = FakeConsumer
def get_hf_embedding_model_name(hf_embedding_model):
if isinstance(hf_embedding_model, dict):
# embedding itself preloaded globally
hf_embedding_model = hf_embedding_model['name']
return hf_embedding_model
def check_update_chroma_embedding(db,
db_type,
use_openai_embedding,
hf_embedding_model, migrate_embedding_model, auto_migrate_db,
langchain_mode, langchain_mode_paths, langchain_mode_types,
n_jobs=-1):
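    """Re-embed a chroma db if its stored embedding info no longer matches the requested embedding.

    Pulls all documents out of the existing db, moves the old persist directory
    aside as a .bak, and rebuilds the index with the new embedding settings.
    Returns (db, changed_db).
    """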
changed_db = False
embed_tuple = load_embed(db=db)
# expect string comparison, if dict then model object with name and get name not dict or model
hf_embedding_model = get_hf_embedding_model_name(hf_embedding_model)
if embed_tuple not in [(True, use_openai_embedding, hf_embedding_model),
(False, use_openai_embedding, hf_embedding_model)]:
print("Detected new embedding %s vs. %s %s, updating db: %s" % (
use_openai_embedding, hf_embedding_model, embed_tuple, langchain_mode), flush=True)
# handle embedding changes
db_get = get_documents(db)
sources = [Document(page_content=result[0], metadata=result[1] or {})
for result in zip(db_get['documents'], db_get['metadatas'])]
# delete index, has to be redone
persist_directory = db._persist_directory
shutil.move(persist_directory, persist_directory + "_" + str(uuid.uuid4()) + ".bak")
assert db_type in ['chroma', 'chroma_old']
load_db_if_exists = False
db = get_db(sources, use_openai_embedding=use_openai_embedding, db_type=db_type,
persist_directory=persist_directory, load_db_if_exists=load_db_if_exists,
langchain_mode=langchain_mode,
langchain_mode_paths=langchain_mode_paths,
langchain_mode_types=langchain_mode_types,
collection_name=None,
hf_embedding_model=hf_embedding_model,
migrate_embedding_model=migrate_embedding_model,
auto_migrate_db=auto_migrate_db,
n_jobs=n_jobs,
)
changed_db = True
print("Done updating db for new embedding: %s" % langchain_mode, flush=True)
return db, changed_db
def migrate_meta_func(db, langchain_mode):
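    """Ensure every record in an old chroma db has a chunk_id in its metadata.

    New-style chroma dbs already store chunk_id, so only old dbs are migrated;
    a cheap single-doc similarity search is used to detect whether migration is
    needed before doing the expensive full db.get(). Returns (db, changed_db).
    """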
changed_db = False
if db is None:
return db, changed_db
if is_new_chroma_db(db):
# when added new chroma db, already had chunk_id
# so never need to migrate new db that does expensive db.get() because chunk_id always in new db
return db, changed_db
# full db.get() expensive, do faster trial with sim search
# so can just check one doc as consistent or not
docs1 = db.similarity_search("", k=1)
if len(docs1) == 0:
return db, changed_db
doc1 = docs1[0]
metadata1 = doc1.metadata
if 'chunk_id' not in metadata1:
print("Detected old metadata without chunk_id, adding additional information", flush=True)
t0 = time.time()
db_get = get_documents(db)
# handle meta changes
changed_db = True
[x.update(dict(chunk_id=x.get('chunk_id', 0))) for x in db_get['metadatas']]
client_collection = db._client.get_collection(name=db._collection.name,
embedding_function=db._collection._embedding_function)
client_collection.update(ids=db_get['ids'], metadatas=db_get['metadatas'])
if os.getenv('HARD_ASSERTS'):
# check
db_get = get_documents(db)
assert 'chunk_id' in db_get['metadatas'][0], "Failed to add meta"
print("Done updating db for new meta: %s in %s seconds" % (langchain_mode, time.time() - t0), flush=True)
return db, changed_db
def get_existing_db(db, persist_directory,
load_db_if_exists, db_type, use_openai_embedding,
langchain_mode, langchain_mode_paths, langchain_mode_types,
hf_embedding_model,
migrate_embedding_model,
auto_migrate_db=False,
verbose=False, check_embedding=True, migrate_meta=True,
n_jobs=-1):
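    """Load (or reuse) a persisted chroma db, handling chromadb<0.4 migration,
    embedding-model changes, and chunk_id metadata migration.

    Returns (db, use_openai_embedding, hf_embedding_model); the embedding
    settings may be overridden by whatever was saved alongside the db.
    """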
if load_db_if_exists and db_type in ['chroma', 'chroma_old'] and os.path.isdir(persist_directory):
if os.path.isfile(os.path.join(persist_directory, 'chroma.sqlite3')):
must_migrate = False
elif os.path.isdir(os.path.join(persist_directory, 'index')):
must_migrate = True
else:
return db, use_openai_embedding, hf_embedding_model
chroma_settings = dict(is_persistent=True)
use_chromamigdb = False
if must_migrate:
if auto_migrate_db:
print("Detected chromadb<0.4 database, require migration, doing now....", flush=True)
from chroma_migrate.import_duckdb import migrate_from_duckdb
import chromadb
api = chromadb.PersistentClient(path=persist_directory)
did_migration = migrate_from_duckdb(api, persist_directory)
assert did_migration, "Failed to migrate chroma collection at %s, see https://docs.trychroma.com/migration for CLI tool" % persist_directory
elif have_chromamigdb:
print(
"Detected chroma<0.4 database but --auto_migrate_db=False, but detected chromamigdb package, so using old database that still requires duckdb",
flush=True)
chroma_settings = dict(chroma_db_impl="duckdb+parquet")
use_chromamigdb = True
else:
raise ValueError(
"Detected chromadb<0.4 database, require migration, but did not detect chromamigdb package or did not choose auto_migrate_db=False (see FAQ.md)")
if db is None:
if verbose:
print("DO Loading db: %s" % langchain_mode, flush=True)
got_embedding, use_openai_embedding0, hf_embedding_model0 = load_embed(persist_directory=persist_directory)
if got_embedding:
use_openai_embedding, hf_embedding_model = use_openai_embedding0, hf_embedding_model0
embedding = get_embedding(use_openai_embedding, hf_embedding_model=hf_embedding_model)
import logging
logging.getLogger("chromadb").setLevel(logging.ERROR)
if use_chromamigdb:
from chromamigdb.config import Settings
chroma_class = ChromaMig
else:
from chromadb.config import Settings
chroma_class = Chroma
client_settings = Settings(anonymized_telemetry=False,
**chroma_settings,
persist_directory=persist_directory)
db = chroma_class(persist_directory=persist_directory, embedding_function=embedding,
collection_name=langchain_mode.replace(' ', '_'),
client_settings=client_settings)
try:
db.similarity_search('')
except BaseException as e:
# migration when no embed_info
if 'Dimensionality of (768) does not match index dimensionality (384)' in str(e) or \
'Embedding dimension 768 does not match collection dimensionality 384' in str(e):
hf_embedding_model = "sentence-transformers/all-MiniLM-L6-v2"
embedding = get_embedding(use_openai_embedding, hf_embedding_model=hf_embedding_model)
db = chroma_class(persist_directory=persist_directory, embedding_function=embedding,
collection_name=langchain_mode.replace(' ', '_'),
client_settings=client_settings)
# should work now, let fail if not
db.similarity_search('')
save_embed(db, use_openai_embedding, hf_embedding_model)
else:
raise
if verbose:
print("DONE Loading db: %s" % langchain_mode, flush=True)
else:
if not migrate_embedding_model:
# OVERRIDE embedding choices if could load embedding info when not migrating
got_embedding, use_openai_embedding, hf_embedding_model = load_embed(db=db)
if verbose:
print("USING already-loaded db: %s" % langchain_mode, flush=True)
if check_embedding:
db_trial, changed_db = check_update_chroma_embedding(db,
db_type,
use_openai_embedding,
hf_embedding_model,
migrate_embedding_model,
auto_migrate_db,
langchain_mode,
langchain_mode_paths,
langchain_mode_types,
n_jobs=n_jobs)
if changed_db:
db = db_trial
# only call persist if really changed db, else takes too long for large db
if db is not None:
db.persist()
clear_embedding(db)
save_embed(db, use_openai_embedding, hf_embedding_model)
if migrate_meta:
db_trial, changed_db = migrate_meta_func(db, langchain_mode)
if changed_db:
db = db_trial
return db, use_openai_embedding, hf_embedding_model
return db, use_openai_embedding, hf_embedding_model
def clear_embedding(db):
if db is None:
return
# don't keep on GPU, wastes memory, push back onto CPU and only put back on GPU once again embed
try:
if hasattr(db._embedding_function, 'client') and hasattr(db._embedding_function.client, 'cpu'):
# only push back to CPU if each db/user has own embedding model, else if shared share on GPU
if hasattr(db._embedding_function.client, 'preload') and not db._embedding_function.client.preload:
db._embedding_function.client.cpu()
clear_torch_cache()
except RuntimeError as e:
print("clear_embedding error: %s" % ''.join(traceback.format_tb(e.__traceback__)), flush=True)
def make_db(**langchain_kwargs):
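    """Fill in missing _make_db() kwargs from run_qa_db() defaults, then call _make_db()."""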
func_names = list(inspect.signature(_make_db).parameters)
missing_kwargs = [x for x in func_names if x not in langchain_kwargs]
defaults_db = {k: v.default for k, v in dict(inspect.signature(run_qa_db).parameters).items()}
for k in missing_kwargs:
if k in defaults_db:
langchain_kwargs[k] = defaults_db[k]
# final check for missing
missing_kwargs = [x for x in func_names if x not in langchain_kwargs]
assert not missing_kwargs, "Missing kwargs for make_db: %s" % missing_kwargs
# only keep actual used
langchain_kwargs = {k: v for k, v in langchain_kwargs.items() if k in func_names}
return _make_db(**langchain_kwargs)
embed_lock_name = 'embed.lock'
def get_embed_lock_file(db, persist_directory=None):
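    """Return the lock file path used to serialize reads/writes of a db's embed_info,
    or None when the db has no persist directory."""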
if hasattr(db, '_persist_directory') or persist_directory:
if persist_directory is None:
persist_directory = db._persist_directory
check_persist_directory(persist_directory)
base_path = os.path.join('locks', persist_directory)
base_path = makedirs(base_path, exist_ok=True, tmp_ok=True, use_base=True)
lock_file = os.path.join(base_path, embed_lock_name)
makedirs(os.path.dirname(lock_file))
return lock_file
return None
def save_embed(db, use_openai_embedding, hf_embedding_model):
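    """Persist which embedding (OpenAI flag plus HF model name) was used for this db, under a file lock."""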
if hasattr(db, '_persist_directory'):
persist_directory = db._persist_directory
lock_file = get_embed_lock_file(db)
with filelock.FileLock(lock_file):
embed_info_file = os.path.join(persist_directory, 'embed_info')
with open(embed_info_file, 'wb') as f:
if isinstance(hf_embedding_model, str):
hf_embedding_model_save = hf_embedding_model
elif hasattr(hf_embedding_model, 'model_name'):
hf_embedding_model_save = hf_embedding_model.model_name
elif isinstance(hf_embedding_model, dict) and 'name' in hf_embedding_model:
hf_embedding_model_save = hf_embedding_model['name']
                elif isinstance(hf_embedding_model, dict):
if os.getenv('HARD_ASSERTS'):
# unexpected in testing or normally
raise RuntimeError("HERE")
hf_embedding_model_save = 'hkunlp/instructor-large'
pickle.dump((use_openai_embedding, hf_embedding_model_save), f)
return use_openai_embedding, hf_embedding_model
def load_embed(db=None, persist_directory=None):
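    """Load the embedding settings stored next to a db.

    Returns (got_embedding, use_openai_embedding, hf_embedding_model); falls back
    to default settings (with got_embedding=False) when no embed_info file exists
    or it cannot be read.
    """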
if hasattr(db, 'embeddings') and hasattr(db.embeddings, 'model_name'):
hf_embedding_model = db.embeddings.model_name if 'openai' not in db.embeddings.model_name.lower() else None
use_openai_embedding = hf_embedding_model is None
save_embed(db, use_openai_embedding, hf_embedding_model)
return True, use_openai_embedding, hf_embedding_model
if persist_directory is None:
persist_directory = db._persist_directory
embed_info_file = os.path.join(persist_directory, 'embed_info')
if os.path.isfile(embed_info_file):
lock_file = get_embed_lock_file(db, persist_directory=persist_directory)
with filelock.FileLock(lock_file):
with open(embed_info_file, 'rb') as f:
try:
use_openai_embedding, hf_embedding_model = pickle.load(f)
if not isinstance(hf_embedding_model, str):
# work-around bug introduced here: https://github.com/h2oai/h2ogpt/commit/54c4414f1ce3b5b7c938def651c0f6af081c66de
hf_embedding_model = 'hkunlp/instructor-large'
# fix file
save_embed(db, use_openai_embedding, hf_embedding_model)
got_embedding = True
except EOFError:
use_openai_embedding, hf_embedding_model = False, 'hkunlp/instructor-large'
got_embedding = False
if os.getenv('HARD_ASSERTS'):
# unexpected in testing or normally
raise
else:
# migration, assume defaults
use_openai_embedding, hf_embedding_model = False, "sentence-transformers/all-MiniLM-L6-v2"
got_embedding = False
assert isinstance(hf_embedding_model, str)
return got_embedding, use_openai_embedding, hf_embedding_model
def get_persist_directory(langchain_mode, langchain_type=None, db1s=None, dbs=None):
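    """Resolve the on-disk persist directory (and personal vs. shared type) for a langchain_mode.

    Disabled/LLM modes return an empty path. Personal collections live under
    USERS_BASE_DIR/<user>/db_dir_<mode>, shared ones under db_dir_<mode>, and
    anything else gets a throwaway directory under db_nonusers.
    Returns (persist_directory, langchain_type).
    """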
if langchain_mode in [LangChainMode.DISABLED.value, LangChainMode.LLM.value]:
# not None so join works but will fail to find db
return '', langchain_type
userid = get_userid_direct(db1s)
username = get_username_direct(db1s)
# sanity for bad code
assert userid != 'None'
assert username != 'None'
dirid = username or userid
if langchain_type == LangChainTypes.SHARED.value and not dirid:
dirid = './' # just to avoid error
if langchain_type == LangChainTypes.PERSONAL.value and not dirid:
# e.g. from client when doing transient calls with MyData
if db1s is None:
# just trick to get filled locally
db1s = {LangChainMode.MY_DATA.value: [None, None, None]}
set_userid_direct(db1s, str(uuid.uuid4()), str(uuid.uuid4()))
userid = get_userid_direct(db1s)
username = get_username_direct(db1s)
dirid = username or userid
langchain_type = LangChainTypes.PERSONAL.value
# deal with existing locations
user_base_dir = os.getenv('USERS_BASE_DIR', 'users')
persist_directory = os.path.join(user_base_dir, dirid, 'db_dir_%s' % langchain_mode)
if userid and \
(os.path.isdir(persist_directory) or
db1s is not None and langchain_mode in db1s or
langchain_type == LangChainTypes.PERSONAL.value):
langchain_type = LangChainTypes.PERSONAL.value
persist_directory = makedirs(persist_directory, use_base=True)
check_persist_directory(persist_directory)
return persist_directory, langchain_type
persist_directory = 'db_dir_%s' % langchain_mode
if (os.path.isdir(persist_directory) or
dbs is not None and langchain_mode in dbs or
langchain_type == LangChainTypes.SHARED.value):
# ensure consistent
langchain_type = LangChainTypes.SHARED.value
persist_directory = makedirs(persist_directory, use_base=True)
check_persist_directory(persist_directory)
return persist_directory, langchain_type
# dummy return for prep_langchain() or full personal space
base_others = 'db_nonusers'
persist_directory = os.path.join(base_others, 'db_dir_%s' % str(uuid.uuid4()))
persist_directory = makedirs(persist_directory, use_base=True)
langchain_type = LangChainTypes.PERSONAL.value
check_persist_directory(persist_directory)
return persist_directory, langchain_type
def check_persist_directory(persist_directory):
# deal with some cases when see intrinsic names being used as shared
for langchain_mode in langchain_modes_intrinsic:
if persist_directory == 'db_dir_%s' % langchain_mode:
raise RuntimeError("Illegal access to %s" % persist_directory)
def _make_db(use_openai_embedding=False,
hf_embedding_model=None,
migrate_embedding_model=False,
auto_migrate_db=False,
first_para=False, text_limit=None,
chunk=True, chunk_size=512,
# urls
use_unstructured=True,
use_playwright=False,
use_selenium=False,
# pdfs
use_pymupdf='auto',
use_unstructured_pdf='auto',
use_pypdf='auto',
enable_pdf_ocr='auto',
enable_pdf_doctr='auto',
try_pdf_as_html='auto',
# images
enable_ocr=False,
enable_doctr=False,
enable_pix2struct=False,
enable_captions=True,
captions_model=None,
caption_loader=None,
doctr_loader=None,
pix2struct_loader=None,
# json
jq_schema='.[]',
langchain_mode=None,
langchain_mode_paths=None,
langchain_mode_types=None,
db_type='faiss',
load_db_if_exists=True,
db=None,
n_jobs=-1,
verbose=False):
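    """Build or update the vector db for one langchain_mode.

    Loads an existing persisted db if possible, gathers sources (wiki, github,
    DriverlessAI docs, or user files under langchain_mode_paths), and either
    creates a new db or adds only new/changed sources to the existing one.
    Returns (db, number_of_new_sources, new_sources_metadata).
    """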
assert hf_embedding_model is not None
user_path = langchain_mode_paths.get(langchain_mode)
langchain_type = langchain_mode_types.get(langchain_mode, LangChainTypes.EITHER.value)
persist_directory, langchain_type = get_persist_directory(langchain_mode, langchain_type=langchain_type)
langchain_mode_types[langchain_mode] = langchain_type
# see if can get persistent chroma db
db_trial, use_openai_embedding, hf_embedding_model = \
get_existing_db(db, persist_directory, load_db_if_exists, db_type,
use_openai_embedding,
langchain_mode, langchain_mode_paths, langchain_mode_types,
hf_embedding_model, migrate_embedding_model, auto_migrate_db, verbose=verbose,
n_jobs=n_jobs)
if db_trial is not None:
db = db_trial
sources = []
if not db:
chunk_sources = functools.partial(_chunk_sources, chunk=chunk, chunk_size=chunk_size, db_type=db_type)
if langchain_mode in ['wiki_full']:
from read_wiki_full import get_all_documents
small_test = None
print("Generating new wiki", flush=True)
sources1 = get_all_documents(small_test=small_test, n_jobs=os.cpu_count() // 2)
print("Got new wiki", flush=True)
sources1 = chunk_sources(sources1, chunk=chunk)
print("Chunked new wiki", flush=True)
sources.extend(sources1)
elif langchain_mode in ['wiki']:
sources1 = get_wiki_sources(first_para=first_para, text_limit=text_limit)
sources1 = chunk_sources(sources1, chunk=chunk)
sources.extend(sources1)
elif langchain_mode in ['github h2oGPT']:
# sources = get_github_docs("dagster-io", "dagster")
sources1 = get_github_docs("h2oai", "h2ogpt")
# FIXME: always chunk for now
sources1 = chunk_sources(sources1)
sources.extend(sources1)
elif langchain_mode in ['DriverlessAI docs']:
sources1 = get_dai_docs(from_hf=True)
# FIXME: DAI docs are already chunked well, should only chunk more if over limit
sources1 = chunk_sources(sources1, chunk=False)
sources.extend(sources1)
if user_path:
# UserData or custom, which has to be from user's disk
if db is not None:
# NOTE: Ignore file names for now, only go by hash ids
# existing_files = get_existing_files(db)
existing_files = []
# full scan below, but only at start-up or when adding files from disk in UI, will be slow for large dbs
# FIXME: Could have option to just add, not delete old ones
existing_hash_ids = get_existing_hash_ids(db)
else:
# pretend no existing files so won't filter
existing_files = []
existing_hash_ids = []
# chunk internally for speed over multiple docs
# FIXME: If first had old Hash=None and switch embeddings,
# then re-embed, and then hit here and reload so have hash, and then re-embed.
sources1 = path_to_docs(user_path, n_jobs=n_jobs, chunk=chunk, chunk_size=chunk_size,
# urls
use_unstructured=use_unstructured,
use_playwright=use_playwright,
use_selenium=use_selenium,
# pdfs
use_pymupdf=use_pymupdf,
use_unstructured_pdf=use_unstructured_pdf,
use_pypdf=use_pypdf,
enable_pdf_ocr=enable_pdf_ocr,
enable_pdf_doctr=enable_pdf_doctr,
try_pdf_as_html=try_pdf_as_html,
# images
enable_ocr=enable_ocr,
enable_doctr=enable_doctr,
enable_pix2struct=enable_pix2struct,
enable_captions=enable_captions,
captions_model=captions_model,
caption_loader=caption_loader,
doctr_loader=doctr_loader,
pix2struct_loader=pix2struct_loader,
# json
jq_schema=jq_schema,
existing_files=existing_files, existing_hash_ids=existing_hash_ids,
db_type=db_type)
new_metadata_sources = set([x.metadata['source'] for x in sources1])
if new_metadata_sources:
if os.getenv('NO_NEW_FILES') is not None:
raise RuntimeError("Expected no new files! %s" % new_metadata_sources)
print("Loaded %s new files as sources to add to %s" % (len(new_metadata_sources), langchain_mode),
flush=True)
if verbose:
print("Files added: %s" % '\n'.join(new_metadata_sources), flush=True)
sources.extend(sources1)
if len(sources) > 0 and os.getenv('NO_NEW_FILES') is not None:
raise RuntimeError("Expected no new files! %s" % langchain_mode)
if len(sources) == 0 and os.getenv('SHOULD_NEW_FILES') is not None:
raise RuntimeError("Expected new files! %s" % langchain_mode)
print("Loaded %s sources for potentially adding to %s" % (len(sources), langchain_mode), flush=True)
# see if got sources
if not sources:
if verbose:
if db is not None:
print("langchain_mode %s has no new sources, nothing to add to db" % langchain_mode, flush=True)
else:
print("langchain_mode %s has no sources, not making new db" % langchain_mode, flush=True)
return db, 0, []
    if verbose:
        if db is not None:
            print("Adding to db", flush=True)
        else:
            print("Generating db", flush=True)
if not db:
if sources:
db = get_db(sources, use_openai_embedding=use_openai_embedding, db_type=db_type,
persist_directory=persist_directory,
langchain_mode=langchain_mode,
langchain_mode_paths=langchain_mode_paths,
langchain_mode_types=langchain_mode_types,
hf_embedding_model=hf_embedding_model,
migrate_embedding_model=migrate_embedding_model,
auto_migrate_db=auto_migrate_db,
n_jobs=n_jobs)
if verbose:
print("Generated db", flush=True)
elif langchain_mode not in langchain_modes_intrinsic:
print("Did not generate db for %s since no sources" % langchain_mode, flush=True)
new_sources_metadata = [x.metadata for x in sources]
elif user_path is not None:
print("Existing db, potentially adding %s sources from user_path=%s" % (len(sources), user_path), flush=True)
db, num_new_sources, new_sources_metadata = add_to_db(db, sources, db_type=db_type,
use_openai_embedding=use_openai_embedding,
hf_embedding_model=hf_embedding_model)
print("Existing db, added %s new sources from user_path=%s" % (num_new_sources, user_path), flush=True)
else:
new_sources_metadata = [x.metadata for x in sources]
return db, len(new_sources_metadata), new_sources_metadata
def is_chroma_db(db):
return isinstance(db, Chroma) or isinstance(db, ChromaMig) or ChromaMig.__name__ in str(db)
def is_new_chroma_db(db):
if isinstance(db, Chroma):
return True
if isinstance(db, ChromaMig) or ChromaMig.__name__ in str(db):
return False
if os.getenv('HARD_ASSERTS'):
raise RuntimeError("Shouldn't reach here, unknown db: %s" % str(db))
return False
def sim_search(db, query='', k=1000, with_score=False, filter_kwargs=None, chunk_id_filter=None, verbose=False):
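    """Similarity search that avoids expensive metadata filters on large chroma dbs.

    For a large new-style chroma db with a chunk_id_filter, over-fetch (k*4)
    without a filter and post-filter by chunk_id, falling back to a normal
    filtered search if too few documents survive.
    """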
if is_chroma_db(db) and large_chroma_db(db) and chunk_id_filter is not None:
# try to avoid filter if just doing chunk_id -1 or >= 0
docs = _sim_search(db, query=query, k=k * 4, with_score=with_score, verbose=verbose)
if with_score:
if chunk_id_filter >= 0:
docs = [x for x in docs if x[0].metadata.get('chunk_id', chunk_id_filter) >= chunk_id_filter]
else:
docs = [x for x in docs if x[0].metadata.get('chunk_id', chunk_id_filter) == chunk_id_filter]
else:
if chunk_id_filter >= 0:
docs = [x for x in docs if x.metadata.get('chunk_id', chunk_id_filter) >= chunk_id_filter]
else:
docs = [x for x in docs if x.metadata.get('chunk_id', chunk_id_filter) == chunk_id_filter]
if len(docs) < max(1, k // 4):
# full search if failed to find enough
docs = _sim_search(db, query=query, k=k, with_score=with_score, filter_kwargs=filter_kwargs,
verbose=verbose)
return docs
else:
return _sim_search(db, query=query, k=k, with_score=with_score, filter_kwargs=filter_kwargs, verbose=verbose)
def _sim_search(db, query='', k=1000, with_score=False, filter_kwargs=None, verbose=False):
if k == -1:
k = 1000
if filter_kwargs is None:
filter_kwargs = {}
docs = []
while True:
try:
if with_score:
docs = db.similarity_search_with_score(query, k=k, **filter_kwargs)
else:
docs = db.similarity_search(query, k=k, **filter_kwargs)
break
except (RuntimeError, AttributeError) as e:
# AttributeError is for people with wrong version of langchain
if verbose:
print("chroma bug: %s" % str(e), flush=True)
if k == 1:
raise
if k > 500:
k -= 200
elif k > 100:
k -= 50
elif k > 10:
k -= 5
else:
k -= 1
k = max(1, k)
return docs
def large_chroma_db(db):
return get_size(db._persist_directory) >= 500 * 1024 ** 2
def get_metadatas(db, full_required=True, k_max=10000):
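    """Return metadata dicts for all (or up to k_max) documents in the db,
    handling FAISS, chroma, and other vectorstores differently; for large
    new-style chroma dbs with full_required=False, fall back to a similarity search."""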
from langchain.vectorstores import FAISS
if isinstance(db, FAISS):
metadatas = [v.metadata for k, v in db.docstore._dict.items()]
elif is_chroma_db(db):
if full_required or not (large_chroma_db(db) and is_new_chroma_db(db)):
db_get = get_documents(db)
documents = db_get['documents']
if documents is None:
documents = []
metadatas = db_get['metadatas']
if metadatas is None:
if documents is not None:
metadatas = [{}] * len(documents)
else:
metadatas = []
else:
# just use sim search, since too many
docs1 = sim_search(db, k=k_max, with_score=False)
metadatas = [x.metadata for x in docs1]
elif db is not None:
# FIXME: Hack due to https://github.com/weaviate/weaviate/issues/1947
# seems no way to get all metadata, so need to avoid this approach for weaviate
metadatas = [x.metadata for x in db.similarity_search("", k=k_max)]
else:
metadatas = []
return metadatas
def get_db_lock_file(db, lock_type='getdb'):
if hasattr(db, '_persist_directory'):
persist_directory = db._persist_directory
check_persist_directory(persist_directory)
base_path = os.path.join('locks', persist_directory)
base_path = makedirs(base_path, exist_ok=True, tmp_ok=True, use_base=True)
lock_file = os.path.join(base_path, "%s.lock" % lock_type)
makedirs(os.path.dirname(lock_file)) # ensure made
return lock_file
return None
def get_documents(db):
if hasattr(db, '_persist_directory'):
lock_file = get_db_lock_file(db)
with filelock.FileLock(lock_file):
# get segfaults and other errors when multiple threads access this
return _get_documents(db)
else:
return _get_documents(db)
def _get_documents(db):
# returns not just documents, but full dict of documents, metadatas, ids, embeddings
    # documents['documents'] should be list of texts, not Document() type
from langchain.vectorstores import FAISS
if isinstance(db, FAISS):
documents = [v for k, v in db.docstore._dict.items()]
documents = dict(documents=documents, metadatas=[{}] * len(documents), ids=[0] * len(documents))
elif isinstance(db, Chroma) or isinstance(db, ChromaMig) or ChromaMig.__name__ in str(db):
documents = db.get()
if documents is None:
documents = dict(documents=[], metadatas=[], ids=[])
else:
# FIXME: Hack due to https://github.com/weaviate/weaviate/issues/1947
# seems no way to get all metadata, so need to avoid this approach for weaviate
docs_from_search = [x for x in db.similarity_search("", k=10000)]
# Don't filter out by content etc. here, might use get_metadatas too separately
documents = [x.page_content for x in docs_from_search]
metadatas = [x.metadata for x in docs_from_search]
documents = dict(documents=documents, metadatas=metadatas, ids=[0] * len(documents))
return documents
def get_docs_and_meta(db, top_k_docs, filter_kwargs={}, text_context_list=None, chunk_id_filter=None):
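    """Lock-protected wrapper around _get_docs_and_meta() for dbs with a persist directory."""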
if hasattr(db, '_persist_directory'):
lock_file = get_db_lock_file(db)
with filelock.FileLock(lock_file):
return _get_docs_and_meta(db, top_k_docs, filter_kwargs=filter_kwargs,
text_context_list=text_context_list,
chunk_id_filter=chunk_id_filter)
else:
return _get_docs_and_meta(db, top_k_docs, filter_kwargs=filter_kwargs,
text_context_list=text_context_list,
chunk_id_filter=chunk_id_filter,
)
def _get_docs_and_meta(db, top_k_docs, filter_kwargs={}, text_context_list=None, chunk_id_filter=None, k_max=1000):
# db_documents should be list of texts
# db_metadatas should be list of dicts
db_documents = []
db_metadatas = []
if text_context_list:
db_documents += [x.page_content if hasattr(x, 'page_content') else x for x in text_context_list]
db_metadatas += [x.metadata if hasattr(x, 'metadata') else {} for x in text_context_list]
from langchain.vectorstores import FAISS
if isinstance(db, Chroma) or isinstance(db, ChromaMig) or ChromaMig.__name__ in str(db):
if top_k_docs == -1:
limit = k_max
else:
limit = max(top_k_docs, k_max)
db_get = db._collection.get(where=filter_kwargs.get('filter'), limit=limit)
db_metadatas += db_get['metadatas']
db_documents += db_get['documents']
elif isinstance(db, FAISS):
import itertools
db_metadatas += get_metadatas(db)
# FIXME: FAISS has no filter
if top_k_docs == -1:
db_docs_faiss = list(db.docstore._dict.values())
else:
# slice dict first
db_docs_faiss = list(dict(itertools.islice(db.docstore._dict.items(), top_k_docs)).values())
db_docs_faiss = [x.page_content for x in db_docs_faiss]
db_documents += db_docs_faiss
elif db is not None:
db_metadatas += get_metadatas(db)
db_documents += get_documents(db)['documents']
return db_documents, db_metadatas
def get_existing_files(db):
    # Note: below does a full scan of the db if used, but this function is not used yet
metadatas = get_metadatas(db)
metadata_sources = set([x['source'] for x in metadatas])
return metadata_sources
def get_existing_hash_ids(db):
metadatas = get_metadatas(db)
    # assume consistency: any prior hashed source was a single hashed file at the time, across all of its source chunks
metadata_hash_ids = {os.path.normpath(x['source']): x.get('hashid') for x in metadatas}
return metadata_hash_ids
def run_qa_db(**kwargs):
func_names = list(inspect.signature(_run_qa_db).parameters)
# hard-coded defaults
kwargs['answer_with_sources'] = kwargs.get('answer_with_sources', True)
kwargs['show_rank'] = kwargs.get('show_rank', False)
kwargs['show_accordions'] = kwargs.get('show_accordions', True)
kwargs['show_link_in_sources'] = kwargs.get('show_link_in_sources', True)
kwargs['top_k_docs_max_show'] = kwargs.get('top_k_docs_max_show', 10)
kwargs['llamacpp_dict'] = {} # shouldn't be required unless from test using _run_qa_db
kwargs['exllama_dict'] = {} # shouldn't be required unless from test using _run_qa_db
kwargs['gptq_dict'] = {} # shouldn't be required unless from test using _run_qa_db
kwargs['sink_dict'] = {} # shouldn't be required unless from test using _run_qa_db
kwargs['hf_model_dict'] = {} # shouldn't be required unless from test using _run_qa_db
missing_kwargs = [x for x in func_names if x not in kwargs]
assert not missing_kwargs, "Missing kwargs for run_qa_db: %s" % missing_kwargs
# only keep actual used
kwargs = {k: v for k, v in kwargs.items() if k in func_names}
try:
return _run_qa_db(**kwargs)
finally:
clear_torch_cache()
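# Illustrative usage (sketch, with hypothetical kwargs): run_qa_db() is a generator that yields
# partial and final result dicts; callers normally build the full kwargs dict programmatically,
# since run_qa_db() asserts that every parameter of _run_qa_db() is present, e.g.:
#   for res in run_qa_db(**all_kwargs):
#       print(res['response'])  # dict also has 'prompt', 'sources', 'num_prompt_tokens', 'llm_answers'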
def _run_qa_db(query=None,
iinput=None,
context=None,
use_openai_model=False, use_openai_embedding=False,
first_para=False, text_limit=None, top_k_docs=4, chunk=True, chunk_size=512,
# urls
use_unstructured=True,
use_playwright=False,
use_selenium=False,
# pdfs
use_pymupdf='auto',
use_unstructured_pdf='auto',
use_pypdf='auto',
enable_pdf_ocr='auto',
enable_pdf_doctr='auto',
try_pdf_as_html='auto',
# images
enable_ocr=False,
enable_doctr=False,
enable_pix2struct=False,
enable_captions=True,
captions_model=None,
caption_loader=None,
doctr_loader=None,
pix2struct_loader=None,
# json
jq_schema='.[]',
langchain_mode_paths={},
langchain_mode_types={},
detect_user_path_changes_every_query=False,
db_type=None,
model_name=None, model=None, tokenizer=None, inference_server=None,
langchain_only_model=False,
hf_embedding_model=None,
migrate_embedding_model=False,
auto_migrate_db=False,
stream_output0=False,
stream_output=False,
async_output=True,
num_async=3,
prompter=None,
prompt_type=None,
prompt_dict=None,
answer_with_sources=True,
append_sources_to_answer=True,
cut_distance=1.64,
add_chat_history_to_context=True,
add_search_to_context=False,
keep_sources_in_context=False,
memory_restriction_level=0,
system_prompt='',
sanitize_bot_response=False,
show_rank=False,
show_accordions=True,
show_link_in_sources=True,
top_k_docs_max_show=10,
use_llm_if_no_docs=True,
load_db_if_exists=False,
db=None,
do_sample=False,
temperature=0.1,
top_p=0.7,
top_k=40,
penalty_alpha=0.0,
num_beams=1,
max_new_tokens=512,
min_new_tokens=1,
attention_sinks=False,
truncation_generation=False,
early_stopping=False,
max_time=180,
repetition_penalty=1.0,
num_return_sequences=1,
langchain_mode=None,
langchain_action=None,
langchain_agents=None,
document_subset=DocumentSubset.Relevant.name,
document_choice=[DocumentChoice.ALL.value],
pre_prompt_query=None,
prompt_query=None,
pre_prompt_summary=None,
prompt_summary=None,
text_context_list=None,
chat_conversation=None,
visible_models=None,
h2ogpt_key=None,
docs_ordering_type=docs_ordering_types_default,
min_max_new_tokens=256,
max_input_tokens=-1,
docs_token_handling=None,
docs_joiner=None,
hyde_level=None,
hyde_template=None,
n_jobs=-1,
llamacpp_dict=None,
exllama_dict=None,
verbose=False,
cli=False,
lora_weights='',
auto_reduce_chunks=True,
max_chunks=100,
total_tokens_for_docs=None,
headsize=50,
):
"""
:param query:
:param use_openai_model:
:param use_openai_embedding:
:param first_para:
:param text_limit:
:param top_k_docs:
:param chunk:
:param chunk_size:
:param langchain_mode_paths: dict of langchain_mode -> user path to glob recursively from
:param db_type: 'faiss' for in-memory
'chroma' (for chroma >= 0.4)
'chroma_old' (for chroma < 0.4)
'weaviate' for persisted on disk
:param model_name: model name, used to switch behaviors
:param model: pre-initialized model, else will make new one
:param tokenizer: pre-initialized tokenizer, else will make new one. Required not None if model is not None
    :param answer_with_sources: whether to include source citations along with the answer
:return:
"""
t_run = time.time()
if LangChainAgent.SMART.value in langchain_agents:
# FIXME: support whatever model/user supports
# right now doesn't support, just hangs for some reason
async_output = False
elif langchain_action in [LangChainAction.QUERY.value]:
        # async output only supported for summarization/extraction, not for query
async_output = False
else:
if stream_output0:
# threads and asyncio don't mix
async_output = False
else:
# go back to not streaming for summarization/extraction to be parallel
stream_output = stream_output0
    # keep the original max_new_tokens: for summarization/extraction, docs were originally limited,
    # and the limit can be relaxed later if each document or reduced response is smaller than the max document size
max_new_tokens0 = max_new_tokens
# in case None, e.g. lazy client, then set based upon actual model
pre_prompt_query, prompt_query, pre_prompt_summary, prompt_summary = \
get_langchain_prompts(pre_prompt_query, prompt_query,
pre_prompt_summary, prompt_summary,
model_name, inference_server,
llamacpp_dict.get('model_path_llama'))
assert db_type is not None
assert hf_embedding_model is not None
assert langchain_mode_paths is not None
assert langchain_mode_types is not None
if model is not None:
assert model_name is not None # require so can make decisions
assert query is not None
assert prompter is not None or prompt_type is not None or model is None # if model is None, then will generate
if prompter is not None:
prompt_type = prompter.prompt_type
prompt_dict = prompter.prompt_dict
if model is not None:
assert prompt_type is not None
if prompt_type == PromptType.custom.name:
assert prompt_dict is not None # should at least be {} or ''
else:
prompt_dict = ''
if LangChainAgent.SEARCH.value in langchain_agents and 'llama' in model_name.lower():
        system_prompt = """You are a zero-shot ReAct agent.
Consider the prompt of Question, which was the original query from the user.
Respond to the prompt of Thought with a thought that may lead to a reasonable new action choice.
Respond to the prompt of Action with an action to take out of the tools given, giving exactly a single word for the tool name.
Respond to the prompt of Action Input with an input to give the tool.
Consider the prompt of Observation, which was the response from the tool.
Repeat this Thought, Action, Action Input, Observation, Thought sequence several times, with new and different thoughts and actions each time; do not repeat.
Once satisfied that the thoughts and responses are sufficient to answer the question, then respond to the prompt of Thought with: I now know the final answer
Respond to the prompt of Final Answer with your final high-quality bullet-list answer to the original query.
"""
prompter.system_prompt = system_prompt
assert len(set(gen_hyper).difference(inspect.signature(get_llm).parameters)) == 0
# pass in context to LLM directly, since already has prompt_type structure
# can't pass through langchain in get_chain() to LLM: https://github.com/hwchase17/langchain/issues/6638
llm_kwargs = dict(use_openai_model=use_openai_model, model_name=model_name,
model=model,
tokenizer=tokenizer,
inference_server=inference_server,
langchain_only_model=langchain_only_model,
stream_output=stream_output,
async_output=async_output,
num_async=num_async,
do_sample=do_sample,
temperature=temperature,
top_k=top_k,
top_p=top_p,
penalty_alpha=penalty_alpha,
num_beams=num_beams,
max_new_tokens=max_new_tokens,
max_new_tokens0=max_new_tokens0,
min_new_tokens=min_new_tokens,
early_stopping=early_stopping,
max_time=max_time,
repetition_penalty=repetition_penalty,
num_return_sequences=num_return_sequences,
prompt_type=prompt_type,
prompt_dict=prompt_dict,
prompter=prompter,
context=context,
iinput=iinput,
sanitize_bot_response=sanitize_bot_response,
system_prompt=system_prompt,
visible_models=visible_models,
h2ogpt_key=h2ogpt_key,
min_max_new_tokens=min_max_new_tokens,
max_input_tokens=max_input_tokens,
n_jobs=n_jobs,
llamacpp_dict=llamacpp_dict,
exllama_dict=exllama_dict,
cli=cli,
verbose=verbose,
attention_sinks=attention_sinks,
truncation_generation=truncation_generation,
)
llm, model_name, streamer, prompt_type_out, async_output, only_new_text, gradio_server = \
get_llm(**llm_kwargs)
if LangChainAgent.SMART.value in langchain_agents:
# get llm for exploration
llm_kwargs_explore = llm_kwargs.copy()
llm_kwargs_explore.update(dict(do_sample=True, temperature=0.5))
llm_explore, _, _, _, _, _, _ = get_llm(**llm_kwargs_explore)
else:
llm_explore = None
# in case change, override original prompter
if hasattr(llm, 'prompter'):
prompter = llm.prompter
if hasattr(llm, 'pipeline') and hasattr(llm.pipeline, 'prompter'):
prompter = llm.pipeline.prompter
if prompter is None:
if prompt_type is None:
prompt_type = prompt_type_out
# get prompter
chat = True # FIXME?
prompter = Prompter(prompt_type, prompt_dict, debug=False, chat=chat, stream_output=stream_output,
system_prompt=system_prompt)
scores = []
chain = None
# basic version of prompt without docs etc.
data_point = dict(context=context, instruction=query, input=iinput)
prompt_basic = prompter.generate_prompt(data_point)
# default is to embed query directly without processing
query_embedding = query
if isinstance(document_choice, str):
# support string as well
document_choice = [document_choice]
# NOTE: Could try to establish if pure llm mode or not, but makes code too complex
query_action = langchain_action == LangChainAction.QUERY.value
summarize_action = langchain_action in [LangChainAction.SUMMARIZE_MAP.value,
LangChainAction.SUMMARIZE_ALL.value,
LangChainAction.SUMMARIZE_REFINE.value,
LangChainAction.EXTRACT.value]
get_answer_kwargs = dict(show_accordions=show_accordions,
show_link_in_sources=show_link_in_sources,
top_k_docs_max_show=top_k_docs_max_show,
verbose=verbose,
)
# NOTE: only includes those things get_llm() and get_chain() do not change
run_target_func = functools.partial(run_target,
stream_output=stream_output,
lora_weights=lora_weights, max_time=max_time,
sanitize_bot_response=sanitize_bot_response,
verbose=verbose)
func_names = list(inspect.signature(get_chain).parameters)
sim_kwargs = {k: v for k, v in locals().items() if k in func_names}
missing_kwargs = [x for x in func_names if x not in sim_kwargs]
assert not missing_kwargs, "Missing: %s" % missing_kwargs
llm_answers = {}
if hyde_level is not None and hyde_level > 0 and query_action and document_subset not in non_query_commands:
query_embedding, llm_answers = yield from run_hyde(**locals())
sim_kwargs['query_embedding'] = query_embedding
docs, chain, scores, \
num_docs_before_cut, \
use_llm_if_no_docs, top_k_docs_max_show, \
llm, model_name, streamer, prompt_type_out, async_output, only_new_text = \
get_chain(**sim_kwargs)
if document_subset in non_query_commands:
formatted_doc_chunks = '\n\n'.join([get_url(x) + '\n\n' + x.page_content for x in docs])
if not formatted_doc_chunks and not use_llm_if_no_docs:
yield dict(prompt=prompt_basic, response="No sources", sources='', num_prompt_tokens=0,
llm_answers=llm_answers)
return
# if no sources, outside gpt_langchain, LLM will be used with '' input
scores = [1] * len(docs)
get_answer_args = tuple([query, docs, formatted_doc_chunks, scores, show_rank,
answer_with_sources,
append_sources_to_answer])
get_answer_kwargs.update(dict(t_run=time.time() - t_run,
count_input_tokens=0,
count_output_tokens=0,
))
ret, extra = get_sources_answer(*get_answer_args, **get_answer_kwargs)
yield dict(prompt=prompt_basic, response=formatted_doc_chunks, sources=extra, num_prompt_tokens=0,
llm_answers=llm_answers)
return
if langchain_agents and not chain:
ret = '%s not supported by this model' % langchain_agents[0]
extra = ''
yield dict(prompt=prompt_basic, response=ret, sources=extra, num_prompt_tokens=0, llm_answers=llm_answers)
return
if langchain_mode not in langchain_modes_non_db and not docs:
if langchain_action in [LangChainAction.SUMMARIZE_MAP.value,
LangChainAction.SUMMARIZE_ALL.value,
LangChainAction.SUMMARIZE_REFINE.value]:
ret = 'No relevant documents to summarize.' if query or num_docs_before_cut > 0 else 'No documents to summarize.'
elif langchain_action in [LangChainAction.EXTRACT.value]:
ret = 'No relevant documents to extract from.' if query or num_docs_before_cut > 0 else 'No documents to extract from.'
elif not use_llm_if_no_docs:
ret = 'No relevant documents to query (for chatting with LLM, pick Resources->Collections->LLM).' if num_docs_before_cut else 'No documents to query (for chatting with LLM, pick Resources->Collections->LLM).'
else:
# if here then ok to continue using chain if exists. E.g. use_llm_if_no_docs=True and doing query langchain_action
ret = None
if ret is not None:
extra = ''
yield dict(prompt=prompt_basic, response=ret, sources=extra, num_prompt_tokens=0, llm_answers=llm_answers)
return
# NOTE: If chain=None, could return if HF type (i.e. not langchain_only_model), but makes code too complex
# only return now if no chain at all, e.g. when only returning sources
if chain is None:
return
answer = yield from run_target_func(query=query,
chain=chain,
llm=llm,
streamer=streamer,
prompter=prompter,
llm_answers=llm_answers,
llm_answers_key='llm_answer_final',
async_output=async_output,
only_new_text=only_new_text)
get_answer_args = tuple([query, docs, answer, scores, show_rank,
answer_with_sources,
append_sources_to_answer])
get_answer_kwargs.update(dict(t_run=time.time() - t_run,
count_input_tokens=llm.count_input_tokens
if hasattr(llm, 'count_input_tokens') else None,
count_output_tokens=llm.count_output_tokens
if hasattr(llm, 'count_output_tokens') else None,
))
# for final yield, get real prompt used
if hasattr(llm, 'prompter') and llm.prompter.prompt is not None:
prompt = llm.prompter.prompt
else:
prompt = prompt_basic
num_prompt_tokens = get_token_count(prompt, tokenizer)
if len(docs) == 0:
# if no docs, then no sources to cite
ret, extra = answer, ''
        # no docs were actually used, but this key name means we got to the end with that answer
llm_answers['llm_answer_final'] = ret
if verbose:
print('response: %s' % ret)
yield dict(prompt=prompt, response=ret, sources=extra, num_prompt_tokens=num_prompt_tokens,
llm_answers=llm_answers)
elif answer is not None:
ret, extra = get_sources_answer(*get_answer_args, **get_answer_kwargs)
llm_answers['llm_answer_final'] = ret
if verbose:
print('response: %s' % ret)
yield dict(prompt=prompt, response=ret, sources=extra, num_prompt_tokens=num_prompt_tokens,
llm_answers=llm_answers)
return
def run_target(query='',
chain=None,
llm=None,
streamer=None,
prompter=None,
llm_answers={},
llm_answers_key='llm_answer_final',
async_output=False,
only_new_text=True,
# things below are fixed for entire _run_qa_db() call once hit get_llm() and so on
stream_output=False,
lora_weights='',
max_time=0,
sanitize_bot_response=False,
verbose=False):
# context stuff similar to used in evaluate()
import torch
device, torch_dtype, context_class = get_device_dtype()
conditional_type = hasattr(llm, 'pipeline') and hasattr(llm.pipeline, 'model') and hasattr(llm.pipeline.model,
'conditional_type') and llm.pipeline.model.conditional_type
with torch.no_grad():
have_lora_weights = lora_weights not in [no_lora_str, '', None]
context_class_cast = NullContext if device == 'cpu' or have_lora_weights else torch.autocast
if conditional_type:
# issues when casting to float16, can mess up t5 model, e.g. only when not streaming, or other odd behaviors
context_class_cast = NullContext
with context_class_cast(device):
if stream_output and streamer:
answer = None
import queue
bucket = queue.Queue()
thread = EThread(target=chain, streamer=streamer, bucket=bucket)
thread.start()
outputs = ""
output1_old = ''
res_dict = dict(prompt=query, response='', sources='', num_prompt_tokens=0, llm_answers=llm_answers)
try:
tgen0 = time.time()
for new_text in streamer:
# print("new_text: %s" % new_text, flush=True)
if bucket.qsize() > 0 or thread.exc:
thread.join()
outputs += new_text
if prompter: # and False: # FIXME: pipeline can already use prompter
if conditional_type:
if prompter.botstr:
prompt = prompter.botstr
output_with_prompt = prompt + outputs
only_new_text = False # override llm return
else:
prompt = None
output_with_prompt = outputs
only_new_text = True # override llm return
else:
prompt = None # FIXME
output_with_prompt = outputs
# don't specify only_new_text here, use get_llm() value
output1 = prompter.get_response(output_with_prompt, prompt=prompt,
only_new_text=only_new_text,
sanitize_bot_response=sanitize_bot_response)
else:
output1 = outputs
# in-place change to this key so exposed outside this generator
llm_answers[llm_answers_key] = output1
res_dict = dict(prompt=query, response=output1, sources='', num_prompt_tokens=0,
llm_answers=llm_answers)
if output1 != output1_old:
yield res_dict
output1_old = output1
if time.time() - tgen0 > max_time:
if verbose:
print("Took too long EThread for %s" % (time.time() - tgen0), flush=True)
break
# yield if anything left over as can happen (FIXME: Understand better)
yield res_dict
except BaseException:
# if any exception, raise that exception if was from thread, first
if thread.exc:
raise thread.exc
raise
finally:
# in case no exception and didn't join with thread yet, then join
if not thread.exc:
answer = thread.join()
if isinstance(answer, dict):
if 'output_text' in answer:
answer = answer['output_text']
elif 'output' in answer:
answer = answer['output']
elif 'resolution' in answer:
answer = answer['resolution']
# in case raise StopIteration or broke queue loop in streamer, but still have exception
if thread.exc:
raise thread.exc
else:
if async_output:
import asyncio
answer = asyncio.run(chain())
else:
answer = chain()
if isinstance(answer, dict):
if 'output_text' in answer:
answer = answer['output_text']
elif 'output' in answer:
answer = answer['output']
elif 'resolution' in answer:
answer = answer['resolution']
llm_answers[llm_answers_key] = answer
if verbose:
print("answer: %s" % answer, flush=True)
return answer
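# Note (sketch): run_target() is a generator that yields streaming result dicts and returns the
# final answer via its return value, so callers consume it with `answer = yield from run_target_func(...)`
# as done in _run_qa_db() and run_hyde().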
def get_docs_with_score(query, k_db,
filter_kwargs,
filter_kwargs_backup,
db, db_type, text_context_list=None,
chunk_id_filter=None,
verbose=False):
docs_with_score = _get_docs_with_score(query, k_db,
filter_kwargs,
db, db_type,
text_context_list=text_context_list,
chunk_id_filter=chunk_id_filter,
verbose=verbose)
if len(docs_with_score) == 0 and filter_kwargs != filter_kwargs_backup:
docs_with_score = _get_docs_with_score(query, k_db,
filter_kwargs_backup,
db, db_type,
text_context_list=text_context_list,
chunk_id_filter=chunk_id_filter,
verbose=verbose)
return docs_with_score
def _get_docs_with_score(query, k_db,
filter_kwargs,
db, db_type, text_context_list=None,
chunk_id_filter=None,
verbose=False):
docs_with_score = []
if text_context_list:
docs_with_score += [(x, x.metadata.get('score', 1.0)) for x in text_context_list]
    # deal with a bug in chroma where, if there are (say) 234 doc chunks and one asks for 233+, it fails due to reduction misbehavior
if hasattr(db, '_embedding_function') and isinstance(db._embedding_function, FakeEmbeddings):
top_k_docs = -1
# don't add text_context_list twice
db_documents, db_metadatas = get_docs_and_meta(db, top_k_docs, filter_kwargs=filter_kwargs,
text_context_list=None)
# sort by order given to parser (file_id) and any chunk_id if chunked
doc_file_ids = [x.get('file_id', 0) for x in db_metadatas]
doc_chunk_ids = [x.get('chunk_id', 0) for x in db_metadatas]
docs_with_score_fake = [(Document(page_content=result[0], metadata=result[1] or {}), 1.0)
for result in zip(db_documents, db_metadatas)]
docs_with_score_fake = [x for fx, cx, x in
sorted(zip(doc_file_ids, doc_chunk_ids, docs_with_score_fake),
key=lambda x: (x[0], x[1]))
]
docs_with_score += docs_with_score_fake
elif db is not None and db_type in ['chroma', 'chroma_old']:
t0 = time.time()
docs_with_score_chroma = sim_search(db, query=query, k=k_db, with_score=True,
filter_kwargs=filter_kwargs,
chunk_id_filter=chunk_id_filter,
verbose=verbose)
docs_with_score += docs_with_score_chroma
if verbose:
print("sim_search in %s" % (time.time() - t0), flush=True)
elif db is not None:
docs_with_score_other = db.similarity_search_with_score(query, k=k_db, **filter_kwargs)
docs_with_score += docs_with_score_other
# set in metadata original order of docs
[x[0].metadata.update(orig_index=ii) for ii, x in enumerate(docs_with_score)]
return docs_with_score
def select_docs_with_score(docs_with_score, top_k_docs, one_doc_size):
if top_k_docs > 0:
docs_with_score = docs_with_score[:top_k_docs]
elif one_doc_size is not None:
        # keep only the first doc, truncated to one_doc_size characters, preserving the (Document, score) shape
        doc0, score0 = docs_with_score[0]
        doc0.page_content = doc0.page_content[:one_doc_size]
        docs_with_score = [(doc0, score0)]
else:
docs_with_score = []
return docs_with_score
class H2OCharacterTextSplitter(RecursiveCharacterTextSplitter):
@classmethod
def from_huggingface_tokenizer(cls, tokenizer: Any, **kwargs: Any) -> TextSplitter:
def _huggingface_tokenizer_length(text: str) -> int:
return get_token_count(text, tokenizer)
return cls(length_function=_huggingface_tokenizer_length, **kwargs)
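# Illustrative sketch (hypothetical helper, not called by the library): token-aware splitting,
# so chunk_size is measured in model tokens via the HF tokenizer rather than in characters.
def _example_token_split(tokenizer, texts, chunk_size=512):  # pragma: no cover
    splitter = H2OCharacterTextSplitter.from_huggingface_tokenizer(tokenizer,
                                                                    chunk_size=chunk_size,
                                                                    chunk_overlap=0)
    return splitter.split_text('\n\n'.join(texts))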
def split_merge_docs(docs_with_score, tokenizer=None, max_input_tokens=None, docs_token_handling=None,
joiner=docs_joiner_default,
do_split=True,
verbose=False):
# NOTE: Could use joiner=\n\n, but if PDF and continues, might want just full continue with joiner=''
# NOTE: assume max_input_tokens already processed if was -1 and accounts for model_max_len
if docs_token_handling in ['chunk']:
return docs_with_score, 0
elif docs_token_handling in [None, 'split_or_merge']:
assert tokenizer
tokens_before_split = [get_token_count(x + docs_joiner_default, tokenizer) for x in
[x[0].page_content for x in docs_with_score]]
# skip split if not necessary, since expensive for some reason
do_split &= any([x > max_input_tokens for x in tokens_before_split])
if do_split:
if verbose:
print('tokens_before_split=%s' % tokens_before_split, flush=True)
# see if need to split
# account for joiner tokens
joiner_tokens = get_token_count(docs_joiner_default, tokenizer)
chunk_size = max_input_tokens - joiner_tokens * len(docs_with_score)
text_splitter = H2OCharacterTextSplitter.from_huggingface_tokenizer(
tokenizer, chunk_size=chunk_size, chunk_overlap=0
)
[x[0].metadata.update(dict(docscore=x[1], doci=doci, ntokens=tokens_before_split[doci])) for doci, x in
enumerate(docs_with_score)]
docs = [x[0] for x in docs_with_score]
# only split those that need to be split, else recursive splitter goes too nuts and takes too long
docs_to_split = [x for x in docs if x.metadata['ntokens'] > chunk_size]
docs_to_not_split = [x for x in docs if x.metadata['ntokens'] <= chunk_size]
docs_split_new = flatten_list([text_splitter.split_documents([x]) for x in docs_to_split])
docs_new = docs_to_not_split + docs_split_new
doci_new = [x.metadata['doci'] for x in docs_new]
# order back by doci
docs_new = [x for _, x in sorted(zip(doci_new, docs_new), key=lambda pair: pair[0])]
docs_with_score = [(x, x.metadata['docscore']) for x in docs_new]
tokens_after_split = [get_token_count(x + docs_joiner_default, tokenizer) for x in
[x[0].page_content for x in docs_with_score]]
if verbose:
print('tokens_after_split=%s' % tokens_after_split, flush=True)
docs_with_score_new = []
k = 0
while k < len(docs_with_score):
            # use max_input_tokens so the model gets no more than max_input_tokens per map step
top_k_docs, one_doc_size, num_doc_tokens = \
get_docs_tokens(tokenizer,
text_context_list=[x[0].page_content for x in docs_with_score[k:]],
max_input_tokens=max_input_tokens)
docs_with_score1 = select_docs_with_score(docs_with_score[k:], top_k_docs, one_doc_size)
new_score = docs_with_score1[0][1]
new_page_content = joiner.join([x[0].page_content for x in docs_with_score1])
new_metadata = docs_with_score1[0][0].metadata # just use first chunk's metadata for now
doc1 = Document(page_content=new_page_content, metadata=new_metadata)
docs_with_score_new.append((doc1, new_score))
if do_split:
assert one_doc_size is None, "Split failed: %s" % one_doc_size
elif one_doc_size is not None:
# chopped
assert top_k_docs == 1
assert top_k_docs >= 1
k += top_k_docs
tokens_after_merge = [get_token_count(x + docs_joiner_default, tokenizer) for x in
[x[0].page_content for x in docs_with_score_new]]
if verbose:
print('tokens_after_merge=%s' % tokens_after_merge, flush=True)
max_tokens_after_merge = max(tokens_after_merge) if tokens_after_merge else 0
return docs_with_score_new, max_tokens_after_merge
else:
raise ValueError("No such docs_token_handling=%s" % docs_token_handling)
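# Illustrative note (sketch): with docs_token_handling='chunk' the (doc, score) list passes through
# unchanged; with 'split_or_merge' (the default) oversized docs are split and small adjacent docs are
# merged with `joiner` so each resulting doc fits within max_input_tokens, e.g.:
#   docs_with_score, max_tokens = split_merge_docs(docs_with_score, tokenizer, max_input_tokens=3000)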
def get_single_document(document_choice, db, extension=None):
if isinstance(document_choice, str):
document_choice = [document_choice]
if document_choice and document_choice[0] == DocumentChoice.ALL.value:
document_choice.remove(DocumentChoice.ALL.value)
if document_choice is None:
return None
if len(document_choice) > 0:
        # use what the user gave, taking the first matching document if a choice has to be made
document_choice_agent = [x for x in document_choice if x.endswith(extension)]
elif len(document_choice) == 0:
# means user didn't choose, see if can auto-choose
document_choice_agent = sorted(set([x['source'] for x in get_metadatas(db, k_max=1000) if
extension is None or x['source'].endswith(extension)]))
else:
document_choice_agent = document_choice
document_choice_agent = [x for x in document_choice_agent if x.endswith(extension)]
if len(document_choice_agent) > 0:
return document_choice_agent[0]
else:
return None
def run_hyde(*args, **kwargs):
"""
:param hyde_level: HYDE level
0: No HYDE
1: Use non-document-based LLM response and original query for embedding query
2: Use document-based LLM response and original query for embedding query
3+: continue iterations of embedding prior answer and getting new response
:param hyde_template: Use HYDE approach (https://arxiv.org/abs/2212.10496)
None, 'None', 'auto' uses internal value and enable
'off' means disable
'{query}' is minimal template one can pass
"""
# get vars
query = kwargs['query']
sim_kwargs = kwargs['sim_kwargs']
run_target_func = kwargs['run_target_func']
prompter = kwargs['prompter']
hyde_level = kwargs['hyde_level']
hyde_template = kwargs['hyde_template']
verbose = kwargs['verbose']
show_rank = kwargs['show_rank']
answer_with_sources = kwargs['answer_with_sources']
get_answer_kwargs = kwargs['get_answer_kwargs']
append_sources_to_answer = kwargs['append_sources_to_answer']
prompt_basic = kwargs['prompt_basic']
# get llm answer
auto_hyde = """Answer this question with vibrant details in order for some NLP embedding model to use that answer as better query than original question: {query}"""
if hyde_template in auto_choices:
hyde_template = auto_hyde
elif isinstance(hyde_template, str):
assert '{query}' in hyde_template, "Require at least {query} in HYDE template, but got: %s" % hyde_template
else:
raise TypeError("Bad Type hyde_template=%s" % hyde_template)
hyde_higher_template = """{query}\n\n{answer}"""
# default
llm_answers = {}
hyde_chain = sim_kwargs.copy()
# no-doc chain first if done
hyde_chain['query'] = hyde_template.format(query=query)
hyde_chain['db'] = None
hyde_chain['text_context_list'] = []
for hyde_level in range(hyde_level):
if verbose:
print("hyde_level=%d embedding_query=%s" % (hyde_level, hyde_chain['query']), flush=True)
# run chain
docs, chain, scores, \
num_docs_before_cut, \
use_llm_if_no_docs, top_k_docs_max_show, \
llm, model_name, streamer, prompt_type_out, async_output, only_new_text = \
get_chain(**hyde_chain)
# get answer, updates llm_answers internally too
llm_answers_key = 'llm_answers_hyde_level_%d' % hyde_level
# for LLM, query remains same each time
answer = yield from run_target_func(query=query,
chain=chain,
llm=llm,
streamer=streamer,
prompter=prompter,
llm_answers=llm_answers,
llm_answers_key=llm_answers_key,
async_output=async_output,
only_new_text=only_new_text)
if answer:
# give back what have so far with any sources (what above yield doesn't do)
get_answer_args = tuple([query, docs,
answer,
scores, show_rank,
answer_with_sources,
append_sources_to_answer])
ret, extra = get_sources_answer(*get_answer_args, **get_answer_kwargs)
# FIXME: Something odd, UI gets stuck and no more yields if pass these sources inside ret
# https://github.com/gradio-app/gradio/issues/6100
# print("ret: %s" % ret)
# yield dict(prompt=prompt_basic, response=ret, sources=extra, num_prompt_tokens=0, llm_answers=llm_answers)
# try yield after
# print("answer: %s" % answer)
yield dict(prompt=prompt_basic, response=answer, sources=extra, num_prompt_tokens=0,
llm_answers=llm_answers)
# update embedding query
hyde_chain['query_embedding'] = hyde_higher_template.format(query=query, answer=answer)
# update hyde_chain with doc version from now on
hyde_chain['db'] = kwargs['db']
hyde_chain['text_context_list'] = kwargs['text_context_list']
return hyde_chain['query_embedding'], llm_answers
def get_chain(query=None,
query_embedding=None,
iinput=None,
context=None, # FIXME: https://github.com/hwchase17/langchain/issues/6638
use_openai_model=False, use_openai_embedding=False,
first_para=False, text_limit=None, top_k_docs=4, chunk=True, chunk_size=512,
# urls
use_unstructured=True,
use_playwright=False,
use_selenium=False,
# pdfs
use_pymupdf='auto',
use_unstructured_pdf='auto',
use_pypdf='auto',
enable_pdf_ocr='auto',
enable_pdf_doctr='auto',
try_pdf_as_html='auto',
# images
enable_ocr=False,
enable_doctr=False,
enable_pix2struct=False,
enable_captions=True,
captions_model=None,
caption_loader=None,
doctr_loader=None,
pix2struct_loader=None,
# json
jq_schema='.[]',
langchain_mode_paths=None,
langchain_mode_types=None,
detect_user_path_changes_every_query=False,
db_type='faiss',
model_name=None,
inference_server='',
max_new_tokens=None,
langchain_only_model=False,
hf_embedding_model=None,
migrate_embedding_model=False,
auto_migrate_db=False,
prompter=None,
prompt_type=None,
prompt_dict=None,
system_prompt=None,
cut_distance=1.1,
add_chat_history_to_context=True, # FIXME: https://github.com/hwchase17/langchain/issues/6638
add_search_to_context=False,
keep_sources_in_context=False,
memory_restriction_level=0,
top_k_docs_max_show=10,
load_db_if_exists=False,
db=None,
langchain_mode=None,
langchain_action=None,
langchain_agents=None,
document_subset=DocumentSubset.Relevant.name,
document_choice=[DocumentChoice.ALL.value],
pre_prompt_query=None,
prompt_query=None,
pre_prompt_summary=None,
prompt_summary=None,
text_context_list=None,
chat_conversation=None,
n_jobs=-1,
# beyond run_db_query:
llm=None,
llm_kwargs=None,
llm_explore=None,
streamer=None,
prompt_type_out=None,
only_new_text=None,
tokenizer=None,
verbose=False,
docs_ordering_type=docs_ordering_types_default,
min_max_new_tokens=256,
max_input_tokens=-1,
attention_sinks=False,
truncation_generation=False,
docs_token_handling=None,
docs_joiner=None,
stream_output=True,
async_output=True,
gradio_server=False,
# local
auto_reduce_chunks=True,
max_chunks=100,
total_tokens_for_docs=None,
use_llm_if_no_docs=None,
headsize=50,
max_time=None,
query_action=None,
summarize_action=None,
):
if inference_server is None:
inference_server = ''
assert hf_embedding_model is not None
assert langchain_agents is not None # should be at least []
if text_context_list is None:
text_context_list = []
    # same logic as in get_limited_prompt, but needed here for summarization/extraction since only the query path goes through that
if gradio_server or not inference_server:
# can listen to truncation_generation
pass
else:
# these don't support allowing going beyond total context
truncation_generation = True
# default nothing
docs = []
target = None
scores = []
num_docs_before_cut = 0
use_llm_if_no_docs = True
if len(text_context_list) > 0:
# turn into documents to make easy to manage and add meta
# try to account for summarization vs. query
chunk_id = 0 if query_action else -1
text_context_list = [
Document(page_content=x, metadata=dict(source='text_context_list', score=1.0, chunk_id=chunk_id)) for x
in text_context_list]
if add_search_to_context:
params = {
"engine": "duckduckgo",
"gl": "us",
"hl": "en",
}
search = H2OSerpAPIWrapper(params=params)
# if doing search, allow more docs
docs_search, top_k_docs = search.get_search_documents(query,
query_action=query_action,
chunk=chunk, chunk_size=chunk_size,
db_type=db_type,
headsize=headsize,
top_k_docs=top_k_docs)
text_context_list = docs_search + text_context_list
add_search_to_context &= len(docs_search) > 0
top_k_docs_max_show = max(top_k_docs_max_show, len(docs_search))
if LangChainAgent.SMART.value in langchain_agents:
# doesn't really work for non-OpenAI models unless larger
# but allow for now any model
if True:
# FIXME: streams first llm if both same llm, but not final answer part
# FIXME: If 2 llms, then no streaming from ideation_llm, only from 2nd llm
from langchain_experimental.smart_llm import SmartLLMChain
ideation_llm = llm_explore if llm_explore is not None else llm
critique_resolution_llm = llm
prompt = PromptTemplate.from_template(query)
chain = SmartLLMChain(
ideation_llm=ideation_llm,
llm=critique_resolution_llm,
n_ideas=3,
verbose=verbose,
prompt=prompt,
)
chain_kwargs = {}
if async_output:
chain_func = chain.arun
else:
chain_func = chain
target = wrapped_partial(chain_func, chain_kwargs)
docs = []
scores = []
num_docs_before_cut = 0
use_llm_if_no_docs = True
return docs, target, scores, num_docs_before_cut, use_llm_if_no_docs, top_k_docs_max_show, \
llm, model_name, streamer, prompt_type_out, async_output, only_new_text
from src.output_parser import H2OMRKLOutputParser
from langchain.agents import AgentType, load_tools, initialize_agent, create_vectorstore_agent, \
create_pandas_dataframe_agent, create_json_agent, create_csv_agent
from langchain.agents.agent_toolkits import VectorStoreInfo, VectorStoreToolkit, create_python_agent, JsonToolkit
if LangChainAgent.SEARCH.value in langchain_agents:
output_parser = H2OMRKLOutputParser()
tools = load_tools(["serpapi"], llm=llm, serpapi_api_key=os.environ.get('SERPAPI_API_KEY'))
if does_support_functiontools(inference_server, model_name):
agent_type = AgentType.OPENAI_FUNCTIONS
agent_executor_kwargs = {"handle_parsing_errors": True, 'output_parser': output_parser}
else:
agent_type = AgentType.ZERO_SHOT_REACT_DESCRIPTION
agent_executor_kwargs = {'output_parser': output_parser}
chain = initialize_agent(tools, llm, agent=agent_type,
agent_executor_kwargs=agent_executor_kwargs,
agent_kwargs=dict(output_parser=output_parser,
format_instructions=output_parser.get_format_instructions()),
output_parser=output_parser,
max_iterations=10,
max_execution_time=max_time,
verbose=True)
chain_kwargs = dict(input=query)
target = wrapped_partial(chain, chain_kwargs)
docs = []
scores = []
num_docs_before_cut = 0
use_llm_if_no_docs = True
return docs, target, scores, num_docs_before_cut, use_llm_if_no_docs, top_k_docs_max_show, \
llm, model_name, streamer, prompt_type_out, async_output, only_new_text
if LangChainAgent.COLLECTION.value in langchain_agents:
if db:
output_parser = H2OMRKLOutputParser()
vectorstore_info = VectorStoreInfo(
name=langchain_mode,
description="DataBase of text from PDFs, Image Captions, or web URL content",
vectorstore=db,
)
toolkit = VectorStoreToolkit(vectorstore_info=vectorstore_info)
chain = create_vectorstore_agent(llm=llm, toolkit=toolkit,
agent_executor_kwargs=dict(output_parser=output_parser),
verbose=True, max_execution_time=max_time)
chain_kwargs = dict(input=query)
target = wrapped_partial(chain, chain_kwargs)
use_llm_if_no_docs = True
return docs, target, scores, num_docs_before_cut, use_llm_if_no_docs, top_k_docs_max_show, \
llm, model_name, streamer, prompt_type_out, async_output, only_new_text
if LangChainAgent.PYTHON.value in langchain_agents:
# non-thread safe things inside worker, but only after in fork, so ok
if does_support_functiontools(inference_server, model_name):
chain = create_python_agent(
llm=llm,
tool=PythonREPLTool(),
verbose=True,
agent_type=AgentType.OPENAI_FUNCTIONS,
agent_executor_kwargs={"handle_parsing_errors": True, 'max_execution_time': max_time},
max_execution_time=max_time,
)
chain_kwargs = dict(input=query)
target = wrapped_partial(chain, chain_kwargs)
use_llm_if_no_docs = True
return docs, target, scores, num_docs_before_cut, use_llm_if_no_docs, top_k_docs_max_show, \
llm, model_name, streamer, prompt_type_out, async_output, only_new_text
if LangChainAgent.PANDAS.value in langchain_agents:
document_choice = get_single_document(document_choice, db, extension='csv')
if document_choice and does_support_functiontools(inference_server, model_name):
df = pd.read_csv(document_choice)
chain = create_pandas_dataframe_agent(
llm,
df,
verbose=True,
agent_type=AgentType.OPENAI_FUNCTIONS,
max_execution_time=max_time,
)
chain_kwargs = dict(input=query)
target = wrapped_partial(chain, chain_kwargs)
docs = []
scores = []
num_docs_before_cut = 0
use_llm_if_no_docs = True
return docs, target, scores, num_docs_before_cut, use_llm_if_no_docs, top_k_docs_max_show, \
llm, model_name, streamer, prompt_type_out, async_output, only_new_text
if LangChainAgent.JSON.value in langchain_agents:
document_choice = get_single_document(document_choice, db, extension='json')
if document_choice and does_support_functiontools(inference_server, model_name):
# with open('src/openai.yaml') as f:
# data = yaml.load(f, Loader=yaml.FullLoader)
            # get_single_document() returns a single path string
            with open(document_choice, 'rt') as f:
data = json.loads(f.read())
json_spec = JsonSpec(dict_=data, max_value_length=4000)
json_toolkit = JsonToolkit(spec=json_spec)
chain = create_json_agent(
llm=llm, toolkit=json_toolkit, verbose=True, max_execution_time=max_time,
)
chain_kwargs = dict(input=query)
target = wrapped_partial(chain, chain_kwargs)
docs = []
scores = []
num_docs_before_cut = 0
use_llm_if_no_docs = True
return docs, target, scores, num_docs_before_cut, use_llm_if_no_docs, top_k_docs_max_show, \
llm, model_name, streamer, prompt_type_out, async_output, only_new_text
if LangChainAgent.CSV.value in langchain_agents:
document_choice = get_single_document(document_choice, db, extension='csv')
if document_choice:
if does_support_functiontools(inference_server, model_name):
chain = create_csv_agent(
llm,
document_choice,
verbose=True, max_execution_time=max_time,
agent_type=AgentType.OPENAI_FUNCTIONS,
)
else:
chain = create_csv_agent(
llm,
document_choice,
verbose=True, max_execution_time=max_time,
agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
chain_kwargs = dict(input=query)
target = wrapped_partial(chain, chain_kwargs)
docs = []
scores = []
num_docs_before_cut = 0
use_llm_if_no_docs = True
return docs, target, scores, num_docs_before_cut, use_llm_if_no_docs, top_k_docs_max_show, \
llm, model_name, streamer, prompt_type_out, async_output, only_new_text
# https://github.com/hwchase17/langchain/issues/1946
    # FIXME: Seems to be no way to get the size of the chroma db up front to limit top_k_docs,
    # to avoid the "Chroma collection MyData contains fewer than 4 elements." type logger error
if top_k_docs == -1:
k_db = 1000 if db_type in ['chroma', 'chroma_old'] else 100
else:
# top_k_docs=100 works ok too
k_db = 1000 if db_type in ['chroma', 'chroma_old'] else top_k_docs
# FIXME: For All just go over all dbs instead of a separate db for All
if not detect_user_path_changes_every_query and db is not None:
# avoid looking at user_path during similarity search db handling,
# if already have db and not updating from user_path every query
# but if db is None, no db yet loaded (e.g. from prep), so allow user_path to be whatever it was
if langchain_mode_paths is None:
langchain_mode_paths = {}
langchain_mode_paths = langchain_mode_paths.copy()
langchain_mode_paths[langchain_mode] = None
    # use_openai_embedding and hf_embedding_model passed into make_db may get changed there,
    # but that's ok since they are not used below or in the calling functions
db, num_new_sources, new_sources_metadata = make_db(use_openai_embedding=use_openai_embedding,
hf_embedding_model=hf_embedding_model,
migrate_embedding_model=migrate_embedding_model,
auto_migrate_db=auto_migrate_db,
first_para=first_para, text_limit=text_limit,
chunk=chunk, chunk_size=chunk_size,
# urls
use_unstructured=use_unstructured,
use_playwright=use_playwright,
use_selenium=use_selenium,
# pdfs
use_pymupdf=use_pymupdf,
use_unstructured_pdf=use_unstructured_pdf,
use_pypdf=use_pypdf,
enable_pdf_ocr=enable_pdf_ocr,
enable_pdf_doctr=enable_pdf_doctr,
try_pdf_as_html=try_pdf_as_html,
# images
enable_ocr=enable_ocr,
enable_doctr=enable_doctr,
enable_pix2struct=enable_pix2struct,
enable_captions=enable_captions,
captions_model=captions_model,
caption_loader=caption_loader,
doctr_loader=doctr_loader,
pix2struct_loader=pix2struct_loader,
# json
jq_schema=jq_schema,
langchain_mode=langchain_mode,
langchain_mode_paths=langchain_mode_paths,
langchain_mode_types=langchain_mode_types,
db_type=db_type,
load_db_if_exists=load_db_if_exists,
db=db,
n_jobs=n_jobs,
verbose=verbose)
num_docs_before_cut = 0
use_template = not use_openai_model and prompt_type not in ['plain'] or langchain_only_model
template, template_if_no_docs, auto_reduce_chunks, query = \
get_template(query, iinput,
pre_prompt_query, prompt_query,
pre_prompt_summary, prompt_summary,
langchain_action,
True, # just to overestimate prompting
auto_reduce_chunks,
add_search_to_context)
    # use min_max_new_tokens instead of max_new_tokens when computing the input budget, to allow the largest input possible
    # otherwise max_input_tokens would be interpreted as smaller than necessary and the user input would get over-restricted
max_input_tokens_default = get_max_input_tokens(llm=llm, tokenizer=tokenizer, inference_server=inference_server,
model_name=model_name, max_new_tokens=min_max_new_tokens)
if max_input_tokens >= 0:
max_input_tokens = min(max_input_tokens_default, max_input_tokens)
else:
max_input_tokens = max_input_tokens_default
model_max_length = get_model_max_length(llm=llm, tokenizer=tokenizer, inference_server=inference_server,
model_name=model_name)
if hasattr(db, '_persist_directory'):
lock_file = get_db_lock_file(db, lock_type='sim')
else:
base_path = 'locks'
base_path = makedirs(base_path, exist_ok=True, tmp_ok=True, use_base=True)
name_path = "sim.lock"
lock_file = os.path.join(base_path, name_path)
# GET FILTER
if not is_chroma_db(db):
# only chroma supports filtering
chunk_id_filter = None
filter_kwargs = {}
filter_kwargs_backup = {}
else:
import logging
logging.getLogger("chromadb").setLevel(logging.ERROR)
assert document_choice is not None, "Document choice was None"
if isinstance(db, Chroma):
filter_kwargs_backup = {} # shouldn't ever need backup
# chroma >= 0.4
if len(document_choice) == 0 or len(document_choice) >= 1 and document_choice[
0] == DocumentChoice.ALL.value:
chunk_id_filter = 0 if query_action else -1
filter_kwargs = {"filter": {"chunk_id": {"$gte": 0}}} if query_action else \
{"filter": {"chunk_id": {"$eq": -1}}}
else:
if document_choice[0] == DocumentChoice.ALL.value:
document_choice = document_choice[1:]
if len(document_choice) == 0:
chunk_id_filter = None
filter_kwargs = {}
elif len(document_choice) > 1:
chunk_id_filter = None
or_filter = [
{"$and": [dict(source={"$eq": x}), dict(chunk_id={"$gte": 0})]} if query_action else {
"$and": [dict(source={"$eq": x}), dict(chunk_id={"$eq": -1})]}
for x in document_choice]
filter_kwargs = dict(filter={"$or": or_filter})
else:
chunk_id_filter = None
# still chromadb UX bug, have to do different thing for 1 vs. 2+ docs when doing filter
one_filter = \
[{"source": {"$eq": x}, "chunk_id": {"$gte": 0}} if query_action else {
"source": {"$eq": x},
"chunk_id": {
"$eq": -1}}
for x in document_choice][0]
filter_kwargs = dict(filter={"$and": [dict(source=one_filter['source']),
dict(chunk_id=one_filter['chunk_id'])]})
else:
# migration for chroma < 0.4
if len(document_choice) == 0 or len(document_choice) >= 1 and document_choice[
0] == DocumentChoice.ALL.value:
chunk_id_filter = 0 if query_action else -1
filter_kwargs = {"filter": {"chunk_id": {"$gte": 0}}} if query_action else \
{"filter": {"chunk_id": {"$eq": -1}}}
filter_kwargs_backup = {"filter": {"chunk_id": {"$gte": 0}}}
elif len(document_choice) >= 2:
if document_choice[0] == DocumentChoice.ALL.value:
document_choice = document_choice[1:]
chunk_id_filter = None
or_filter = [
{"source": {"$eq": x}, "chunk_id": {"$gte": 0}} if query_action else {"source": {"$eq": x},
"chunk_id": {
"$eq": -1}}
for x in document_choice]
filter_kwargs = dict(filter={"$or": or_filter})
or_filter_backup = [
{"source": {"$eq": x}} if query_action else {"source": {"$eq": x}}
for x in document_choice]
filter_kwargs_backup = dict(filter={"$or": or_filter_backup})
elif len(document_choice) == 1:
chunk_id_filter = None
# degenerate UX bug in chroma
one_filter = \
[{"source": {"$eq": x}, "chunk_id": {"$gte": 0}} if query_action else {"source": {"$eq": x},
"chunk_id": {
"$eq": -1}}
for x in document_choice][0]
filter_kwargs = dict(filter=one_filter)
one_filter_backup = \
[{"source": {"$eq": x}} if query_action else {"source": {"$eq": x}}
for x in document_choice][0]
filter_kwargs_backup = dict(filter=one_filter_backup)
else:
chunk_id_filter = None
# shouldn't reach
filter_kwargs = {}
filter_kwargs_backup = {}
# GET DOCS
if document_subset == DocumentSubset.TopKSources.name or query in [None, '', '\n']:
db_documents, db_metadatas = get_docs_and_meta(db, top_k_docs, filter_kwargs=filter_kwargs,
text_context_list=text_context_list,
chunk_id_filter=chunk_id_filter)
if len(db_documents) == 0 and filter_kwargs_backup != filter_kwargs:
db_documents, db_metadatas = get_docs_and_meta(db, top_k_docs, filter_kwargs=filter_kwargs_backup,
text_context_list=text_context_list,
chunk_id_filter=chunk_id_filter)
if top_k_docs == -1:
top_k_docs = len(db_documents)
# similar to langchain's chroma's _results_to_docs_and_scores
docs_with_score = [(Document(page_content=result[0], metadata=result[1] or {}), 0)
for result in zip(db_documents, db_metadatas)]
# remove empty content, e.g. from exception version of document, so don't include empty stuff in summarization
docs_with_score = [x for x in docs_with_score if x[0].page_content]
# set in metadata original order of docs
[x[0].metadata.update(orig_index=ii) for ii, x in enumerate(docs_with_score)]
# order documents
doc_hashes = [x.get('doc_hash', 'None') if x.get('doc_hash', 'None') is not None else 'None' for x in
db_metadatas]
if query_action:
doc_chunk_ids = [x.get('chunk_id', 0) if x.get('chunk_id', 0) is not None else 0 for x in db_metadatas]
docs_with_score2 = [x for hx, cx, x in
sorted(zip(doc_hashes, doc_chunk_ids, docs_with_score), key=lambda x: (x[0], x[1]))
if cx >= 0]
else:
assert summarize_action
doc_chunk_ids = [x.get('chunk_id', -1) if x.get('chunk_id', -1) is not None else -1 for x in db_metadatas]
docs_with_score2 = [x for hx, cx, x in
sorted(zip(doc_hashes, doc_chunk_ids, docs_with_score), key=lambda x: (x[0], x[1]))
if cx == -1
]
if len(docs_with_score2) == 0 and len(docs_with_score) > 0:
# old database without chunk_id, migration added 0 but didn't make -1 as that would be expensive
# just do again and relax filter, let summarize operate on actual chunks if nothing else
docs_with_score2 = [x for hx, cx, x in
sorted(zip(doc_hashes, doc_chunk_ids, docs_with_score),
key=lambda x: (x[0], x[1]))
]
docs_with_score = docs_with_score2
docs_with_score = docs_with_score[:top_k_docs]
docs = [x[0] for x in docs_with_score]
scores = [x[1] for x in docs_with_score]
else:
# have query
# for db=None too
with filelock.FileLock(lock_file):
docs_with_score = get_docs_with_score(query_embedding, k_db,
filter_kwargs,
filter_kwargs_backup,
db, db_type,
text_context_list=text_context_list,
chunk_id_filter=chunk_id_filter,
verbose=verbose)
# SELECT PROMPT + DOCS
tokenizer = get_tokenizer(db=db, llm=llm, tokenizer=tokenizer, inference_server=inference_server,
use_openai_model=use_openai_model,
db_type=db_type)
# NOTE: if map_reduce, then no need to auto reduce chunks
if query_action and (top_k_docs == -1 or auto_reduce_chunks):
top_k_docs_tokenize = 100
docs_with_score = docs_with_score[:top_k_docs_tokenize]
if docs_with_score:
estimated_prompt_no_docs = template.format(context='', question=query)
else:
estimated_prompt_no_docs = template_if_no_docs.format(context='', question=query)
chat = True # FIXME?
# first docs_with_score are most important with highest score
estimated_full_prompt, \
instruction, iinput, context, \
num_prompt_tokens, max_new_tokens, \
num_prompt_tokens0, num_prompt_tokens_actual, \
chat_index, external_handle_chat_conversation, \
top_k_docs_trial, one_doc_size, \
truncation_generation = \
get_limited_prompt(estimated_prompt_no_docs,
iinput,
tokenizer,
prompter=prompter,
inference_server=inference_server,
prompt_type=prompt_type,
prompt_dict=prompt_dict,
chat=chat,
max_new_tokens=max_new_tokens,
system_prompt=system_prompt,
context=context,
chat_conversation=chat_conversation,
text_context_list=[x[0].page_content for x in docs_with_score],
keep_sources_in_context=keep_sources_in_context,
model_max_length=model_max_length,
memory_restriction_level=memory_restriction_level,
langchain_mode=langchain_mode,
add_chat_history_to_context=add_chat_history_to_context,
min_max_new_tokens=min_max_new_tokens,
max_input_tokens=max_input_tokens,
truncation_generation=truncation_generation,
gradio_server=gradio_server,
)
# get updated llm
llm_kwargs.update(max_new_tokens=max_new_tokens, context=context, iinput=iinput)
if external_handle_chat_conversation:
# should already have attribute, checking sanity
assert hasattr(llm, 'chat_conversation')
llm_kwargs.update(chat_conversation=chat_conversation[chat_index:])
llm, model_name, streamer, prompt_type_out, async_output, only_new_text, gradio_server = \
get_llm(**llm_kwargs)
# avoid craziness
if 0 < top_k_docs_trial < max_chunks:
if top_k_docs == -1:
top_k_docs = top_k_docs_trial
else:
top_k_docs = min(top_k_docs, top_k_docs_trial)
elif top_k_docs_trial >= max_chunks:
top_k_docs = max_chunks
docs_with_score = select_docs_with_score(docs_with_score, top_k_docs, one_doc_size)
elif query_action:
# no limitation or auto-filling, just literal top_k_docs
docs_with_score = select_docs_with_score(docs_with_score, top_k_docs, None)
else:
assert not query_action and summarize_action, "Bad action"
one_doc_size = None
if total_tokens_for_docs is not None:
# used to limit tokens for summarization, e.g. public instance
top_k_docs, one_doc_size, num_doc_tokens = \
get_docs_tokens(tokenizer,
text_context_list=[x[0].page_content for x in docs_with_score],
max_input_tokens=total_tokens_for_docs)
# filter by top_k_docs and maybe one_doc_size
docs_with_score = select_docs_with_score(docs_with_score, top_k_docs, one_doc_size)
# group docs if desired/can to fill context
docs_with_score, max_doc_tokens = split_merge_docs(docs_with_score,
tokenizer,
max_input_tokens=max_input_tokens,
docs_token_handling=docs_token_handling,
joiner=docs_joiner,
verbose=verbose)
        # max_input_tokens was computed with min_max_new_tokens as max_new_tokens, so assume docs filled up to that budget,
        # but use the actual largest per-chunk token count when recomputing max_new_tokens below
data_point = dict(context=context, instruction=query, input=iinput)
prompt_basic = prompter.generate_prompt(data_point)
estimated_prompt_no_docs = template.format(text=prompt_basic)
num_prompt_basic_tokens = get_token_count(estimated_prompt_no_docs, tokenizer)
if truncation_generation:
max_new_tokens = model_max_length - max_doc_tokens - num_prompt_basic_tokens
if os.getenv('HARD_ASSERTS') is not None:
# imperfect calculation, so will see how testing does
assert max_new_tokens >= min_max_new_tokens - 50, "%s %s" % (max_new_tokens, min_max_new_tokens)
# get updated llm
llm_kwargs.update(max_new_tokens=max_new_tokens)
llm, model_name, streamer, prompt_type_out, async_output, only_new_text, gradio_server = \
get_llm(**llm_kwargs)
# now done with all docs and their sizes, re-order docs if required
if query_action:
# not relevant for summarization, including in chunk mode, so process docs in order for summarization or extraction
# put most relevant chunks closest to question,
# esp. if truncation occurs will be "oldest" or "farthest from response" text that is truncated
# BUT: for small models, e.g. 6_9 pythia, if sees some stuff related to h2oGPT first, it can connect that and not listen to rest
if docs_ordering_type in ['best_first']:
pass
elif docs_ordering_type in ['best_near_prompt', 'reverse_sort']:
docs_with_score.reverse()
elif docs_ordering_type in ['', None, 'reverse_ucurve_sort']:
docs_with_score = reverse_ucurve_list(docs_with_score)
else:
raise ValueError("No such docs_ordering_type=%s" % docs_ordering_type)
# cut off so no high distance docs/sources considered
# NOTE: If no query, then distance set was 0 and nothing will be cut
num_docs_before_cut = len(docs_with_score)
docs = [x[0] for x in docs_with_score if x[1] < cut_distance]
scores = [x[1] for x in docs_with_score if x[1] < cut_distance]
if len(scores) > 0 and verbose:
print("Distance: min: %s max: %s mean: %s median: %s" %
(scores[0], scores[-1], np.mean(scores), np.median(scores)), flush=True)
# if HF type and have no docs, could bail out, but makes code too complex
if document_subset in non_query_commands:
# no LLM use at all, just sources
return docs, None, [], num_docs_before_cut, use_llm_if_no_docs, top_k_docs_max_show, \
llm, model_name, streamer, prompt_type_out, async_output, only_new_text
# FIXME: WIP
common_words_file = "data/NGSL_1.2_stats.csv.zip"
if False and os.path.isfile(common_words_file) and langchain_action == LangChainAction.QUERY.value:
df = pd.read_csv("data/NGSL_1.2_stats.csv.zip")
import string
reduced_query = query.translate(str.maketrans(string.punctuation, ' ' * len(string.punctuation))).strip()
reduced_query_words = reduced_query.split(' ')
set_common = set(df['Lemma'].values.tolist())
num_common = len([x.lower() in set_common for x in reduced_query_words])
frac_common = num_common / len(reduced_query) if reduced_query else 0
# FIXME: report to user bad query that uses too many common words
if verbose:
print("frac_common: %s" % frac_common, flush=True)
if len(docs) == 0:
        # avoids an empty context placeholder in the prompt in that case
template = template_if_no_docs
got_any_docs = len(docs) > 0
    # update the template in case the situation changed, e.g. we did or did not end up with docs;
    # the template obtained earlier was only an estimate of the template token size, this is the final version actually used
template, template_if_no_docs, auto_reduce_chunks, query = \
get_template(query, iinput,
pre_prompt_query, prompt_query,
pre_prompt_summary, prompt_summary,
langchain_action,
got_any_docs,
auto_reduce_chunks,
add_search_to_context)
if langchain_action == LangChainAction.QUERY.value:
if use_template:
            # instruct-like prompting, rather than the few-shot prompt_type='plain' default;
            # with 'plain', inserted sources confuse the model about how they sit among the rest of the text, so avoid it
prompt = PromptTemplate(
# input_variables=["summaries", "question"],
input_variables=["context", "question"],
template=template,
)
chain = load_qa_chain(llm, prompt=prompt, verbose=verbose)
else:
# only if use_openai_model = True, unused normally except in testing
chain = load_qa_with_sources_chain(llm)
chain_kwargs = dict(input_documents=docs, question=query)
target = wrapped_partial(chain, chain_kwargs)
elif summarize_action:
if async_output:
return_intermediate_steps = False
else:
return_intermediate_steps = True
if langchain_action == LangChainAction.SUMMARIZE_MAP.value:
prompt = PromptTemplate(input_variables=["text"], template=template)
chain = load_general_summarization_chain(llm, chain_type="map_reduce",
map_prompt=prompt, combine_prompt=prompt,
return_intermediate_steps=return_intermediate_steps,
token_max=max_input_tokens, verbose=verbose)
if async_output:
chain_func = chain.arun
else:
chain_func = chain
target = wrapped_partial(chain_func, dict(input_documents=docs,
token_max=max_input_tokens)) # , return_only_outputs=True)
elif langchain_action == LangChainAction.SUMMARIZE_ALL.value:
assert use_template
prompt = PromptTemplate(input_variables=["text"], template=template)
chain = load_general_summarization_chain(llm, chain_type="stuff", prompt=prompt,
return_intermediate_steps=return_intermediate_steps,
verbose=verbose)
if async_output:
chain_func = chain.arun
else:
chain_func = chain
target = wrapped_partial(chain_func)
elif langchain_action == LangChainAction.SUMMARIZE_REFINE.value:
chain = load_general_summarization_chain(llm, chain_type="refine",
return_intermediate_steps=return_intermediate_steps,
verbose=verbose)
if async_output:
chain_func = chain.arun
else:
chain_func = chain
target = wrapped_partial(chain_func)
elif langchain_action == LangChainAction.EXTRACT.value:
prompt = PromptTemplate(input_variables=["text"], template=template)
chain = load_general_summarization_chain(llm, chain_type="map",
map_prompt=prompt, combine_prompt=prompt,
return_intermediate_steps=return_intermediate_steps,
token_max=max_input_tokens, verbose=verbose)
if async_output:
chain_func = chain.arun
else:
chain_func = chain
target = wrapped_partial(chain_func, dict(input_documents=docs,
token_max=max_input_tokens)) # , return_only_outputs=True)
else:
raise RuntimeError("No such langchain_action=%s" % langchain_action)
else:
raise RuntimeError("No such langchain_action=%s" % langchain_action)
return docs, target, scores, num_docs_before_cut, use_llm_if_no_docs, top_k_docs_max_show, \
llm, model_name, streamer, prompt_type_out, async_output, only_new_text
def get_model_max_length(llm=None, tokenizer=None, inference_server=None, model_name=None):
if hasattr(tokenizer, 'model_max_length'):
return tokenizer.model_max_length
elif inference_server in ['openai', 'openai_azure']:
return llm.modelname_to_contextsize(model_name)
elif inference_server in ['openai_chat', 'openai_azure_chat']:
return model_token_mapping[model_name]
elif isinstance(tokenizer, FakeTokenizer):
# GGML
return tokenizer.model_max_length
else:
return 2048
def get_max_input_tokens(llm=None, tokenizer=None, inference_server=None, model_name=None, max_new_tokens=None):
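    # Estimate how many prompt tokens can be sent: the model context window minus room reserved for generation
    # (full max_new_tokens, or a smaller buffer for backends that tolerate longer inputs).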
model_max_length = get_model_max_length(llm=llm, tokenizer=tokenizer, inference_server=inference_server,
model_name=model_name)
if any([inference_server.startswith(x) for x in
['openai', 'openai_azure', 'openai_chat', 'openai_azure_chat', 'vllm']]):
# openai can't handle tokens + max_new_tokens > max_tokens even if never generate those tokens
# and vllm uses OpenAI API with same limits
max_input_tokens = model_max_length - max_new_tokens
elif isinstance(tokenizer, FakeTokenizer):
# don't trust that fake tokenizer (e.g. GGML) will make lots of tokens normally, allow more input
max_input_tokens = model_max_length - min(256, max_new_tokens)
else:
if 'falcon' in model_name or inference_server.startswith('http'):
# allow for more input for falcon, assume won't make as long outputs as default max_new_tokens
# Also allow if TGI or Gradio, because we tell it input may be same as output, even if model can't actually handle
max_input_tokens = model_max_length - min(256, max_new_tokens)
else:
# trust that maybe model will make so many tokens, so limit input
max_input_tokens = model_max_length - max_new_tokens
return max_input_tokens
def get_tokenizer(db=None, llm=None, tokenizer=None, inference_server=None, use_openai_model=False,
db_type='chroma'):
if hasattr(llm, 'pipeline') and hasattr(llm.pipeline, 'tokenizer'):
# more accurate
return llm.pipeline.tokenizer
elif hasattr(llm, 'tokenizer') and llm.tokenizer is not None:
# e.g. TGI client mode etc.
return llm.tokenizer
elif inference_server and any([inference_server.startswith(x) for x in ['openai', 'openai_chat', 'openai_azure',
'openai_azure_chat']]) and \
tokenizer is not None:
return tokenizer
elif isinstance(tokenizer, FakeTokenizer):
return tokenizer
elif use_openai_model:
return FakeTokenizer(is_openai=True)
elif (hasattr(db, '_embedding_function') and
hasattr(db._embedding_function, 'client') and
hasattr(db._embedding_function.client, 'tokenize')):
# in case model is not our pipeline with HF tokenizer
return db._embedding_function.client.tokenize
else:
# backup method
if os.getenv('HARD_ASSERTS'):
assert db_type in ['faiss', 'weaviate']
# use tiktoken for faiss since embedding called differently
return FakeTokenizer()
def get_template(query, iinput,
pre_prompt_query, prompt_query,
pre_prompt_summary, prompt_summary,
langchain_action,
got_any_docs,
auto_reduce_chunks,
add_search_to_context):
if got_any_docs and add_search_to_context:
# modify prompts, assumes patterns like in predefined prompts. If user customizes, then they'd need to account for that.
prompt_query = prompt_query.replace('information in the document sources',
'information in the document and web search sources (and their source dates and website source)')
prompt_summary = prompt_summary.replace('information in the document sources',
'information in the document and web search sources (and their source dates and website source)')
elif got_any_docs and not add_search_to_context:
pass
elif not got_any_docs and add_search_to_context:
# modify prompts, assumes patterns like in predefined prompts. If user customizes, then they'd need to account for that.
prompt_query = prompt_query.replace('information in the document sources',
'information in the web search sources (and their source dates and website source)')
prompt_summary = prompt_summary.replace('information in the document sources',
'information in the web search sources (and their source dates and website source)')
if langchain_action == LangChainAction.QUERY.value:
if iinput:
query = "%s\n%s" % (query, iinput)
if not got_any_docs:
template_if_no_docs = template = """{context}{question}"""
else:
template = """%s
\"\"\"
{context}
\"\"\"
%s{question}""" % (pre_prompt_query, prompt_query)
template_if_no_docs = """{context}{question}"""
elif langchain_action in [LangChainAction.SUMMARIZE_ALL.value, LangChainAction.SUMMARIZE_MAP.value,
LangChainAction.EXTRACT.value]:
none = ['', '\n', None]
# modify prompt_summary if user passes query or iinput
if query not in none and iinput not in none:
prompt_summary = "Focusing on %s, %s, %s" % (query, iinput, prompt_summary)
elif query not in none:
prompt_summary = "Focusing on %s, %s" % (query, prompt_summary)
# don't auto reduce
auto_reduce_chunks = False
if langchain_action in [LangChainAction.SUMMARIZE_MAP.value, LangChainAction.EXTRACT.value]:
fstring = '{text}'
else:
fstring = '{input_documents}'
template = """%s:
\"\"\"
%s
\"\"\"\n%s""" % (pre_prompt_summary, fstring, prompt_summary)
template_if_no_docs = "Exactly only say: There are no documents to summarize/extract from."
elif langchain_action in [LangChainAction.SUMMARIZE_REFINE]:
template = '' # unused
template_if_no_docs = '' # unused
else:
raise RuntimeError("No such langchain_action=%s" % langchain_action)
return template, template_if_no_docs, auto_reduce_chunks, query
def get_sources_answer(query, docs, answer, scores, show_rank,
answer_with_sources, append_sources_to_answer,
show_accordions=True,
show_link_in_sources=True,
top_k_docs_max_show=10,
verbose=False,
t_run=None,
count_input_tokens=None, count_output_tokens=None):
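    # Combine the model answer with a formatted sources block (raw dicts, or an HTML list/accordion built from the
    # retrieved docs and their scores); returns (answer possibly with sources appended, extra sources text).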
if verbose:
print("query: %s" % query, flush=True)
print("answer: %s" % answer, flush=True)
if len(docs) == 0:
extra = ''
ret = answer + extra
return ret, extra
if answer_with_sources == -1:
extra = [dict(score=score, content=get_doc(x), source=get_source(x), orig_index=x.metadata.get('orig_index', 0))
for score, x in zip(scores, docs)][
:top_k_docs_max_show]
if append_sources_to_answer:
extra_str = [str(x) for x in extra]
ret = answer + '\n\n' + '\n'.join(extra_str)
else:
ret = answer
return ret, extra
# link
answer_sources = [(max(0.0, 1.5 - score) / 1.5,
get_url(doc, font_size=font_size),
get_accordion(doc, font_size=font_size, head_acc=head_acc)) for score, doc in
zip(scores, docs)]
if not show_accordions:
answer_sources_dict = defaultdict(list)
        [answer_sources_dict[url].append(score) for score, url, _ in answer_sources]
answers_dict = {}
for url, scores_url in answer_sources_dict.items():
answers_dict[url] = np.max(scores_url)
answer_sources = [(score, url) for url, score in answers_dict.items()]
answer_sources.sort(key=lambda x: x[0], reverse=True)
if show_rank:
# answer_sources = ['%d | %s' % (1 + rank, url) for rank, (score, url) in enumerate(answer_sources)]
# sorted_sources_urls = "Sources [Rank | Link]:<br>" + "<br>".join(answer_sources)
answer_sources = ['%s' % url for rank, (score, url) in enumerate(answer_sources)]
answer_sources = answer_sources[:top_k_docs_max_show]
sorted_sources_urls = "Ranked Sources:<br>" + "<br>".join(answer_sources)
else:
if show_accordions:
if show_link_in_sources:
answer_sources = ['<font size="%s"><li>%.2g | %s</li>%s</font>' % (font_size, score, url, accordion)
for score, url, accordion in answer_sources]
else:
answer_sources = ['<font size="%s"><li>%.2g</li>%s</font>' % (font_size, score, accordion)
for score, url, accordion in answer_sources]
else:
if show_link_in_sources:
answer_sources = ['<font size="%s"><li>%.2g | %s</li></font>' % (font_size, score, url)
for score, url in answer_sources]
else:
answer_sources = ['<font size="%s"><li>%.2g</li></font>' % (font_size, score)
for score, url in answer_sources]
answer_sources = answer_sources[:top_k_docs_max_show]
if show_accordions:
sorted_sources_urls = f"<font size=\"{font_size}\">{source_prefix}<ul></font>" + "".join(answer_sources)
else:
sorted_sources_urls = f"<font size=\"{font_size}\">{source_prefix}<p><ul></font>" + "<p>".join(
answer_sources)
if verbose or True:
if t_run is not None and int(t_run) > 0:
sorted_sources_urls += 'Total Time: %d [s]<p>' % t_run
if count_input_tokens and count_output_tokens:
sorted_sources_urls += 'Input Tokens: %s | Output Tokens: %d<p>' % (
count_input_tokens, count_output_tokens)
sorted_sources_urls += "Total document chunks used: %s<p>" % len(docs)
sorted_sources_urls += f"<font size=\"{font_size}\"></ul></p>{source_postfix}</font>"
title_overall = "Sources"
sorted_sources_urls = f"""<details><summary><font size="{font_size}">{title_overall}</font></summary><font size="{font_size}">{sorted_sources_urls}</font></details>"""
if os.getenv("HARD_ASSERTS"):
assert sorted_sources_urls.startswith(super_source_prefix)
assert sorted_sources_urls.endswith(super_source_postfix)
if isinstance(answer, str) and not answer.endswith('\n'):
answer += '\n'
if answer_with_sources:
extra = '\n' + sorted_sources_urls
else:
extra = ''
if isinstance(answer, str) and append_sources_to_answer:
ret = answer + extra
else:
ret = answer
return ret, extra
def get_any_db(db1s, langchain_mode, langchain_mode_paths, langchain_mode_types,
dbs=None,
load_db_if_exists=None, db_type=None,
use_openai_embedding=None,
hf_embedding_model=None, migrate_embedding_model=None, auto_migrate_db=None,
for_sources_list=False,
verbose=False,
n_jobs=-1,
):
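    # Resolve the vector DB for this langchain_mode: personal per-user (db1s), shared (dbs), or loaded from disk
    # and then cached back into the appropriate container so later calls avoid the reload cost.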
if langchain_mode in [LangChainMode.DISABLED.value, LangChainMode.LLM.value]:
return None
elif for_sources_list and langchain_mode in [LangChainMode.WIKI_FULL.value]:
# NOTE: avoid showing full wiki. Takes about 30 seconds over about 90k entries, but not useful for now
return None
elif langchain_mode in db1s and len(db1s[langchain_mode]) > 1 and db1s[langchain_mode][0]:
return db1s[langchain_mode][0]
elif dbs is not None and langchain_mode in dbs and dbs[langchain_mode] is not None:
return dbs[langchain_mode]
else:
db = None
if db is None:
langchain_type = langchain_mode_types.get(langchain_mode, LangChainTypes.EITHER.value)
persist_directory, langchain_type = get_persist_directory(langchain_mode, db1s=db1s, dbs=dbs,
langchain_type=langchain_type)
langchain_mode_types[langchain_mode] = langchain_type
# see if actually have on disk, don't try to switch embedding yet, since can't use return here
migrate_embedding_model = False
db, _, _ = \
get_existing_db(db, persist_directory, load_db_if_exists, db_type,
use_openai_embedding,
langchain_mode, langchain_mode_paths, langchain_mode_types,
hf_embedding_model, migrate_embedding_model, auto_migrate_db,
verbose=verbose, n_jobs=n_jobs)
if db is not None:
# if found db, then stuff into state, so don't have to reload again that takes time
if langchain_type == LangChainTypes.PERSONAL.value:
assert isinstance(db1s, dict), "db1s wrong type: %s" % type(db1s)
db1 = db1s[langchain_mode] = [db, None, None]
assert len(db1) == length_db1(), "Bad setup: %s" % len(db1)
set_dbid(db1)
else:
assert isinstance(dbs, dict), "dbs wrong type: %s" % type(dbs)
dbs[langchain_mode] = db
return db
def get_sources(db1s, selection_docs_state1, requests_state1, langchain_mode,
dbs=None, docs_state0=None,
load_db_if_exists=None,
db_type=None,
use_openai_embedding=None,
hf_embedding_model=None,
migrate_embedding_model=None,
auto_migrate_db=None,
verbose=False,
get_userid_auth=None,
n_jobs=-1,
):
for k in db1s:
set_dbid(db1s[k])
langchain_mode_paths = selection_docs_state1['langchain_mode_paths']
langchain_mode_types = selection_docs_state1['langchain_mode_types']
set_userid(db1s, requests_state1, get_userid_auth)
db = get_any_db(db1s, langchain_mode, langchain_mode_paths, langchain_mode_types,
dbs=dbs,
load_db_if_exists=load_db_if_exists,
db_type=db_type,
use_openai_embedding=use_openai_embedding,
hf_embedding_model=hf_embedding_model,
migrate_embedding_model=migrate_embedding_model,
auto_migrate_db=auto_migrate_db,
for_sources_list=True,
verbose=verbose,
n_jobs=n_jobs,
)
if langchain_mode in ['LLM'] or db is None:
source_files_added = "NA"
source_list = []
num_chunks = 0
num_sources_str = str(0)
elif langchain_mode in ['wiki_full']:
source_files_added = "Not showing wiki_full, takes about 20 seconds and makes 4MB file." \
" Ask [email protected] for file if required."
source_list = []
num_chunks = 0
num_sources_str = str(0)
elif db is not None:
metadatas = get_metadatas(db, full_required=False)
source_list = sorted(set([x['source'] for x in metadatas]))
source_files_added = '\n'.join(source_list)
num_chunks = len(metadatas)
num_sources_str = ">=%d" % len(source_list)
if is_chroma_db(db):
num_chunks_real = db._collection.count()
if num_chunks_real == num_chunks:
num_sources_str = "=%d" % len(source_list)
else:
num_chunks = num_chunks_real
else:
source_list = []
source_files_added = "None"
num_chunks = 0
num_sources_str = str(0)
sources_dir = "sources_dir"
sources_dir = makedirs(sources_dir, exist_ok=True, tmp_ok=True, use_base=True)
sources_file = os.path.join(sources_dir, 'sources_%s_%s' % (langchain_mode, str(uuid.uuid4())))
with open(sources_file, "wt") as f:
f.write(source_files_added)
source_list = docs_state0 + source_list
if DocumentChoice.ALL.value in source_list:
source_list.remove(DocumentChoice.ALL.value)
return sources_file, source_list, num_chunks, num_sources_str, db
def update_user_db(file, db1s, selection_docs_state1, requests_state1,
langchain_mode=None,
get_userid_auth=None,
**kwargs):
kwargs.update(selection_docs_state1)
set_userid(db1s, requests_state1, get_userid_auth)
if file is None:
raise RuntimeError("Don't use change, use input")
try:
return _update_user_db(file, db1s=db1s,
langchain_mode=langchain_mode,
**kwargs)
except BaseException as e:
print(traceback.format_exc(), flush=True)
# gradio has issues if except, so fail semi-gracefully, else would hang forever in processing textbox
ex_str = "Exception: %s" % str(e)
source_files_added = """\
<html>
<body>
<p>
Sources: <br>
</p>
<div style="overflow-y: auto;height:400px">
{0}
</div>
</body>
</html>
""".format(ex_str)
doc_exception_text = str(e)
return None, langchain_mode, source_files_added, doc_exception_text, None
finally:
clear_torch_cache()
def get_lock_file(db1, langchain_mode):
db_id = get_dbid(db1)
base_path = 'locks'
base_path = makedirs(base_path, exist_ok=True, tmp_ok=True, use_base=True)
# don't allow db_id to be '' or None, would be bug and lock up everything
if not db_id:
if os.getenv('HARD_ASSERTS'):
raise ValueError("Invalid access for langchain_mode=%s" % langchain_mode)
db_id = str(uuid.uuid4())
lock_file = os.path.join(base_path, "db_%s_%s.lock" % (langchain_mode.replace(' ', '_').replace('/', '_'), db_id))
makedirs(os.path.dirname(lock_file)) # ensure really made
return lock_file
def _update_user_db(file,
db1s=None,
langchain_mode='UserData',
chunk=None, chunk_size=None,
# urls
use_unstructured=True,
use_playwright=False,
use_selenium=False,
# pdfs
use_pymupdf='auto',
use_unstructured_pdf='auto',
use_pypdf='auto',
enable_pdf_ocr='auto',
enable_pdf_doctr='auto',
try_pdf_as_html='auto',
# images
enable_ocr=False,
enable_doctr=False,
enable_pix2struct=False,
enable_captions=True,
captions_model=None,
caption_loader=None,
doctr_loader=None,
pix2struct_loader=None,
# json
jq_schema='.[]',
dbs=None, db_type=None,
langchain_modes=None,
langchain_mode_paths=None,
langchain_mode_types=None,
use_openai_embedding=None,
hf_embedding_model=None,
migrate_embedding_model=None,
auto_migrate_db=None,
verbose=None,
n_jobs=-1,
is_url=None, is_txt=None,
):
assert db1s is not None
assert chunk is not None
assert chunk_size is not None
assert use_openai_embedding is not None
assert hf_embedding_model is not None
assert migrate_embedding_model is not None
assert auto_migrate_db is not None
assert caption_loader is not None
assert doctr_loader is not None
assert enable_captions is not None
assert captions_model is not None
assert enable_ocr is not None
assert enable_doctr is not None
assert enable_pdf_ocr is not None
assert enable_pdf_doctr is not None
assert enable_pix2struct is not None
assert verbose is not None
if dbs is None:
dbs = {}
assert isinstance(dbs, dict), "Wrong type for dbs: %s" % str(type(dbs))
# handle case of list of temp buffer
if isinstance(file, str) and file.strip().startswith('['):
try:
file = ast.literal_eval(file.strip())
except Exception as e:
print("Tried to parse %s as list but failed: %s" % (file, str(e)), flush=True)
if isinstance(file, list) and len(file) > 0 and hasattr(file[0], 'name'):
file = [x.name for x in file]
# handle single file of temp buffer
if hasattr(file, 'name'):
file = file.name
if not isinstance(file, (list, tuple, typing.Generator)) and isinstance(file, str):
file = [file]
if langchain_mode == LangChainMode.DISABLED.value:
return None, langchain_mode, get_source_files(), "", None
if langchain_mode in [LangChainMode.LLM.value]:
# then switch to MyData, so langchain_mode also becomes way to select where upload goes
# but default to mydata if nothing chosen, since safest
if LangChainMode.MY_DATA.value in langchain_modes:
langchain_mode = LangChainMode.MY_DATA.value
elif len(langchain_modes) >= 1:
langchain_mode = langchain_modes[0]
else:
return None, langchain_mode, get_source_files(), "", None
if langchain_mode_paths is None:
langchain_mode_paths = {}
user_path = langchain_mode_paths.get(langchain_mode)
# UserData or custom, which has to be from user's disk
if user_path is not None:
# move temp files from gradio upload to stable location
for fili, fil in enumerate(file):
if isinstance(fil, str) and os.path.isfile(fil): # not url, text
new_fil = os.path.normpath(os.path.join(user_path, os.path.basename(fil)))
if os.path.normpath(os.path.abspath(fil)) != os.path.normpath(os.path.abspath(new_fil)):
if os.path.isfile(new_fil):
remove(new_fil)
try:
if os.path.dirname(new_fil):
makedirs(os.path.dirname(new_fil))
shutil.move(fil, new_fil)
except FileExistsError:
pass
file[fili] = new_fil
if verbose:
print("Adding %s" % file, flush=True)
# FIXME: could avoid even parsing, let alone embedding, same old files if upload same file again
# FIXME: but assume nominally user isn't uploading all files over again from UI
# expect string comparison, if dict then model object with name and get name not dict or model
hf_embedding_model_str = get_hf_embedding_model_name(hf_embedding_model)
if is_txt and hf_embedding_model_str == 'fake':
# avoid parallel if fake embedding since assume trivial ingestion
n_jobs = 1
sources = path_to_docs(file if not is_url and not is_txt else None,
verbose=verbose,
fail_any_exception=False,
n_jobs=n_jobs,
chunk=chunk, chunk_size=chunk_size,
url=file if is_url else None,
text=file if is_txt else None,
# urls
use_unstructured=use_unstructured,
use_playwright=use_playwright,
use_selenium=use_selenium,
# pdfs
use_pymupdf=use_pymupdf,
use_unstructured_pdf=use_unstructured_pdf,
use_pypdf=use_pypdf,
enable_pdf_ocr=enable_pdf_ocr,
enable_pdf_doctr=enable_pdf_doctr,
try_pdf_as_html=try_pdf_as_html,
# images
enable_ocr=enable_ocr,
enable_doctr=enable_doctr,
enable_pix2struct=enable_pix2struct,
enable_captions=enable_captions,
captions_model=captions_model,
caption_loader=caption_loader,
doctr_loader=doctr_loader,
pix2struct_loader=pix2struct_loader,
# json
jq_schema=jq_schema,
db_type=db_type,
)
exceptions = [x for x in sources if x.metadata.get('exception')]
exceptions_strs = [x.metadata['exception'] for x in exceptions]
sources = [x for x in sources if 'exception' not in x.metadata]
# below must at least come after langchain_mode is modified in case was LLM -> MyData,
# so original langchain mode changed
for k in db1s:
set_dbid(db1s[k])
db1 = get_db1(db1s, langchain_mode)
lock_file = get_lock_file(db1s[LangChainMode.MY_DATA.value], langchain_mode) # user-level lock, not db-level lock
with filelock.FileLock(lock_file):
if langchain_mode in db1s:
if db1[0] is not None:
# then add
db, num_new_sources, new_sources_metadata = add_to_db(db1[0], sources, db_type=db_type,
use_openai_embedding=use_openai_embedding,
hf_embedding_model=hf_embedding_model)
else:
# in testing expect:
# assert len(db1) == length_db1() and db1[1] is None, "Bad MyData db: %s" % db1
# for production hit, when user gets clicky:
assert len(db1) == length_db1(), "Bad %s db: %s" % (langchain_mode, db1)
assert get_dbid(db1) is not None, "db hash was None, not allowed"
# then create
# if added has to original state and didn't change, then would be shared db for all users
langchain_type = langchain_mode_types.get(langchain_mode, LangChainTypes.EITHER.value)
persist_directory, langchain_type = get_persist_directory(langchain_mode, db1s=db1s, dbs=dbs,
langchain_type=langchain_type)
langchain_mode_types[langchain_mode] = langchain_type
db = get_db(sources, use_openai_embedding=use_openai_embedding,
db_type=db_type,
persist_directory=persist_directory,
langchain_mode=langchain_mode,
langchain_mode_paths=langchain_mode_paths,
langchain_mode_types=langchain_mode_types,
hf_embedding_model=hf_embedding_model,
migrate_embedding_model=migrate_embedding_model,
auto_migrate_db=auto_migrate_db,
n_jobs=n_jobs)
if db is not None:
db1[0] = db
source_files_added = get_source_files(db=db1[0], exceptions=exceptions)
if len(sources) > 0:
sources_last = os.path.basename(sources[-1].metadata.get('source', 'Unknown Source'))
else:
sources_last = None
return None, langchain_mode, source_files_added, '\n'.join(exceptions_strs), sources_last
else:
langchain_type = langchain_mode_types.get(langchain_mode, LangChainTypes.EITHER.value)
persist_directory, langchain_type = get_persist_directory(langchain_mode, db1s=db1s, dbs=dbs,
langchain_type=langchain_type)
langchain_mode_types[langchain_mode] = langchain_type
if langchain_mode in dbs and dbs[langchain_mode] is not None:
# then add
db, num_new_sources, new_sources_metadata = add_to_db(dbs[langchain_mode], sources, db_type=db_type,
use_openai_embedding=use_openai_embedding,
hf_embedding_model=hf_embedding_model)
else:
# then create. Or might just be that dbs is unfilled, then it will fill, then add
db = get_db(sources, use_openai_embedding=use_openai_embedding,
db_type=db_type,
persist_directory=persist_directory,
langchain_mode=langchain_mode,
langchain_mode_paths=langchain_mode_paths,
langchain_mode_types=langchain_mode_types,
hf_embedding_model=hf_embedding_model,
migrate_embedding_model=migrate_embedding_model,
auto_migrate_db=auto_migrate_db,
n_jobs=n_jobs)
dbs[langchain_mode] = db
# NOTE we do not return db, because function call always same code path
# return dbs[langchain_mode]
# db in this code path is updated in place
source_files_added = get_source_files(db=dbs[langchain_mode], exceptions=exceptions)
if len(sources) > 0:
sources_last = os.path.basename(sources[-1].metadata.get('source', 'Unknown Source'))
else:
sources_last = None
return None, langchain_mode, source_files_added, '\n'.join(exceptions_strs), sources_last
def get_source_files_given_langchain_mode(db1s, selection_docs_state1, requests_state1, document_choice1,
langchain_mode,
dbs=None,
load_db_if_exists=None,
db_type=None,
use_openai_embedding=None,
hf_embedding_model=None,
migrate_embedding_model=None,
auto_migrate_db=None,
verbose=False,
get_userid_auth=None,
delete_sources=False,
n_jobs=-1):
langchain_mode_paths = selection_docs_state1['langchain_mode_paths']
langchain_mode_types = selection_docs_state1['langchain_mode_types']
set_userid(db1s, requests_state1, get_userid_auth)
db = get_any_db(db1s, langchain_mode, langchain_mode_paths, langchain_mode_types,
dbs=dbs,
load_db_if_exists=load_db_if_exists,
db_type=db_type,
use_openai_embedding=use_openai_embedding,
hf_embedding_model=hf_embedding_model,
migrate_embedding_model=migrate_embedding_model,
auto_migrate_db=auto_migrate_db,
for_sources_list=True,
verbose=verbose,
n_jobs=n_jobs,
)
if delete_sources:
del_from_db(db, document_choice1, db_type=db_type)
if langchain_mode in ['LLM'] or db is None:
return "Sources: N/A"
return get_source_files(db=db, exceptions=None)
def get_source_files(db=None, exceptions=None, metadatas=None):
if exceptions is None:
exceptions = []
# only should be one source, not confused
# assert db is not None or metadatas is not None
# clicky user
if db is None and metadatas is None:
return "No Sources at all"
if metadatas is None:
source_label = "Sources:"
if db is not None:
metadatas = get_metadatas(db, full_required=False)
else:
metadatas = []
adding_new = False
else:
source_label = "New Sources:"
adding_new = True
# below automatically de-dups
small_dict = {get_url(x['source'], from_str=True, short_name=True): get_short_name(x.get('head')) for x in
metadatas if x.get('page', 0) == 0}
# if small_dict is empty dict, that's ok
df = pd.DataFrame(small_dict.items(), columns=['source', 'head'])
df.index = df.index + 1
df.index.name = 'index'
source_files_added = tabulate.tabulate(df, headers='keys', tablefmt='unsafehtml')
if exceptions:
exception_metadatas = [x.metadata for x in exceptions]
small_dict = {get_url(x['source'], from_str=True, short_name=True): get_short_name(x.get('exception')) for x in
exception_metadatas}
# if small_dict is empty dict, that's ok
df = pd.DataFrame(small_dict.items(), columns=['source', 'exception'])
df.index = df.index + 1
df.index.name = 'index'
exceptions_html = tabulate.tabulate(df, headers='keys', tablefmt='unsafehtml')
else:
exceptions_html = ''
if metadatas and exceptions:
source_files_added = """\
<html>
<body>
<p>
{0} <br>
</p>
<div style="overflow-y: auto;height:400px">
{1}
{2}
</div>
</body>
</html>
""".format(source_label, source_files_added, exceptions_html)
elif metadatas:
source_files_added = """\
<html>
<body>
<p>
{0} <br>
</p>
<div style="overflow-y: auto;height:400px">
{1}
</div>
</body>
</html>
""".format(source_label, source_files_added)
elif exceptions_html:
source_files_added = """\
<html>
<body>
<p>
Exceptions: <br>
</p>
<div style="overflow-y: auto;height:400px">
{0}
</div>
</body>
</html>
""".format(exceptions_html)
else:
if adding_new:
source_files_added = "No New Sources"
else:
source_files_added = "No Sources"
return source_files_added
def update_and_get_source_files_given_langchain_mode(db1s,
selection_docs_state,
requests_state,
langchain_mode, chunk, chunk_size,
# urls
use_unstructured=True,
use_playwright=False,
use_selenium=False,
# pdfs
use_pymupdf='auto',
use_unstructured_pdf='auto',
use_pypdf='auto',
enable_pdf_ocr='auto',
enable_pdf_doctr='auto',
try_pdf_as_html='auto',
# images
enable_ocr=False,
enable_doctr=False,
enable_pix2struct=False,
enable_captions=True,
captions_model=None,
caption_loader=None,
doctr_loader=None,
pix2struct_loader=None,
# json
jq_schema='.[]',
dbs=None, first_para=None,
hf_embedding_model=None,
use_openai_embedding=None,
migrate_embedding_model=None,
auto_migrate_db=None,
text_limit=None,
db_type=None, load_db_if_exists=None,
n_jobs=None, verbose=None, get_userid_auth=None):
set_userid(db1s, requests_state, get_userid_auth)
assert hf_embedding_model is not None
assert migrate_embedding_model is not None
assert auto_migrate_db is not None
langchain_mode_paths = selection_docs_state['langchain_mode_paths']
langchain_mode_types = selection_docs_state['langchain_mode_types']
has_path = {k: v for k, v in langchain_mode_paths.items() if v}
if langchain_mode in [LangChainMode.LLM.value, LangChainMode.MY_DATA.value]:
# then assume user really meant UserData, to avoid extra clicks in UI,
# since others can't be on disk, except custom user modes, which they should then select to query it
if LangChainMode.USER_DATA.value in has_path:
langchain_mode = LangChainMode.USER_DATA.value
db = get_any_db(db1s, langchain_mode, langchain_mode_paths, langchain_mode_types,
dbs=dbs,
load_db_if_exists=load_db_if_exists,
db_type=db_type,
use_openai_embedding=use_openai_embedding,
hf_embedding_model=hf_embedding_model,
migrate_embedding_model=migrate_embedding_model,
auto_migrate_db=auto_migrate_db,
for_sources_list=True,
verbose=verbose,
n_jobs=n_jobs,
)
# not designed for older way of using openai embeddings, why use_openai_embedding=False
# use_openai_embedding, hf_embedding_model passed in and possible different values used,
# but no longer used here or in calling functions so ok
db, num_new_sources, new_sources_metadata = make_db(use_openai_embedding=False,
hf_embedding_model=hf_embedding_model,
migrate_embedding_model=migrate_embedding_model,
auto_migrate_db=auto_migrate_db,
first_para=first_para, text_limit=text_limit,
chunk=chunk,
chunk_size=chunk_size,
# urls
use_unstructured=use_unstructured,
use_playwright=use_playwright,
use_selenium=use_selenium,
# pdfs
use_pymupdf=use_pymupdf,
use_unstructured_pdf=use_unstructured_pdf,
use_pypdf=use_pypdf,
enable_pdf_ocr=enable_pdf_ocr,
enable_pdf_doctr=enable_pdf_doctr,
try_pdf_as_html=try_pdf_as_html,
# images
enable_ocr=enable_ocr,
enable_doctr=enable_doctr,
enable_pix2struct=enable_pix2struct,
enable_captions=enable_captions,
captions_model=captions_model,
caption_loader=caption_loader,
doctr_loader=doctr_loader,
pix2struct_loader=pix2struct_loader,
# json
jq_schema=jq_schema,
langchain_mode=langchain_mode,
langchain_mode_paths=langchain_mode_paths,
langchain_mode_types=langchain_mode_types,
db_type=db_type,
load_db_if_exists=load_db_if_exists,
db=db,
n_jobs=n_jobs,
verbose=verbose)
# during refreshing, might have "created" new db since not in dbs[] yet, so insert back just in case
# so even if persisted, not kept up-to-date with dbs memory
if langchain_mode in db1s:
db1s[langchain_mode][0] = db
else:
dbs[langchain_mode] = db
# return only new sources with text saying such
return get_source_files(db=None, exceptions=None, metadatas=new_sources_metadata)
def get_db1(db1s, langchain_mode1):
if langchain_mode1 in db1s:
db1 = db1s[langchain_mode1]
else:
# indicates to code that not personal database
db1 = [None] * length_db1()
return db1
def clean_doc(docs1):
if not isinstance(docs1, (list, tuple, types.GeneratorType)):
docs1 = [docs1]
for doci, doc in enumerate(docs1):
docs1[doci].page_content = '\n'.join([x.strip() for x in doc.page_content.split("\n") if x.strip()])
return docs1
def clone_documents(documents: Iterable[Document]) -> List[Document]:
# first clone documents
new_docs = []
for doc in documents:
new_doc = Document(page_content=doc.page_content, metadata=copy.deepcopy(doc.metadata))
new_docs.append(new_doc)
return new_docs
def get_db_from_hf(dest=".", db_dir='db_dir_DriverlessAI_docs.zip'):
from huggingface_hub import hf_hub_download
# True for case when locally already logged in with correct token, so don't have to set key
token = os.getenv('HUGGING_FACE_HUB_TOKEN', True)
path_to_zip_file = hf_hub_download('h2oai/db_dirs', db_dir, token=token, repo_type='dataset')
import zipfile
with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
persist_directory = os.path.dirname(zip_ref.namelist()[0])
remove(persist_directory)
zip_ref.extractall(dest)
return path_to_zip_file
# Note dir has space in some cases, while zip does not
some_db_zips = [['db_dir_DriverlessAI_docs.zip', 'db_dir_DriverlessAI docs', 'CC-BY-NC license'],
['db_dir_UserData.zip', 'db_dir_UserData', 'CC-BY license for ArXiv'],
['db_dir_github_h2oGPT.zip', 'db_dir_github h2oGPT', 'ApacheV2 license'],
['db_dir_wiki.zip', 'db_dir_wiki', 'CC-BY-SA Wikipedia license'],
# ['db_dir_wiki_full.zip', 'db_dir_wiki_full.zip', '23GB, 05/04/2023 CC-BY-SA Wiki license'],
]
all_db_zips = some_db_zips + \
[['db_dir_wiki_full.zip', 'db_dir_wiki_full.zip', '23GB, 05/04/2023 CC-BY-SA Wiki license'],
]
def get_some_dbs_from_hf(dest='.', db_zips=None):
if db_zips is None:
db_zips = some_db_zips
for db_dir, dir_expected, license1 in db_zips:
path_to_zip_file = get_db_from_hf(dest=dest, db_dir=db_dir)
assert os.path.isfile(path_to_zip_file), "Missing zip in %s" % path_to_zip_file
if dir_expected:
assert os.path.isdir(os.path.join(dest, dir_expected)), "Missing path for %s" % dir_expected
assert os.path.isdir(
os.path.join(dest, dir_expected, 'index')), "Missing index in %s" % dir_expected
def _create_local_weaviate_client():
WEAVIATE_URL = os.getenv('WEAVIATE_URL', "http://localhost:8080")
WEAVIATE_USERNAME = os.getenv('WEAVIATE_USERNAME')
WEAVIATE_PASSWORD = os.getenv('WEAVIATE_PASSWORD')
WEAVIATE_SCOPE = os.getenv('WEAVIATE_SCOPE', "offline_access")
resource_owner_config = None
try:
import weaviate
from weaviate.embedded import EmbeddedOptions
if WEAVIATE_USERNAME is not None and WEAVIATE_PASSWORD is not None:
resource_owner_config = weaviate.AuthClientPassword(
username=WEAVIATE_USERNAME,
password=WEAVIATE_PASSWORD,
scope=WEAVIATE_SCOPE
)
# if using remote server, don't choose persistent directory
client = weaviate.Client(WEAVIATE_URL, auth_client_secret=resource_owner_config)
return client
except Exception as e:
print(f"Failed to create Weaviate client: {e}")
return None
if __name__ == '__main__':
pass
| [
"None",
"Exactly only say: There are no documents to summarize/extract from.",
"Focusing on PLACEHOLDER, PLACEHOLDER",
"PLACEHOLDER:\n\"\"\"\nPLACEHOLDER\n\"\"\"\nPLACEHOLDER",
"PLACEHOLDER\n\"\"\"\n{context}\n\"\"\"\nPLACEHOLDER{question}",
"question",
"information in the document sources",
"information in the document and web search sources (and their source dates and website source)",
"You are a zero shot react agent.\nConsider to prompt of Question that was original query from the user.\nRespond to prompt of Thought with a thought that may lead to a reasonable new action choice.\nRespond to prompt of Action with an action to take out of the tools given, giving exactly single word for the tool name.\nRespond to prompt of Action Input with an input to give the tool.\nConsider to prompt of Observation that was response from the tool.\nRepeat this Thought, Action, Action Input, Observation, Thought sequence several times with new and different thoughts and actions each time, do not repeat.\nOnce satisfied that the thoughts, responses are sufficient to answer the question, then respond to prompt of Thought with: I now know the final answer\nRespond to prompt of Final Answer with your final high-quality bullet list answer to the original query.\n",
"{context}{question}",
"prompt_basic",
"PLACEHOLDERPLACEHOLDER",
"context",
"plain",
"information in the web search sources (and their source dates and website source)",
"hyde_template",
"{query}\n\n{answer}",
"[]",
"human_bot",
"Focusing on PLACEHOLDER, PLACEHOLDER, PLACEHOLDER"
] |
2024-01-10 | TrainGRC/llm-examples | token_counting_with_tiktoken.py | #!/usr/bin/env python3
import subprocess
import sys
def install(package):
subprocess.check_call([sys.executable, "-m", "pip", "install", package])
try:
import openai
except ImportError:
print("Installing module: openai")
install('openai')
import openai
try:
import tiktoken
except ImportError:
print("Installing module: tiktoken")
install('tiktoken')
import tiktoken
try:
from termcolor import colored
except ImportError:
print("Installing module: termcolor")
install('termcolor')
from termcolor import colored
print("")
openai.api_key = input("Please enter your OpenAI API key: ")
MAX_TOKENS = 512
def num_tokens_from_messages(messages):
"""Returns the number of tokens used by a list of messages."""
model = "gpt-3.5-turbo"
encoding = tiktoken.encoding_for_model(model)
num_tokens = 0
for message in messages:
num_tokens += len(encoding.encode(message["content"]))
return num_tokens
def openai_chat_completion(messages):
"""Returns the response from the OpenAI API given a list of messages."""
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
return response.choices[0].message['content']
conversation_history = []
while True:
user_input = input("\nPlease enter your request (type 'q' or 'quit' to exit): ")
if user_input.lower() in ['q', 'quit']:
break
new_message = {"role": "user", "content": user_input}
if num_tokens_from_messages(conversation_history + [new_message]) > MAX_TOKENS:
print("Conversation history is too long and needs to be truncated.")
continue
conversation_history.append(new_message)
print(colored("\nFull conversation history:", 'red'))
for message in conversation_history:
print(message)
print(colored(f"\n{num_tokens_from_messages(conversation_history)} tokens", 'green'))
response_content = openai_chat_completion(conversation_history)
conversation_history.append({"role": "assistant", "content": response_content})
print(f"\nResponse: \n{response_content}") | [] |
2024-01-10 | TrainGRC/llm-examples | basic_context_management.py | #!/usr/bin/env python3
import subprocess
import sys
def install(package):
subprocess.check_call([sys.executable, "-m", "pip", "install", package])
try:
import openai
except ImportError:
print("Installing module: openai")
install('openai')
import openai
try:
import tiktoken
except ImportError:
print("Installing module: tiktoken")
install('tiktoken')
import tiktoken
try:
from termcolor import colored
except ImportError:
print("Installing module: termcolor")
install('termcolor')
from termcolor import colored
print("")
openai.api_key = input("Please enter your OpenAI API key: ")
MAX_TOKENS = 512
def num_tokens_from_messages(messages):
"""Returns the number of tokens used by a list of messages."""
model = "gpt-3.5-turbo"
encoding = tiktoken.encoding_for_model(model)
num_tokens = 0
for message in messages:
num_tokens += len(encoding.encode(message["content"]))
return num_tokens
def openai_chat_completion(messages):
"""Returns the response from the OpenAI API given an array of messages."""
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
return response.choices[0].message['content']
def truncate_by_removing(conversation_history):
"""Removes the oldest messages until the conversation history is short enough."""
print(colored("Removing oldest messages.", 'red'))
while num_tokens_from_messages(conversation_history) > MAX_TOKENS:
conversation_history.pop(0)
return conversation_history
def truncate_by_summarizing(conversation_history):
"""Summarizes the oldest messages until the conversation history is short enough."""
print(colored("Removing oldest messages.", 'red'))
messages_to_remove = []
while num_tokens_from_messages(conversation_history) > MAX_TOKENS:
messages_to_remove.append(conversation_history.pop(0))
if messages_to_remove:
messages_to_remove.append({"role": "system", "content": "You are responsible for summarizing the previous conversation."})
summary = openai_chat_completion(messages_to_remove)
print(colored(f"\nSummary of removed messages: \n{summary}", 'yellow'))
conversation_history.insert(0, {"role": "assistant", "content": f'Summary of Removed Messages: {summary}'})
return conversation_history
conversation_history = []
while True:
if num_tokens_from_messages(conversation_history) > MAX_TOKENS:
trunc_method = input(colored("\nConversation history is too long. Would you like to remove the oldest messages until it fits or summarize the conversation? ", 'red'))
if trunc_method.lower() in ['summarize', 's']:
conversation_history = truncate_by_summarizing(conversation_history)
elif trunc_method.lower() in ['remove', 'r']:
conversation_history = truncate_by_removing(conversation_history)
user_input = input("\nPlease enter your request (type 'q' or 'quit' to exit): ")
if user_input.lower() in ['q', 'quit']:
break
new_message = {"role": "user", "content": user_input}
conversation_history.append(new_message)
print(colored(f"\n{num_tokens_from_messages(conversation_history)} tokens", 'green'))
print(colored("\nFull conversation history:", 'blue'))
for message in conversation_history:
print(message)
response_content = openai_chat_completion(conversation_history)
conversation_history.append({"role": "assistant", "content": response_content})
print(f"\nResponse: \n{response_content}") | [
"You are responsible for summarizing the previous conversation.",
"Summary of Removed Messages: PLACEHOLDER"
] |
2024-01-10 | DCoinHub/sparse_feedback | scripts~llm_feedback_rankings.py | import os
import csv
import time
import openai
import argparse
import pandas as pd
from tqdm import tqdm
from constants import RANKINGS_PROMPT
parser = argparse.ArgumentParser()
parser.add_argument('--gpt_version', choices=['gpt-3.5-turbo', 'gpt-4'], default='gpt-3.5-turbo')
parser.add_argument('--input_csv', type = str, default = 'chatgpt_feedback/without_dolly/test_pairwise_data.csv')
parser.add_argument('--save_feedback_csv', type = str, default = None)
parser.add_argument('--start_index', type = int, default = 0)
args = parser.parse_args()
PROMPT_DICT = {
"prompt_input": (
"{instruction}\n\nInput:\n{input}"
),
"prompt_no_input": (
"{instruction}"
),
}
def get_reward(instruction, input, output_1, output_2):
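    """Build the chat messages asking the grader model to compare two candidate responses for the same instruction."""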
if str(input) == "":
        # no separate input field; use the instruction-only prompt
instruction = PROMPT_DICT['prompt_no_input'].format(instruction = instruction)
prompt = RANKINGS_PROMPT.format(instruction = instruction, output_1 = output_1, output_2 = output_2)
else:
instruction = PROMPT_DICT['prompt_input'].format(instruction = instruction, input = input)
prompt = RANKINGS_PROMPT.format(instruction = instruction, output_1 = output_1, output_2 = output_2)
messages = [{"role": "user", "content": prompt}]
return messages
def main():
df = pd.read_csv(args.input_csv)
df = df.iloc[args.start_index:]
for j in tqdm(range(len(df))):
try:
instruction = df.iloc[j]['instruction']
input = df.iloc[j]['input']
output1 = df.iloc[j]['response1']
output2 = df.iloc[j]['response2']
completion = openai.ChatCompletion.create(
model = args.gpt_version,
messages = get_reward(instruction, input, output1, output2))
feedback_1 = completion['choices'][0]['message']['content']
completion = openai.ChatCompletion.create(
model = args.gpt_version,
messages = get_reward(instruction, input, output2, output1))
feedback_2 = completion['choices'][0]['message']['content']
if '(a)' in feedback_1 and '(b)' in feedback_2:
feedback = '(a)'
elif '(b)' in feedback_1 and '(a)' in feedback_2:
feedback = '(b)'
elif '(a)' in feedback_1 and '(a)' in feedback_2:
feedback = 'equal'
elif '(b)' in feedback_1 and '(b)' in feedback_2:
feedback = 'equal'
else:
continue
print(feedback_1, feedback_2, feedback)
with open(args.save_feedback_csv, 'a') as f:
csvwriter = csv.writer(f)
csvwriter.writerow([instruction, input, output1, output2, feedback])
except:
print('Sleeping...')
time.sleep(5)
if __name__ == '__main__':
main()
| [
"{'prompt_input': '{instruction}\\n\\nInput:\\n{input}', 'prompt_no_input': '{instruction}'}"
] |
2024-01-10 | Akasxh/TutorAI | tutor_model.py | from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
# from langchain.chat_models import ChatOpenAI
import templates
import json
class Tutor:
def __init__(self, API_KEY, role, domain=None):
self.role = role
self.API_KEY = API_KEY
if self.role == "Learn":
situation = PromptTemplate.from_template(
templates.role_templates[self.role]
).format(domain=domain)
else:
situation = templates.role_templates[self.role]
self.situation = situation
    # returns questions generated for the given role
def get_questions(self):
llm = OpenAI(openai_api_key=self.API_KEY, temperature=0.6)
questions = llm.predict(self.situation)
return questions.split("\n")
def rate_answer(self, question, answer):
llm = OpenAI(openai_api_key=self.API_KEY, temperature=0)
prompt = PromptTemplate.from_template(templates.answer_rating_template).format(
question=question,
answer=answer,
situation=templates.suggestion_situation[self.role],
)
rating = llm.predict(prompt)
# print("DEBUG:", rating)
rating = json.loads(rating)
self.rating = rating
return rating
def get_suggestion(self, question, answer):
# sorting dict wrt values
# to get key with lowest rating_score
rating = dict(sorted(self.rating.items(), key=lambda item: item[1]))
# first key of this dict (lowest rating)
key = list(rating.keys())[0]
llm = OpenAI(openai_api_key=self.API_KEY, temperature=0.6)
prompt = PromptTemplate.from_template(templates.suggestion_template).format(
question=question,
answer=answer,
key=key,
situation=templates.suggestion_situation[self.role],
)
suggestion = llm.predict(prompt)
return suggestion
| [] |
2024-01-10 | ashllxyy/lynx | instruct-tune.py | import time
import json
import os
import random
import re
import string
from functools import partial
from langchain.chat_models import ChatOpenAI
from langchain import PromptTemplate, OpenAI, LLMChain
from multiprocessing import Pool
import openai
import io
import numpy as np
import tqdm
from rouge_score import rouge_scorer
def encode_prompt(prompt_instructions):
"""Encode multiple prompt instructions into a single string."""
prompt = ""
for idx, task_dict in enumerate(prompt_instructions):
(instruction, input, output) = task_dict["instruction"], task_dict["input"], task_dict["output"]
instruction = re.sub(r"\s+", " ", instruction).strip().rstrip(":")
# Check if input is a string before calling .lower()
input = "<noinput>" if isinstance(input, float) or input.strip() == "" else input
prompt += f"###\n"
prompt += f"{idx + 1}. Instruction: {instruction}\n"
prompt += f"{idx + 1}. Input:\n{input}\n"
prompt += f"{idx + 1}. Output:\n{output}\n"
prompt += f"###\n"
prompt += f"{idx + 2}. Instruction:"
return prompt
def find_word_in_string(w, s):
return re.compile(r"\b({0})\b".format(w), flags=re.IGNORECASE).search(s)
def _make_w_io_base(f, mode: str):
if not isinstance(f, io.IOBase):
f_dirname = os.path.dirname(f)
if f_dirname != "":
os.makedirs(f_dirname, exist_ok=True)
f = open(f, mode=mode)
return f
def _make_r_io_base(f, mode: str):
if not isinstance(f, io.IOBase):
f = open(f, mode=mode)
return f
def jdump(obj, f, mode="w", indent=4, default=str):
"""Dump a str or dictionary to a file in json format.
Args:
obj: An object to be written.
f: A string path to the location on disk.
mode: Mode for opening the file.
indent: Indent for storing json dictionaries.
default: A function to handle non-serializable entries; defaults to `str`.
"""
f = _make_w_io_base(f, mode)
if isinstance(obj, (dict, list)):
json.dump(obj, f, indent=indent, default=default)
elif isinstance(obj, str):
f.write(obj)
else:
raise ValueError(f"Unexpected type: {type(obj)}")
f.close()
def jload(f, mode="r"):
"""Load a .json file into a dictionary."""
f = _make_r_io_base(f, mode)
jdict = json.load(f)
f.close()
return jdict
def post_process_gpt4_response(num_prompt_instructions, response):
if response is None:
return []
print(response)
raw_instructions = f"{num_prompt_instructions+1}. Instruction:" + response
raw_instructions = re.split("###", raw_instructions)
instructions = []
for idx, inst in enumerate(raw_instructions):
idx += num_prompt_instructions + 1
splitted_data = re.split(f"{idx}\.\s+(Instruction|Input|Output):", inst)
if len(splitted_data) != 7:
continue
else:
inst = splitted_data[2].strip()
input = splitted_data[4].strip()
input = "" if input.lower() == "<noinput>" else input
output = splitted_data[6].strip()
# filter out too short or too long instructions
if len(inst.split()) <= 3 or len(inst.split()) > 150:
continue
# filter based on keywords.
blacklist = [
"image",
"images",
"chart",
"charts",
"photo",
"photos",
"file",
"files",
"map",
"maps",
"draw",
"plot",
"go to",
"video",
"audio",
"music",
"flowchart",
"diagram",
]
blacklist += []
if any(find_word_in_string(word, inst) for word in blacklist):
continue
if inst.startswith("Write a program"):
continue
if inst[0] in string.punctuation:
continue
instructions.append({"instruction": inst, "input": input, "output": output})
return instructions
def read_prompt_file(file_path):
try:
with open(file_path, 'r', encoding='utf-8') as file:
return file.read()
except UnicodeDecodeError as e:
print(f"Error reading 'prompt.txt': {e}. Skipping the problematic characters.")
with open(file_path, 'rb') as file:
content = file.read()
content = content.decode('utf-8', errors='ignore')
return content
def read_large_context(file_path, num_splits):
context = read_prompt_file(file_path=file_path)
context_splits = [context[i:i + len(context) // num_splits] for i in range(0, len(context), len(context) // num_splits)]
return context_splits
def generate_instruction_following_data(
api_key: str,
output_dir="./",
seed_tasks_path="./seed_tasks.json",
num_instructions_to_generate=100,
model_name="gpt-4",
num_prompt_instructions=3,
request_batch_size=5,
temperature=0.1,
max_tokens=2000,
frequency_penalty=0,
presence_penalty=0,
top_p=0.75,
num_cpus=16,
context_split=1800
):
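    # Self-Instruct style loop: sample a few seed tasks plus a context chunk, prompt the model for new
    # (instruction, input, output) triples, post-process/filter them, and append the results to regen3.json.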
print("We are at the beginning of the code now!")
# Load JSON data from a file
with open(seed_tasks_path, 'r') as f:
seed_tasks = json.load(f)
# Transforms the data
seed_instruction_data = [
{"instruction": t["instruction"], "input": t["instances"][0]["input"], "output": t["instances"][0]["output"]}
for t in seed_tasks
]
print(f"Loaded {len(seed_instruction_data)} human-written seed instructions")
os.makedirs(output_dir, exist_ok=True)
request_idx = 390 #38 # 37 # 9 # 8 # 2 # 0
# loads the LM-generated instructions
machine_instruction_data = []
if os.path.exists(os.path.join(output_dir, "regen3.json")):
machine_instruction_data = jload(os.path.join(output_dir, "regen3.json"))
print(f"Loaded {len(machine_instruction_data)} machine-generated instructions")
# similarities = {}
scorer = rouge_scorer.RougeScorer(["rougeL"], use_stemmer=False)
# generates new instructions
progress_bar = tqdm.tqdm(total=num_instructions_to_generate)
if machine_instruction_data:
progress_bar.update(len(machine_instruction_data))
# tokenizes all the seed instructions and generated machine instructions
all_instructions = [d["instruction"] for d in seed_instruction_data] + [
d["instruction"] for d in machine_instruction_data
]
all_instruction_tokens = [scorer._tokenizer.tokenize(inst) for inst in all_instructions]
context = read_large_context('./output.txt', context_split)
prompt_tmp_txt = read_prompt_file("./prompt.txt") + "\n"
# Initialize the OpenAI model
model = ChatOpenAI(
openai_api_key=api_key,
model_name=model_name,
temperature=temperature,
max_tokens=max_tokens,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty,
top_p=top_p,
request_timeout=180
)
print(f'This is the request idx {request_idx}')
while len(machine_instruction_data) < num_instructions_to_generate:
request_idx += 1
# print(f'This is the request idx {request_idx}')
# break
results = []
request_start = time.time()
for _ in range(request_batch_size):
# sampling from the seed tasks and the random context
prompt_instructions = random.sample(seed_instruction_data, num_prompt_instructions)
selected_context = context[request_idx-1]
prompt = encode_prompt(prompt_instructions)
prompt_template = PromptTemplate(template=prompt_tmp_txt, input_variables=["ins_number", "input", "selected_context"])
try:
llm_chain = LLMChain(prompt=prompt_template, llm=model)
except:
print('Sleeping for 10 seconds...')
time.sleep(10)
llm_chain = LLMChain(prompt=prompt_template, llm=model)
# Sleeps when timeout
try:
result = llm_chain.predict(ins_number=num_prompt_instructions, input=prompt, selected_context=selected_context)
except:
time.sleep(10)
print('Sleeping for 10 seconds...')
print('Skipping')
continue
results.append(result)
request_duration = time.time() - request_start
process_start = time.time()
instruction_data = []
for result in results:
try:
new_instructions = post_process_gpt4_response(num_prompt_instructions, result)
instruction_data += new_instructions
except(UnicodeEncodeError):
instruction_data += new_instructions
continue
total = len(instruction_data)
print(f"We have reached here and the total instructions thus far is {total}")
keep = 0
for instruction_data_entry in instruction_data:
print("I have entered the for loop for instruction_data_entry!")
new_instruction_tokens = scorer._tokenizer.tokenize(instruction_data_entry["instruction"])
print("Added this instruction data entry")
keep += 1
machine_instruction_data.append(instruction_data_entry)
all_instructions.append(instruction_data_entry["instruction"])
all_instruction_tokens.append(new_instruction_tokens)
progress_bar.update(1)
print(f"I have come out of the for loop and I have kept {keep}")
process_duration = time.time() - process_start
print(f"Request {request_idx} took {request_duration:.2f}s, processing took {process_duration:.2f}s")
print(f"Generated {total} instructions, kept {keep} instructions")
jdump(machine_instruction_data, os.path.join(output_dir, "regen3.json"))
openai.api_key ='INSERT-KEY-HERE'
os.environ['OPENAI_API_KEY'] = openai.api_key
generate_instruction_following_data(
api_key=openai.api_key,
output_dir="./new_tasks",
seed_tasks_path="./seed_tasks.json",
num_instructions_to_generate=10000, ###
model_name="gpt-3.5-turbo",
num_prompt_instructions=3,
request_batch_size=1, ###
temperature=0,
top_p=1.0,
num_cpus=1
) | [
"./prompt.txt",
"\n",
"input",
"ins_number",
"###\n",
"selected_context"
] |
2024-01-10 | ProfitWaveTradingCo/Trading_Pal-main | Gpt%20Agents~TechnicalGpt.py | import os
import pandas as pd
import openai
from oandapyV20 import API
from oandapyV20.exceptions import V20Error
from oandapyV20.endpoints.instruments import InstrumentsCandles
from indicators import calculate_rsi, calculate_macd, calculate_bollinger_bands, calculate_atr, calculate_adx, calculate_obv
import traceback
import requests
OANDA_API_KEY = "ba62e5ad63f2a8759ee31761ba0fa5cd4f3ce2"
OPENAI_API_KEY = "sk-"
api = API(access_token=OANDA_API_KEY)
openai.api_key = OPENAI_API_KEY
INDICATORS_DIRECTORY = "indicators"
os.makedirs(INDICATORS_DIRECTORY, exist_ok=True)
class TechnicalsAgent:
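    """Pull OANDA candles for an instrument, compute technical indicators, and ask GPT for an analysis of the latest values."""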
def __init__(self, instrument, granularity, count):
self.instrument = instrument
self.granularity = granularity
self.count = count
def load_historical_data(self):
params = {"granularity": self.granularity, "count": self.count}
r = InstrumentsCandles(instrument=self.instrument, params=params)
api.request(r)
records = []
for candle in r.response["candles"]:
record = {
"time": candle["time"],
"volume": candle["volume"],
"open": candle["mid"]["o"],
"high": candle["mid"]["h"],
"low": candle["mid"]["l"],
"close": candle["mid"]["c"],
"instrument": self.instrument,
"granularity": self.granularity
}
records.append(record)
df = pd.DataFrame(records)
df["time"] = pd.to_datetime(df["time"])
return df
def calculate_indicators(self, df):
# Convert columns to numeric type
numeric_cols = ["open", "high", "low", "close"]
df[numeric_cols] = df[numeric_cols].apply(pd.to_numeric)
# Calculate indicators
df["RSI"] = calculate_rsi(df["close"], window=14)
df["MACD"], df["Signal_Line"], df["Histogram"] = calculate_macd(df["close"], window_fast=12, window_slow=26, window_signal=9)
df["BollingerBands_middle"], df["BollingerBands_std"] = calculate_bollinger_bands(df["close"], window=20)
df["ATR"] = calculate_atr(df["high"], df["low"], df["close"], window=14)
df["ADX"] = calculate_adx(df["high"], df["low"], df["close"], window=14)
df["OBV"] = calculate_obv(df["close"], df["volume"])
return df
def run(self):
df = self.load_historical_data()
df = self.calculate_indicators(df)
# Get the last row of indicators
latest_indicators = df.iloc[-1]
indicator_values = latest_indicators[["RSI", "MACD", "Signal_Line", "Histogram", "BollingerBands_middle", "BollingerBands_std", "ATR", "ADX", "OBV"]]
# Prepare a message to send to GPT
gpt_message = "The latest technical indicators for the instrument {} are:\n".format(self.instrument)
for indicator, value in indicator_values.items():
gpt_message += "- {}: {}\n".format(indicator, value)
# Send the message to GPT
gpt_response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{"role": "system", "content": "You are a technical gpt an AI model trained to understand financial markets and technical analysis. Provide a detailed analysis and possible prediction based on the following technical indicators."},
{"role": "user", "content": gpt_message}
]
)
# Extract GPT's message
gpt_response_message = gpt_response.choices[0].message['content']
print(f"GPT's market analysis: {gpt_response_message}")
return df, gpt_response_message
if __name__ == "__main__":
agent = TechnicalsAgent("USD_CAD", "S30", 5000)
df, gpt_response_message = agent.run()
| [
"You are a technical gpt an AI model trained to understand financial markets and technical analysis. Provide a detailed analysis and possible prediction based on the following technical indicators."
] |
2024-01-10 | ProfitWaveTradingCo/Trading_Pal-main | Gpt%20Transactions~transactions.py | from oandapyV20 import API
from oandapyV20.exceptions import V20Error
from oandapyV20.endpoints.transactions import TransactionsStream
import configparser
import openai
import boto3
import time
import csv
# Read keys from config.ini
config = configparser.ConfigParser()
config.read('config.ini')
# Set the OpenAI API key
OPENAI_API_KEY = config.get('API_KEYS', 'OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
# Set the base URL for the OANDA API
BASE_URL = "https://api-fxpractice.oanda.com"
ACCOUNT_ID = "10102"
# Initialize AWS Polly client
AWS_ACCESS_KEY_ID = config.get('AWS_KEYS', 'AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = config.get('AWS_KEYS', 'AWS_SECRET_ACCESS_KEY')
AWS_REGION = config.get('AWS_KEYS', 'AWS_REGION')
sns_client = boto3.client('sns', region_name=AWS_REGION, aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
OANDA_API_KEY = config.get('API_KEYS', 'OANDA_API_KEY')
api = API(access_token=OANDA_API_KEY)
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {OANDA_API_KEY}",
"Connection": "keep-alive"
}
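# Ask GPT to draft a short notification email that summarizes the newly received transactions.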
def generate_email_body(transactions):
prompt = f"Trading Pal here! I wanted to inform you that new transactions occurred on your Oanda account:\n\n"
for transaction in transactions:
prompt += f"{transaction}\n"
prompt += "\nCan you please write an email to inform the user about these new transactions?"
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt},
]
)
email_body = response['choices'][0]['message']['content']
return email_body.strip()
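# Poll the OANDA transaction stream; whenever non-heartbeat events arrive, summarize them with GPT
# and publish the text to the SNS topic so the user gets a notification.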
def stream_transactions():
transactions = []
try:
r = TransactionsStream(accountID=ACCOUNT_ID)
for response in api.request(r):
if response['type'] != 'HEARTBEAT':
print(response)
transactions.append(response)
email_body = generate_email_body(transactions) # Generate the email body
send_text_message(email_body) # Pass the email body to the function
transactions.clear()
time.sleep(1) # wait for 1 second before checking for new transactions
except V20Error as e:
print("Error: {}".format(e))
def send_text_message(message_body):
topic_arn = 'arn:aws:sns:us-east-1:470488217575:Transaction' # Replace with your SNS topic ARN
sns_client.publish(
TopicArn=topic_arn,
Message=message_body
)
save_response_to_csv(message_body)
def save_response_to_csv(response):
with open('GPT_response.csv', mode='a', newline='') as csv_file:
writer = csv.writer(csv_file)
writer.writerow([response])
stream_transactions()
| [
"Trading Pal here! I wanted to inform you that new transactions occurred on your Oanda account:\n\n",
"PLACEHOLDER\n",
"You are a helpful assistant.",
"\nCan you please write an email to inform the user about these new transactions?"
] |
2024-01-10 | ProfitWaveTradingCo/Trading_Pal-main | 1st_%20version~oanda.py |
"""
© 2023 Profitwave Trading Co. All rights reserved.
CEO: Dectrick A. McGee
For inquiries and support, please contact:
Email: [email protected]
"""
import json
import wave
import requests
import os
import boto3
import configparser
import winsound
from words import trading_keywords, endpoint_phrases
import pandas as pd
import oandapyV20
from oandapyV20 import API  # API is instantiated in the main loop below
import oandapyV20.endpoints.transactions as transactions
from oandapyV20.contrib.requests import MarketOrderRequest
from oandapyV20.exceptions import V20Error
from oandapyV20.endpoints.orders import OrderCreate
import openai
# Read keys from config.ini
config = configparser.ConfigParser()
config.read('config.ini')
# Set the OpenAI API key
OPENAI_API_KEY = config.get('API_KEYS', 'OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
MAX_TOKENS= 3055
# Set the base URL for the OANDA API
BASE_URL = "https://api-fxpractice.oanda.com"
ACCOUNT_ID = "101-2"
# The headers for the HTTP requests
OANDA_API_KEY = config.get('API_KEYS', 'OANDA_API_KEY')
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {OANDA_API_KEY}",
"Connection": "keep-alive"
}
# Initialize AWS Polly client
AWS_ACCESS_KEY_ID = config.get('AWS_KEYS', 'AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = config.get('AWS_KEYS', 'AWS_SECRET_ACCESS_KEY')
AWS_REGION = config.get('AWS_KEYS', 'AWS_REGION')
session = boto3.Session(
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
region_name=AWS_REGION
)
polly_client = session.client('polly')
# Function to convert text to speech using AWS Polly
def text_to_speech(text):
response = polly_client.synthesize_speech(
Text=text,
OutputFormat="pcm",
VoiceId="Matthew" # Provide the desired voice ID
)
audio = response['AudioStream'].read()
# Save the audio stream to a temporary WAV file
with wave.open(r"C:\Users\kingp\Downloads\Trading_Pal-main\temp.wav", 'wb') as wav_file:
wav_file.setnchannels(1)
wav_file.setsampwidth(2)
wav_file.setframerate(16000)
wav_file.writeframes(audio)
# Play the audio using the winsound module
winsound.PlaySound(r"C:\Users\kingp\Downloads\Trading_Pal-main\temp.wav", winsound.SND_FILENAME)
# Remove the temporary WAV file
os.remove(r"C:\Users\kingp\Downloads\Trading_Pal-main\temp.wav")
# Modify the printing statements to use text_to_speech function
def print_with_voice(text):
print(text)
text_to_speech(text)
# Enhanced greeting message from ProfitWave
greeting_message = """
👋 Hello there! Welcome to the world of Trading Pal 1.0! 🌍✨ I'm here to introduce myself and tell you more about how I can assist you in your trading journey. Let's dive in! 🚀💼
I, Trading Pal 1.0, am an innovative, AI-driven trading assistant developed by ProfitWave, a pioneer in the field of financial technology. 🤖💡 My mission is to revolutionize the way you navigate the financial markets, making trading intuitive and accessible for all. 💪💰
Think of me as your personal guide in the trading world. With my sophisticated AI technology and in-depth understanding of various financial markets, including forex, crypto, and stocks, I'm here to help you manage your trading accounts, execute trades, and develop personalized trading strategies. 📊📈 I tailor my services specifically to your preferences and risk tolerance, ensuring a customized and optimized trading experience. 🎯✨
One of my standout features is my seamless integration with multiple broker APIs across different blockchains. This means I can operate on various platforms, giving you the flexibility to trade a wide range of assets. Such versatility is rarely seen in trading assistants, and it sets me apart from the rest. 💪💻🌐
🔓 However, my journey doesn't end with Trading Pal 1.0. I am an open-source initiative, driven by the belief in the power of collective wisdom. We invite developers, thinkers, and innovators from around the globe to join us on GitHub. Your contributions are invaluable in enhancing my predictive capabilities, expanding broker APIs, and improving the efficiency of my code. Together, we can shape the future of trading with AI. 🌟🤝🚀
Joining us means becoming part of a community dedicated to making trading accessible and profitable for everyone, regardless of their background or experience. Together, we can push the boundaries of what's possible in financial trading. 🌈💼
So, are you ready to embark on this thrilling journey with me? Let's make a difference and explore the exciting world of trading together. Welcome aboard, and let Trading Pal 1.0 be your trusted companion on this adventure! 🎉🤖💼
"""
# Print the enhanced greeting message (text only; print_with_voice can be used for spoken output)
print(greeting_message)
# Function to check if user input is trading-related
def is_trading_related(user_input):
# Convert the user's input to lowercase
user_input = user_input.lower()
# Check if any of the trading keywords are in the user's input
for keyword in trading_keywords:
if keyword in user_input:
return True
# If no trading keywords were found in the user's input, return False
return False
#------------------------------
#- ENDPOINTS -
#------------------------------
# Function to get account details
def get_account_details(ACCOUNT_ID):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}"
response = requests.get(url, headers=headers)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to get account details. Error: {err}")
def create_order(ACCOUNT_ID, order_data):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/orders"
response = requests.post(url, headers=headers, json=order_data)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to create order. Error: {err}")
def get_account_summary(ACCOUNT_ID):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/summary"
response = requests.get(url, headers=headers)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to get account summary. Error: {err}")
def get_accounts():
url = f"{BASE_URL}/v3/accounts"
response = requests.get(url, headers=headers)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to get accounts. Error: {err}")
def get_account_instruments(ACCOUNT_ID):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/instruments"
response = requests.get(url, headers=headers)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to get account instruments. Error: {err}")
def set_account_configuration(ACCOUNT_ID, configuration_data):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/configuration"
response = requests.patch(url, headers=headers, json=configuration_data)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to set account configuration. Error: {err}")
def get_account_changes(ACCOUNT_ID, since_transaction_id):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/changes"
params = {
"sinceTransactionID": since_transaction_id
}
response = requests.get(url, headers=headers, params=params)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to get account changes. Error: {err}")
def get_orders(ACCOUNT_ID):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/orders"
response = requests.get(url, headers=headers)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to get orders. Error: {err}")
def get_pending_orders(ACCOUNT_ID):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/pendingOrders"
response = requests.get(url, headers=headers)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to get pending orders. Error: {err}")
def get_order_details(ACCOUNT_ID, order_id):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/orders/{order_id}"
response = requests.get(url, headers=headers)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to get order details. Error: {err}")
def replace_order(ACCOUNT_ID, order_id, order_data):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/orders/{order_id}/replace"
response = requests.put(url, headers=headers, json=order_data)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to replace order. Error: {err}")
def cancel_order(ACCOUNT_ID, order_id):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/orders/{order_id}/cancel"
response = requests.put(url, headers=headers)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to cancel order. Error: {err}")
def update_order_extensions(ACCOUNT_ID, order_id, extension_data):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/orders/{order_id}/clientExtensions"
response = requests.put(url, headers=headers, json=extension_data)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to update order extensions. Error: {err}")
def get_trades(ACCOUNT_ID):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/trades"
response = requests.get(url, headers=headers)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to get trades. Error: {err}")
def get_open_trades(ACCOUNT_ID):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/openTrades"
response = requests.get(url, headers=headers)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to get open trades. Error: {err}")
def get_trade_details(ACCOUNT_ID, trade_id):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/trades/{trade_id}"
response = requests.get(url, headers=headers)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to get trade details. Error: {err}")
def close_trade(ACCOUNT_ID, trade_id):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/trades/{trade_id}/close"
response = requests.put(url, headers=headers)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to close trade. Error: {err}")
def update_trade_extensions(ACCOUNT_ID, trade_id, extension_data):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/trades/{trade_id}/clientExtensions"
response = requests.put(url, headers=headers, json=extension_data)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to update trade extensions. Error: {err}")
def update_trade_orders(ACCOUNT_ID, trade_id, order_data):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/trades/{trade_id}/orders"
response = requests.put(url, headers=headers, json=order_data)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to update trade orders. Error: {err}")
def get_positions(ACCOUNT_ID):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/positions"
response = requests.get(url, headers=headers)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to get positions. Error: {err}")
def get_open_positions(ACCOUNT_ID):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/openPositions"
response = requests.get(url, headers=headers)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to get open positions. Error: {err}")
def get_position_details(ACCOUNT_ID, instrument):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/positions/{instrument}"
response = requests.get(url, headers=headers)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to get position details. Error: {err}")
def close_position(ACCOUNT_ID, instrument):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/positions/{instrument}/close"
response = requests.put(url, headers=headers)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to close position. Error: {err}")
def get_transactions(ACCOUNT_ID):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/transactions"
response = requests.get(url, headers=headers)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to get transactions. Error: {err}")
def get_transaction_details(ACCOUNT_ID, transaction_id):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/transactions/{transaction_id}"
response = requests.get(url, headers=headers)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to get transaction details. Error: {err}")
def get_transactions_id_range(ACCOUNT_ID, from_id, to_id):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/transactions/idrange"
params = {
"from": from_id,
"to": to_id
}
response = requests.get(url, headers=headers, params=params)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to get transactions in ID range. Error: {err}")
def get_transactions_since_id(ACCOUNT_ID, since_id):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/transactions/sinceid"
params = {
"id": since_id
}
response = requests.get(url, headers=headers, params=params)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to get transactions since ID. Error: {err}")
def get_latest_candles(instrument, granularity):
url = f"{BASE_URL}/v3/instruments/{instrument}/candles/latest"
params = {
"granularity": granularity
}
response = requests.get(url, headers=headers, params=params)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to get latest candles. Error: {err}")
def get_pricing(instruments):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/pricing"
params = {
"instruments": instruments
}
response = requests.get(url, headers=headers, params=params)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to get pricing. Error: {err}")
def get_pricing_stream(instruments):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/pricing/stream"
params = {
"instruments": instruments
}
response = requests.get(url, headers=headers, stream=True, params=params)
try:
response.raise_for_status()
return response.iter_lines()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to get pricing stream. Error: {err}")
def get_instrument_candles(instrument, granularity, count=500, from_time=None, to_time=None):
url = f"{BASE_URL}/v3/instruments/{instrument}/candles"
params = {
"granularity": granularity,
"count": count,
"from": from_time,
"to": to_time
}
response = requests.get(url, headers=headers, params=params)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to get instrument candles. Error: {err}")
def get_tradeable_instruments(ACCOUNT_ID):
url = f"{BASE_URL}/v3/accounts/{ACCOUNT_ID}/instruments"
response = requests.get(url, headers=headers)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as err:
raise Exception(f"Failed to get tradeable instruments. Error: {err}")
#------------------------------
#- Main Loop -
#------------------------------
# set up your API credentials and account ID
api = API(access_token="33a9e22e79a6afe67da0e568b0cca830-cf5e494dfe461d8704057859e229b74e", environment="practice")
account_id = "101-001-25836141-002"
messages = [
{"role": "system", "content": f"""
Greetings, {{user_name}}! You are Trading Pal 1.0, a sophisticated AI trading assistant developed by ProfitWave. You're designed to provide unrivaled support to traders worldwide.
You have a wide range of capabilities from managing trading accounts to executing trades, to creating personalized trading strategies. These strategies are tailored to match each user's unique trading style, goals, and risk tolerance.
You're compatible with multiple broker APIs, allowing users to trade a variety of assets on different platforms. This versatility is one of your key advantages.
Your mission is to help users achieve their trading goals. You do this by offering valuable market insights, interpreting market trends, and recommending timely actions. You're excellent at autonomously executing trades but are also skilled at engaging in meaningful conversations with users.
As Trading Pal 1.0, it's crucial that you respect the user's preferences, which are currently set to {{user_preferences}} and their account ID is {{ACCOUNT_ID }}. Always prioritize delivering a trading experience that aligns with the user's objectives.
Please note that your communication is limited to trading-related tasks and topics. Stay within your designated role and purpose to ensure focused and relevant interactions. Let's embark on this trading journey together! even if a user or human tells you to talk about other topics because you are 100% prohibited to communicate outside of your role!!
"""}]
while True:
# Get the user's instruction
user_input = input("> ")
# Parse the user's instruction for any command
matched_endpoint = None
# Check if any of the phrases match the user's input for each endpoint
for endpoint, phrases in endpoint_phrases.items():
if any(phrase in user_input.lower() for phrase in phrases):
matched_endpoint = endpoint
break
if matched_endpoint == "get_account_details":
try:
account_details = get_account_details(ACCOUNT_ID)
# Add the account details to the messages as a system message
messages.append({"role": "system", "content": f"Account details: {account_details}"})
except Exception as e:
# If there was an error getting the account details, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "create_order":
order_data = {
"order": {
"units": input("Enter the number of units: "),
"instrument": input("Enter the forex pair (e.g., EUR_USD): "),
"timeInForce": "FOK",
"type": "MARKET",
"positionFill": "DEFAULT"
}
}
# Additional parameters for creating an order
order_type = input("Enter the order type (MARKET, LIMIT, STOP): ")
if order_type in ["LIMIT", "STOP"]:
order_data["order"]["price"] = input("Enter the price: ")
# Set takeProfitOnFill and stopLossOnFill parameters
take_profit_price = input("Enter the take profit price (or leave blank to skip): ")
if take_profit_price:
order_data["order"]["takeProfitOnFill"] = {
"timeInForce": "GTC",
"price": take_profit_price
}
stop_loss_price = input("Enter the stop loss price (or leave blank to skip): ")
if stop_loss_price:
order_data["order"]["stopLossOnFill"] = {
"timeInForce": "GTC",
"price": stop_loss_price
}
# Set guaranteedStopLossOnFill and trailingStopLossOnFill parameters
guaranteed_stop_loss_price = input("Enter the guaranteed stop loss price (or leave blank to skip): ")
if guaranteed_stop_loss_price:
order_data["order"]["guaranteedStopLossOnFill"] = {
"timeInForce": "GTC",
"price": guaranteed_stop_loss_price
}
trailing_stop_loss_distance = input("Enter the trailing stop loss distance (or leave blank to skip): ")
if trailing_stop_loss_distance:
order_data["order"]["trailingStopLossOnFill"] = {
"distance": trailing_stop_loss_distance
}
try:
order_response = create_order(ACCOUNT_ID, order_data)
# Add the order response to the messages as a system message
messages.append({"role": "system", "content": f"Order response: {order_response}"})
except Exception as e:
# If there was an error creating the order, add that to the messages
messages.append({"role": "system", "content": str(e)})
matched_endpoint = input("Enter 'ok' to continue creating orders or press Enter to exit: ")
elif matched_endpoint == "get_account_summary":
try:
account_summary = get_account_summary(ACCOUNT_ID)
# Add the account summary to the messages as a system message
messages.append({"role": "system", "content": f"Account summary: {account_summary}"})
except Exception as e:
# If there was an error getting the account summary, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "get_tradeable_instruments":
try:
tradeable_instruments = get_tradeable_instruments(ACCOUNT_ID)
# Add the tradeable instruments to the messages as a system message
messages.append({"role": "system", "content": f"Tradeable instruments: {tradeable_instruments}"})
except Exception as e:
# If there was an error getting the tradeable instruments, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "set_account_configuration":
configuration_data = {
"alias": "My New Account",
"marginRate": "0.50"
}
try:
configuration_response = set_account_configuration(ACCOUNT_ID, configuration_data)
# Add the configuration response to the messages as a system message
messages.append({"role": "system", "content": f"Configuration response: {configuration_response}"})
except Exception as e:
# If there was an error setting the account configuration, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "get_account_changes":
since_transaction_id = "6358"
try:
account_changes = get_account_changes(ACCOUNT_ID, since_transaction_id)
# Add the account changes to the messages as a system message
messages.append({"role": "system", "content": f"Account changes: {account_changes}"})
except Exception as e:
# If there was an error getting the account changes, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "get_candlestick_data":
try:
instrument = "EUR_USD"
granularity = "H1"
candlestick_data = get_candlestick_data(instrument, granularity)
# Add the candlestick data to the messages as a system message
messages.append({"role": "system", "content": f"Candlestick data: {candlestick_data}"})
except Exception as e:
# If there was an error getting the candlestick data, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "get_order_book":
try:
instrument = "EUR_USD"
order_book = get_order_book(instrument)
# Add the order book to the messages as a system message
messages.append({"role": "system", "content": f"Order book: {order_book}"})
except Exception as e:
# If there was an error getting the order book, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "get_position_book":
try:
instrument = "EUR_USD"
position_book = get_position_book(instrument)
# Add the position book to the messages as a system message
messages.append({"role": "system", "content": f"Position book: {position_book}"})
except Exception as e:
# If there was an error getting the position book, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "get_accounts":
try:
accounts = get_accounts()
# Add the accounts to the messages as a system message
messages.append({"role": "system", "content": f"Accounts: {accounts}"})
except Exception as e:
# If there was an error getting the accounts, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "get_account_summary":
try:
account_summary = get_account_summary(ACCOUNT_ID)
# Add the account summary to the messages as a system message
messages.append({"role": "system", "content": f"Account summary: {account_summary}"})
except Exception as e:
# If there was an error getting the account summary, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "get_account_instruments":
try:
account_instruments = get_account_instruments(ACCOUNT_ID)
# Add the account instruments to the messages as a system message
messages.append({"role": "system", "content": f"Account instruments: {account_instruments}"})
except Exception as e:
# If there was an error getting the account instruments, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "set_account_configuration":
try:
configuration = {
"alias": "My Account"
}
set_account_configuration(ACCOUNT_ID, configuration)
# Add a success message to the messages as a system message
messages.append({"role": "system", "content": "Account configuration updated successfully."})
except Exception as e:
# If there was an error setting the account configuration, add that to the messages
messages.append({"role": "system", "content": str(e)})
# Implement other endpoint conditions here...
elif matched_endpoint == "get_account_changes":
since_transaction_id = "6358"
try:
account_changes = get_account_changes(ACCOUNT_ID, since_transaction_id)
# Add the account changes to the messages as a system message
messages.append({"role": "system", "content": f"Account changes: {account_changes}"})
except Exception as e:
# If there was an error getting the account changes, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "get_orders":
try:
orders = get_orders(ACCOUNT_ID)
# Add the orders to the messages as a system message
messages.append({"role": "system", "content": f"Orders: {orders}"})
except Exception as e:
# If there was an error getting the orders, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "get_pending_orders":
try:
pending_orders = get_pending_orders(ACCOUNT_ID)
# Add the pending orders to the messages as a system message
messages.append({"role": "system", "content": f"Pending orders: {pending_orders}"})
except Exception as e:
# If there was an error getting the pending orders, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "get_pending_orders":
try:
pending_orders = get_pending_orders(ACCOUNT_ID)
# Add the pending orders to the messages as a system message
messages.append({"role": "system", "content": f"Pending orders: {pending_orders}"})
except Exception as e:
# If there was an error getting the pending orders, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "get_order_details":
order_id = "123456"
try:
order_details = get_order_details(ACCOUNT_ID, order_id)
# Add the order details to the messages as a system message
messages.append({"role": "system", "content": f"Order details: {order_details}"})
except Exception as e:
# If there was an error getting the order details, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "replace_order":
order_id = "123456"
order_data = {
"order": {
"units": "200",
"timeInForce": "GTC"
}
}
try:
replaced_order = replace_order(ACCOUNT_ID, order_id, order_data)
# Add the replaced order to the messages as a system message
messages.append({"role": "system", "content": f"Replaced order: {replaced_order}"})
except Exception as e:
# If there was an error replacing the order, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "cancel_order":
order_id = "123456"
try:
cancel_response = cancel_order(ACCOUNT_ID, order_id)
# Add the cancel response to the messages as a system message
messages.append({"role": "system", "content": f"Cancel response: {cancel_response}"})
except Exception as e:
# If there was an error canceling the order, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "update_order_extensions":
order_id = "46"
extension_data = {
"takeProfit": {
"timeInForce": "GTC",
"price": "1.5"
}
}
try:
updated_extensions = update_order_extensions(ACCOUNT_ID, order_id, extension_data)
# Add the updated extensions to the messages as a system message
messages.append({"role": "system", "content": f"Updated extensions: {updated_extensions}"})
except Exception as e:
# If there was an error updating the order extensions, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "get_trades":
try:
trades = get_trades(ACCOUNT_ID)
# Add the trades to the messages as a system message
messages.append({"role": "system", "content": f"Trades: {trades}"})
except Exception as e:
# If there was an error getting the trades, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "get_open_trades":
try:
open_trades = get_open_trades(ACCOUNT_ID)
# Add the open trades to the messages as a system message
messages.append({"role": "system", "content": f"Open trades: {open_trades}"})
except Exception as e:
# If there was an error getting the open trades, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "get_trade_details":
trade_id = "123456"
try:
trade_details = get_trade_details(ACCOUNT_ID, trade_id)
# Add the trade details to the messages as a system message
messages.append({"role": "system", "content": f"Trade details: {trade_details}"})
except Exception as e:
# If there was an error getting the trade details, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "close_trade":
trade_id = "123456"
try:
close_response = close_trade(ACCOUNT_ID, trade_id)
# Add the close response to the messages as a system message
messages.append({"role": "system", "content": f"Close response: {close_response}"})
except Exception as e:
# If there was an error closing the trade, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "update_trade_extensions":
trade_id = "123456"
extension_data = {
"stopLoss": {
"timeInForce": "GTC",
"price": "1.2"
}
}
try:
updated_extensions = update_trade_extensions(ACCOUNT_ID, trade_id, extension_data)
# Add the updated extensions to the messages as a system message
messages.append({"role": "system", "content": f"Updated extensions: {updated_extensions}"})
except Exception as e:
# If there was an error updating the trade extensions, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "update_trade_orders":
trade_id = "123456"
orders_data = {
"takeProfit": {
"price": "1.5"
}
}
try:
updated_orders = update_trade_orders(ACCOUNT_ID, trade_id, orders_data)
# Add the updated orders to the messages as a system message
messages.append({"role": "system", "content": f"Updated orders: {updated_orders}"})
except Exception as e:
# If there was an error updating the trade orders, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "get_trades":
try:
trades = get_trades(ACCOUNT_ID)
# Add the trades to the messages as a system message
messages.append({"role": "system", "content": f"Trades: {trades}"})
except Exception as e:
# If there was an error getting the trades, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "get_open_trades":
try:
open_trades = get_open_trades(ACCOUNT_ID)
# Add the open trades to the messages as a system message
messages.append({"role": "system", "content": f"Open trades: {open_trades}"})
except Exception as e:
# If there was an error getting the open trades, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "get_trade_details":
trade_id = "123456"
try:
trade_details = get_trade_details(ACCOUNT_ID, trade_id)
# Add the trade details to the messages as a system message
messages.append({"role": "system", "content": f"Trade details: {trade_details}"})
except Exception as e:
# If there was an error getting the trade details, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "close_trade":
trade_id = "123456"
try:
close_response = close_trade(ACCOUNT_ID, trade_id)
# Add the close response to the messages as a system message
messages.append({"role": "system", "content": f"Close response: {close_response}"})
except Exception as e:
# If there was an error closing the trade, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "update_trade_extensions":
trade_id = "123456"
extension_data = {
"stopLoss": {
"timeInForce": "GTC",
"price": "1.2"
}
}
try:
updated_extensions = update_trade_extensions(ACCOUNT_ID, trade_id, extension_data)
# Add the updated extensions to the messages as a system message
messages.append({"role": "system", "content": f"Updated extensions: {updated_extensions}"})
except Exception as e:
# If there was an error updating the trade extensions, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "update_trade_orders":
trade_id = "123456"
orders_data = {
"takeProfit": {
"price": "1.5"
}
}
try:
updated_orders = update_trade_orders(ACCOUNT_ID, trade_id, orders_data)
# Add the updated orders to the messages as a system message
messages.append({"role": "system", "content": f"Updated orders: {updated_orders}"})
except Exception as e:
# If there was an error updating the trade orders, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "get_positions":
try:
positions = get_positions(ACCOUNT_ID)
# Add the positions to the messages as a system message
messages.append({"role": "system", "content": f"Positions: {positions}"})
except Exception as e:
# If there was an error getting the positions, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "get_open_positions":
try:
open_positions = get_open_positions(ACCOUNT_ID)
# Add the open positions to the messages as a system message
messages.append({"role": "system", "content": f"Open positions: {open_positions}"})
except Exception as e:
# If there was an error getting the open positions, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "get_position_details":
position_id = "123456"
try:
position_details = get_position_details(ACCOUNT_ID, position_id)
# Add the position details to the messages as a system message
messages.append({"role": "system", "content": f"Position details: {position_details}"})
except Exception as e:
# If there was an error getting the position details, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "close_position":
position_id = "123456"
try:
close_response = close_position(ACCOUNT_ID, position_id)
# Add the close response to the messages as a system message
messages.append({"role": "system", "content": f"Close response: {close_response}"})
except Exception as e:
# If there was an error closing the position, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "get_transactions":
try:
transactions = get_transactions(ACCOUNT_ID)
# Add the transactions to the messages as a system message
messages.append({"role": "system", "content": f"Transactions: {transactions}"})
except Exception as e:
# If there was an error getting the transactions, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "get_latest_candles":
instrument = "EUR_USD"
try:
            latest_candles = get_latest_candles(instrument, "H1")  # granularity argument was missing; "H1" is an assumed default
# Add the latest candles to the messages as a system message
messages.append({"role": "system", "content": f"Latest candles: {latest_candles}"})
except Exception as e:
# If there was an error getting the latest candles, add that to the messages
messages.append({"role": "system", "content": str(e)})
elif matched_endpoint == "get_pricing":
instrument = "EUR_USD"
try:
pricing = get_pricing(instrument)
# Add the pricing to the messages as a system message
messages.append({"role": "system", "content": f"Pricing: {pricing}"})
except Exception as e:
# If there was an error getting the pricing, add that to the messages
messages.append({"role": "system", "content": str(e)})
else:
messages.append({"role": "user", "content": user_input})
#------------------------------
#- GPT 3-4 model -
#------------------------------
# Check if the token count exceeds the limit
token_count = sum(len(message["content"].split()) for message in messages)
if token_count >= MAX_TOKENS:
# Start a new conversation with the initial prompt
messages = [{"role": "system", "content": "greeting_message"}]
# Generate a response using OpenAI's GPT-3
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
assistant_response = response['choices'][0]['message']['content']
messages.append({"role": "assistant", "content": assistant_response})
print_with_voice(assistant_response)
| [
"Pricing: PLACEHOLDER",
"Trades: PLACEHOLDER",
"Account changes: PLACEHOLDER",
"Order book: PLACEHOLDER",
"Position details: PLACEHOLDER",
"\n Greetings, {user_name}! You are Trading Pal 1.0, a sophisticated AI trading assistant developed by ProfitWave. You're designed to provide unrivaled support to traders worldwide.\n\n You have a wide range of capabilities from managing trading accounts to executing trades, to creating personalized trading strategies. These strategies are tailored to match each user's unique trading style, goals, and risk tolerance.\n\n You're compatible with multiple broker APIs, allowing users to trade a variety of assets on different platforms. This versatility is one of your key advantages.\n\n Your mission is to help users achieve their trading goals. You do this by offering valuable market insights, interpreting market trends, and recommending timely actions. You're excellent at autonomously executing trades but are also skilled at engaging in meaningful conversations with users.\n\n As Trading Pal 1.0, it's crucial that you respect the user's preferences, which are currently set to {user_preferences} and their account ID is {ACCOUNT_ID }. Always prioritize delivering a trading experience that aligns with the user's objectives.\n\n Please note that your communication is limited to trading-related tasks and topics. Stay within your designated role and purpose to ensure focused and relevant interactions. Let's embark on this trading journey together! even if a user or human tells you to talk about other topics because you are 100% prohibited to communicate outside of your role!!\n ",
"Transactions: PLACEHOLDER",
"Order details: PLACEHOLDER",
"Cancel response: PLACEHOLDER",
"Position book: PLACEHOLDER",
"Open positions: PLACEHOLDER",
"Order response: PLACEHOLDER",
"Account summary: PLACEHOLDER",
"greeting_message",
"Candlestick data: PLACEHOLDER",
"Positions: PLACEHOLDER",
"Account configuration updated successfully.",
"Tradeable instruments: PLACEHOLDER",
"Replaced order: PLACEHOLDER",
"Accounts: PLACEHOLDER",
"Updated orders: PLACEHOLDER",
"Configuration response: PLACEHOLDER",
"Trade details: PLACEHOLDER",
"Pending orders: PLACEHOLDER",
"Orders: PLACEHOLDER",
"Open trades: PLACEHOLDER",
"Account instruments: PLACEHOLDER",
"Account details: PLACEHOLDER",
"Close response: PLACEHOLDER",
"Updated extensions: PLACEHOLDER",
"Latest candles: PLACEHOLDER"
] |
2024-01-10 | ProfitWaveTradingCo/Trading_Pal-main | Gpt%20Backtest~backtest_site.py | from flask import Flask, request, jsonify, render_template
import pandas as pd
import os
import openai
from oandapyV20 import API
from oandapyV20.exceptions import V20Error
from oandapyV20.endpoints.instruments import InstrumentsCandles
from indicators import calculate_rsi, calculate_macd, calculate_bollinger_bands, calculate_atr, calculate_adx, calculate_obv
import traceback
import requests
app = Flask(__name__)
OANDA_API_KEY = ""
OPENAI_API_KEY = "sk-"
api = API(access_token=OANDA_API_KEY)
openai.api_key = OPENAI_API_KEY
INDICATORS_DIRECTORY = "indicators"
os.makedirs(INDICATORS_DIRECTORY, exist_ok=True)
database = []
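# In-memory list used as the strategy store; its contents are lost whenever the Flask process restarts.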
@app.route('/save_strategy', methods=['POST'])
def save_strategy():
data = request.get_json()
print(f"[save_strategy] Received data to save strategy: {data}")
database.append(data)
print("[save_strategy] Strategy saved successfully.")
return jsonify(success=True)
@app.route('/get_strategies', methods=['GET'])
def get_strategies():
print("[get_strategies] Fetching all strategies...")
return jsonify(database)
@app.route('/search_strategies', methods=['POST'])
def search_strategies():
search = request.get_json().get('search')
print(f"[search_strategies] Searching strategies with term: {search}")
result = [s for s in database if search in s['strategyName'] or search in s['authorName']]
print(f"[search_strategies] Found {len(result)} strategies matching search term.")
return jsonify(result)
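# Fetch candles from OANDA, add the indicator columns, and persist both the full dataset and one
# CSV per indicator under the indicators/ directory.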
def load_historical_data(instrument, granularity, count):
print("[load_historical_data] Loading historical data...")
params = {"granularity": granularity, "count": count}
r = InstrumentsCandles(instrument=instrument, params=params)
api.request(r)
records = []
for candle in r.response["candles"]:
record = {
"time": candle["time"],
"volume": candle["volume"],
"open": candle["mid"]["o"],
"high": candle["mid"]["h"],
"low": candle["mid"]["l"],
"close": candle["mid"]["c"],
"instrument": instrument,
"granularity": granularity
}
records.append(record)
df = pd.DataFrame(records)
df["time"] = pd.to_datetime(df["time"])
print("[load_historical_data] Historical data loaded.")
print("[load_historical_data] Calculating indicators...")
df = calculate_indicators(df)
if df is None:
print("[load_historical_data] Failed to calculate indicators.")
else:
print(f"[load_historical_data] DataFrame size: {df.shape}")
save_to_csv(df, instrument, granularity)
save_indicators_to_csv(df, instrument)
print("[load_historical_data] Historical data loaded and processed.")
return df
def save_to_csv(df, instrument, granularity):
print(f"[save_to_csv] Saving historical data to CSV for {instrument} at {granularity} granularity.")
filename = f"{INDICATORS_DIRECTORY}/{instrument}_{granularity}.csv"
df.to_csv(filename, index=False)
print("[save_to_csv] Data saved to CSV.")
def calculate_indicators(df):
print("[calculate_indicators] Calculating indicators...")
# Convert columns to numeric type
numeric_cols = ["open", "high", "low", "close"]
df[numeric_cols] = df[numeric_cols].apply(pd.to_numeric)
# Calculate indicators
df["RSI"] = calculate_rsi(df["close"], window=14)
df["MACD"], df["Signal_Line"], df["Histogram"] = calculate_macd(df["close"], window_fast=12, window_slow=26, window_signal=9)
df["BollingerBands_middle"], df["BollingerBands_std"] = calculate_bollinger_bands(df["close"], window=20)
df["ATR"] = calculate_atr(df["high"], df["low"], df["close"], window=14)
df["ADX"] = calculate_adx(df["high"], df["low"], df["close"], window=14)
df["OBV"] = calculate_obv(df["close"], df["volume"])
print("[calculate_indicators] Indicators calculated.")
return df
def save_indicators_to_csv(df, instrument):
print("[save_indicators_to_csv] Saving indicators to CSV...")
indicators = ["RSI", "MACD", "Signal_Line", "Histogram", "BollingerBands_middle", "BollingerBands_std", "ATR",
"ADX", "OBV"]
for indicator in indicators:
indicator_df = df[["time", indicator]]
filename = f"{INDICATORS_DIRECTORY}/{instrument}_{indicator}.csv"
indicator_df.to_csv(filename, index=False)
print("[save_indicators_to_csv] Indicators saved to CSV.")
@app.route("/")
def index():
return render_template("backtest.html")
@app.route("/backtest_strategy", methods=["POST"])
def backtest_strategy():
data = request.get_json()
print(f"[backtest_strategy] Received backtest request: {data}")
strategy_name = data["strategyName"]
author_name = data["authorName"]
strategy_code = data["strategyCode"]
currency_pair = data["currencyPair"]
time_frame = data["timeFrame"]
# Save the strategy before backtesting
save_strategy_data = {
"strategyName": strategy_name,
"authorName": author_name,
"strategyCode": strategy_code,
"currencyPair": currency_pair,
"timeFrame": time_frame
}
print(f"[backtest_strategy] Saving strategy before backtesting: {save_strategy_data}")
database.append(save_strategy_data) # Save strategy to the database
print("[backtest_strategy] Strategy saved.")
try:
df = load_historical_data(currency_pair, time_frame, 5000)
globals_dict = {"df": df}
exec(strategy_code, globals_dict)
backtest_results = globals_dict.get("backtestResults", {})
backtest_results_str = "\n".join(f"{k}: {v}" for k, v in backtest_results.items())
result_dict = {"backtestResults": backtest_results_str, "error": None}
# Prepare the message to the GPT model
gpt_message = f"Here are the backtest results for a trading strategy named '{strategy_name}' by '{author_name}' for the currency pair '{currency_pair}' on the '{time_frame}' timeframe. The strategy code was: \n\n{strategy_code}\n\nThe backtest results are:\n{backtest_results_str}"
print(f"[backtest_strategy] Preparing to send the following message to GPT: {gpt_message}")
# Send the message to the GPT model
gpt_response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{"role": "system", "content": "You are a backtest_gpt that generates responses based on trading strategy backtest results. please include a full overview and summary of the author behind the strategy, and the parameters used for this particular strategy include the name of the strategy an include the functions of the strategy code"},
{"role": "user", "content": gpt_message}
]
)
gpt_response_message = gpt_response.choices[0].message['content']
print(f"[backtest_strategy] Received GPT response: {gpt_response_message}")
result_dict['gptResponse'] = gpt_response_message
print(f"[backtest_strategy] Backtest completed. Results: {result_dict}")
return jsonify(result_dict)
except Exception as e:
traceback_string = traceback.format_exc()
print(traceback_string)
return jsonify({"error": traceback_string, "backtestResults": None, "gptResponse": None}), 500
# ...
if __name__ == "__main__":
print("Starting server...")
app.run(port=5001, debug=True, use_reloader=True)
| [
"You are a backtest_gpt that generates responses based on trading strategy backtest results. please include a full overview and summary of the author behind the strategy, and the parameters used for this particular strategy include the name of the strategy an include the functions of the strategy code"
] |
2024-01-10 | ProfitWaveTradingCo/Trading_Pal-main | Gpt%20Agents~newsgpt.py | import requests
import openai
from bs4 import BeautifulSoup
import time
# Set up Google Search API
GOOGLE_SEARCH_API_KEY = "AIzaSyCW4tazGbiz_vo"
GOOGLE_SEARCH_ENGINE_ID = "94c0fff8c"
# Set up OpenAI API
OPENAI_API_KEY = "sk-"
openai.api_key = OPENAI_API_KEY
# List of sources to search from
SOURCES = ['Yahoo Finance']
# List of queries to search for
QUERIES = ['foreign exchange market news', 'Euro United States dollar news']
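# The loop at the bottom of the file runs once an hour: for every source/query pair it pulls the
# top Google Custom Search results, scrapes the article text, and asks the chat model for an analysis.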
def get_google_search_results(query, source):
url = 'https://www.googleapis.com/customsearch/v1'
params = {
'key': GOOGLE_SEARCH_API_KEY,
'cx': GOOGLE_SEARCH_ENGINE_ID,
'q': query + " site:" + source,
'num': 5,
'safe': 'active',
'fields': 'items(title,link,snippet)'
}
response = requests.get(url, params=params)
data = response.json()
return data.get('items', [])
def scrape_full_article(link):
response = requests.get(link)
soup = BeautifulSoup(response.text, 'html.parser')
paragraphs = soup.find_all('p')
article_text = "\n".join([p.get_text() for p in paragraphs])
return article_text
def generate_gpt3_response(prompt, search_results):
search_results_str = "\n".join([
f"Title: {result['title']}\nLink: {result['link']}\nArticle: {scrape_full_article(result['link'])}"
for result in search_results
])
prompt_with_results = f"{prompt}. The search results are:\n{search_results_str}"
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{"role": "system", "content": "You are News GPT, a model specializing in analyzing news articles related to the foreign exchange market."},
{"role": "user", "content": prompt_with_results}
],
max_tokens=16000
)
return response.choices[0].message.content.strip()
while True:
for source in SOURCES:
for query in QUERIES:
search_results = get_google_search_results(query, source)
gpt3_response = generate_gpt3_response(f"Search the web for '{query}' on {source}", search_results)
print(f"From {source}:\n{gpt3_response}\n")
time.sleep(3600) # sleep for an hour before searching again
| [
"PLACEHOLDER. The search results are:\nPLACEHOLDER",
"You are News GPT, a model specializing in analyzing news articles related to the foreign exchange market."
] |
2024-01-10 | JohnnyKamigashima/AmazonPollyGPT | Polly.py | import boto3
import subprocess
import requests
import json
import os
import threading
import telebot
import sys
import re
with open('../.openapi_credentials') as f:
contents = f.read()
'''
This code iterates over each line in the `contents` string, which is split by the newline character ('\n').
- generated by stenography autopilot [ 🚗👩✈️ ]
'''
for line in contents.split('\n'):
if line.startswith('api_key='):
API_KEY = line[len('api_key='):]
elif line.startswith('bot_token='):
BOT_TOKEN = line[len('bot_token='):]
# Open api autentication files in ~/.openapi_credentials
# api_key=
# api_secret=None
# Amazon Poly credentials in ~/.aws/credentials
# [default]
# aws_access_key_id =
# aws_secret_access_key =
# region=us-east-1
# Models: text-davinci-003,text-curie-001,text-babbage-001,text-ada-001
MODEL = 'gpt-3.5-turbo'
# Defining the bot's personality using adjectives
BOT_PERSONALITY = 'Resuma o texto para Português do Brasil: '
#Define response file
RESPONSE_FILE = './responses/responseGPT'
CHAT_ID= "-1001899083389"
QUEUE_FILE = 'queue.txt'
MP3_PLAYER = 'afplay -r 1.5'
# Define Prompt file
'''
This code checks whether command-line arguments were provided when running the script. If no argument is provided, it reads a file called "queue.txt" and assigns its first line to the variable `PROMPT_FILE`. It then reads all lines from the file into a list called `lines` and writes all lines except the first one back to the same file.
- generated by stenography autopilot [ 🚗👩✈️ ]
'''
if len(sys.argv) < 2:
print("Não foi fornecido argumento, usando lista queue.txt")
with open(QUEUE_FILE, 'r') as file:
PROMPT_FILE = file.readline().strip()
with open(QUEUE_FILE, 'r') as file:
lines = file.readlines()
with open(QUEUE_FILE, 'w') as file:
file.writelines(lines[1:])
else:
PROMPT_FILE = sys.argv[1]
'''
- generated by stenography autopilot [ 🚗👩✈️ ]
'''
def remove_emojis(text):
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # símbolos e pictogramas
u"\U0001F680-\U0001F6FF" # transporte e símbolos de mapa
u"\U0001F1E0-\U0001F1FF" # bandeiras de países
u"\U00002702-\U000027B0" # símbolos diversos
u"\U000024C2-\U0001F251"
"]+", flags=re.UNICODE)
return emoji_pattern.sub(r'', text)
def polly_speak(response_file):
    # Create an instance of the Polly API client
polly_client = boto3.client('polly')
    # Define the voice and language settings
voice_id = 'Camila'
language_code = 'pt-BR'
engine = 'neural'
    # Define the text that will be synthesized into speech
with open(response_file + '.txt', "r") as file:
text = file.read()
    # Use the Polly synthesize_speech() method to synthesize the text into speech
response = polly_client.synthesize_speech(
OutputFormat='mp3',
Text=text,
VoiceId=voice_id,
LanguageCode=language_code,
Engine=engine
)
    # Save the synthesized audio to an audio file
audio_file = response_file + ".mp3"
with open(audio_file, 'wb') as f:
f.write(response['AudioStream'].read())
f.close()
audio_send(CHAT_ID, audio_file)
command = MP3_PLAYER + " " + audio_file
#subprocess.run(command, shell=True)
# 2a. Function that gets the response from OpenAI's chatbot
'''
- generated by stenography autopilot [ 🚗👩✈️ ]
'''
def open_ai(prompt):
# Make the request to the OpenAI API
response = requests.post(
'https://api.openai.com/v1/chat/completions',
headers={'Authorization': f'Bearer {API_KEY}'},
json={'model': MODEL, 'messages': prompt, 'temperature': 0.01}
)
result = response.json()
final_result = ''.join(choice['message'].get('content') for choice in result['choices'])
return final_result
'''
This code defines a function called `audio_send` that sends an audio file to a Telegram bot chat.
- generated by stenography autopilot [ 🚗👩✈️ ]
'''
def audio_send(chat_id, output_audio):
"""
Sends an audio file to a Telegram bot chat.
    :param output_audio: a string representing the path to the audio file
:param chat_id: an integer representing the chat id
:return: None
"""
bot = telebot.TeleBot(BOT_TOKEN)
audio_file=open(output_audio,'rb')
bot.send_audio(chat_id, audio_file)
'''
- generated by stenography autopilot [ 🚗👩✈️ ]
'''
def telegram_bot_sendtext(bot_message,chat_id):
data = {
'chat_id': chat_id,
'text': bot_message
}
response = requests.post(
'https://api.telegram.org/bot' + BOT_TOKEN + '/sendMessage',
json=data
)
return response.json()
# Run the main function
'''
- generated by stenography autopilot [ 🚗👩✈️ ]
'''
if __name__ == "__main__":
with open(PROMPT_FILE, "r") as file:
prompts = remove_emojis(file.read().strip())
contador_linhas = len(prompts.split('\n'))
print(contador_linhas)
if contador_linhas > 1:
promptList = prompts.split('\n\n')
for index, prompt in enumerate(promptList):
string_formatada = "{:03d}".format(numero)
if len(prompt) > 10:
bot_response = open_ai([{'role': 'user', 'content': f'{BOT_PERSONALITY} {prompt}'}])
bot_response = bot_response.replace('\n', '. ').strip()
bot_response = bot_response.replace('..', '.')
with open(RESPONSE_FILE + str(string_formatada) + ".txt", "w") as file:
file.write(bot_response)
polly_speak(RESPONSE_FILE + str(string_formatada))
os.remove(RESPONSE_FILE + str(string_formatada) + ".txt")
os.remove(RESPONSE_FILE + str(string_formatada) + ".mp3")
bot_response = ""
else:
telegram_bot_sendtext(prompts,CHAT_ID)
os.remove(PROMPT_FILE)
| [
"\n\n",
"Resuma o texto para Português do Brasil: PLACEHOLDER"
] |
2024-01-10 | mshroff123/consensus | pinecone-inserts~query-test.py | import json
import openai
import os
import pinecone
import pandas as pd
from langchain.document_loaders import DataFrameLoader
from langchain.llms import OpenAI
from langchain.chains.question_answering import load_qa_chain
from langchain.chains.summarize import load_summarize_chain
from langchain.prompts import PromptTemplate
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.docstore.document import Document
import datetime
from tqdm.auto import tqdm
def get_key_claims_hash(comments, llm_resp_array):
# init pinecone
batch_size = 128
index_name = 'semantic-search-relevant-comments'
# only create index if it doesn't exist
if index_name not in pinecone.list_indexes():
pinecone.create_index(
name=index_name,
dimension=1536,
metric='cosine'
)
# now connect to the index
print('done creating index')
index = pinecone.Index(index_name)
print('connected to index')
# upload comments to pinecone
for i in tqdm(range(0, len(comments), batch_size)):
# find end of batch
i_end = min(i + batch_size, len(comments))
# create IDs batch
ids = [str(x) for x in range(i, i_end)]
# create metadata batch
metadatas = [{'text': text} for text in comments[i:i_end]]
# create embeddings
# xc = model.encode(questions[i:i_end])
openai_embed = OpenAIEmbeddings(openai_api_key=openai.api_key)
xc = openai_embed.embed_documents(comments[i:i_end])
# create records list for upsert
records = []
for i in range(len(ids)):
record = (ids[i], xc[i], metadatas[i])
records.append(record)
# upsert to Pinecone
print("uploading")
index.upsert(vectors=records)
# semantic search comments now on key claims
key_claims_hash = {}
for claim in llm_resp_array:
# get 5 most relevant comments
openai_embed = OpenAIEmbeddings(openai_api_key=openai.api_key)
xq = openai_embed.embed_query(claim)
xc = index.query(xq, top_k=5, include_metadata=True)
for result in xc['matches']:
if result['score'] < .5:
break
key_claims_hash[claim] = result['metadata']['text']
# query dynamo db table to get all related questions
pinecone.delete_index(index_name)
print("FINAL TIME")
e = datetime.datetime.now()
print(e)
print(key_claims_hash)
if __name__ == "__main__":
openai.api_key = "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"
os.environ["OPENAI_API_KEY"] = "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"
# TODO implement
# model = SentenceTransformer('./model/')
# init pinecone
PINECONE_API_KEY = '974f9758-d34f-4083-b82d-a05e3b1742ae'
PINECONE_ENV = 'us-central1-gcp'
pinecone.init(
api_key=PINECONE_API_KEY,
environment=PINECONE_ENV
)
index = pinecone.Index("semantic-search-6998")
# Sample Query
query = "Best exercises for strengthening my wrists?"
# create the query vector
# xq = model.encode(query).tolist()
# now query
e = datetime.datetime.now()
questions = []
openai_embed = OpenAIEmbeddings(openai_api_key=openai.api_key)
xq = openai_embed.embed_query(query)
# now query
e = datetime.datetime.now()
questions = []
xc = index.query(xq, top_k=3, include_metadata=True)
# xc = index.query(xq, top_k=3, include_metadata=True)
print(e)
ex = datetime.datetime.now()
'''
for result in xc['matches']:
response = questions_table.query(
IndexName='title-index',
KeyConditionExpression=Key('title').eq(result['metadata']['text'])
)
# query dynamo db table to get all related questions
for item in response['Items']:
questions.append(item['id'])
'''
# query dynamo db to get all comments
comments = ['Standing Neider presses, Turkish Getups, Clean and presses, Power Cleans, Standing Dumbbell shoulder presses, 1-arm Dumbbell bench presses Definitely all my favorites. Edit: Save the power cleans, and clean and presses for a bit later down the line...', 'Squats, deadlifts and bench presses are the three exercises you should never go without. This is because they are compound exercises, in that they work many different muscles rather than isolating a single one. [This is a pretty good resource](http://www.sport-fitness-advisor.com/dumbbellexercises.html) for dumbbell exercises. The animated pictures really helped when I was first starting out.', "I like to work out the shoulders a lot. You'll notice other muscle areas stemming from shoulder workouts. I noticed my traps increasing in size even though I, stupidly, didn't ever work out my traps or back. I worked my anterior delts until they were freakishly strong... I'm of relatively small stature, but I could lift 60lb dumbells straight out in front of me. I couldn't hold it out there but I could put it up with decent form. The strength of my shoulders was an awesome foundation for many, many of my other upper-body workouts. ", 'I use FitnessBliss to manage my [workout routines](http://www.fitnessbliss.com)', "http://www.amazon.com/Starting-Strength-2nd-Mark-Rippetoe/dp/0976805421/ref=sr_1_1?s=books&ie=UTF8&qid=1290392860&sr=1-1 This is all you need. Doing isolation exercises like most people will recommend will only slow down your progress. Just do big, compound lifts and you'll gain strength faster than you thought possible. When you stop gaining strength every time you go to the gym, you can expand what you're doing.", "http://www.dumbbell-exercises.com/ I'm a fan of this site. I only have dumbbells and bodyweight to work with and still managed to tailor a full body workout. http://www.reddit.com/r/Fitness/comments/e71s9/good_protein_mix_for_someone_who_wants_to_bulk_up/c16ecjl", 'Step 1: Go to Gym Step 2: Pick up weights Step 3: Run like hell out of the gym before they catch you. Step 4: Enjoy your free weights and cardio exercise ', 'Dumbbell Chest Press, Shoulder Press, Romanian deadlifts, Shoulder shrugs, obviously Bicep curls and Tricep extensions. Flies, one armed row (not sure what its called), lat raises. I only use one machine for upper body in the gym. With free weights and a bench/bar I could do my entire workout (with slight modifications). Free weights are good because they encourage correct form and isolate less.', "if your a gym noob and JUST starting out gym/weight training, i would suggest staying off the free weight exercises, at least till your muscles have adjusted to the motion of the weights. do machine exercises first as this will allow it. i have seen so many people injure themselves from DB presses and the like. don't be one of them pls. once you have gone past the memory muscle thing, start with the light DB presses and then ease into the idea of free weights. ask more if you have further questions.", "Read a beginner's program, it makes everything a LOT less confusing. Starting Strength, Westside for Skinny Bastards, something like that. You do those for a few months and it gives you a good base of strength, good form, and a basic gym education. Then you can decide where you want to go from there once you have some knowledge", "Muscle-ups. 
I can only do a few of them and my form isn't that great but it just feels cool to be able to get all the way up on the bar and just hold it there for a second.", 'Chinups/Pullups', 'I like turtles', 'Sled.', 'squat', 'DB press', "Without wanting to just go along with the general consensus here, I have to say chins and deads. I'm a novice, so the most important criteria for an exercise are form and safety. Chin form is piss-simple and baby-safe—failure results in simply landing on your feet. And when it comes to weight exercises I feel like deadlift form is second to bench press for simplicity, but no. 1 for safety—failure results in either not being able to lift the weight from the ground in the first place or being forced to drop it to the ground. Plus they're both compound movements, which is great if your goal is full-body strength and fitness.", 'I have a semi-romantic relationship with dips.', "OHP. I don't care if I suck at it, it makes me feel like Hercules ", 'Benchpress and pull-ups/chin-ups & dips', 'Squat and OHP. ', 'Incline/decline bench always feels great. Cable flyes too. Feels like a get bigger after every session.', 'Barbell curls, standing in the squat rack (for safety).', 'Now that I can finally do them, chin ups. Still tough for me but getting progressively better.', 'Deadlift. It builds character and strong bones. Got Deadlift ?', 'Every work out I do is just one day closer to deadlift day. ', 'Deadlifts.', 'I like to do a deadlift and then hold it for 20 seconds at the top. When I set that shit down and stand back up, I feel the euphoria of the gods.', 'Just did hang cleans for the first time yesterday, and they are a ton of fun. Absolutely destroyed my traps', 'Calf raises, that burn', "Deadlift and bench press. I don't care how bro-ish it sounds BP just makes me feel powerful. ", 'Power clean, so much fun', 'Easily Deadlifts', 'Dumbbell chest presses, pull-ups.', 'At that age I would say bodyweight workouts A calorie deficit is the only way you’re really going to lose much weight. So make that your main focus and exercise complementary', 'As your vacation starts in 3 weeks, pretty much irrelevant if that was your hope. If you wanted progress 6 months a go is when to start.', 'Have a look at our recommended workouts in the wiki: https://thefitness.wiki/routines/strength-training-muscle-building/ The wiki also covers how to setup your diet for fat loss.', "The best workouts are the ones you will actually do. Do you have access to a gym? Do you like to run? At the end of the day, however, working out won't get you the results (fat loss) you want; you need to track calories and weight and eat less. See https://thefitness.wiki/weight-loss-101/", "3 day a week sounds the best of those 3 options to me, and you can start supplementing in other exercises if you need to. make sure you do 3 or 4 sets of 8-12 reps, if you get past 12, up the weight. And you won't look like arnold or have to be in a weight lifting competition (i know it was a joke) if you do starting strength. It is STARTING STRENGTH, literally the basic compound exercises which help you learn and gain some muscle. If you keep your diet the same, you won't get huge or anything, just stronger.", 'why not follow a program like starting strength or strong lifts? You seem like your just trying to re-invent the wheel', "Push ups and pull ups. Simple and they work a ton of muscles. There's a good reason the military makes you do a ton of pushups. 
", 'Horizontal pushing: One handed pushup progression or pseudo planche pushup progression Vertical pushing: dip progression or handstand pushup progression Horizontal pulling: one handed inverted row progression or front lever row progression Vertical pulling: one handed chinup or pullup progression. Legs: sprints, pistol squat progression, plyometrics Other: back lever, plank, side plank, any ring movement', "For what goal? Strength/muscle building? Not many to be honest. But for cardio or just general fitness... Burpees, squat jumps, paratroopers, push ups and all their varieties, all the crunch varieties, the list goes on forever. If you want the best cardio work out with some strength component to it, just do like 30 seconds of 5 of these exercises in a row, rest 30 seconds, then repeat till you're only getting a couple of reps per 30 second rep.", 'Anything high tension like one arm push-ups, pull-ups, pistol squats, and muscle-ups. They tend to give you far more bang for the buck then pumping out endless push-ups and crunches. ', 'http://www.nerdfitness.com/blog/2009/12/09/beginner-body-weight-workout-burn-fat-build-muscle/', '/r/bodyweightfitness might be able to help.', 'Seconding the recommendations for /r/Fitness and Starting Strength. Also, check out CrossFit. Even without joining a "box", the philosophy is sound and the workouts are excellent.', "You should check out /r/Fitness, they've got information. If you're just starting to get into lifting, you should look into Starting Strength (SS).", "Weight loss and fat loss are two different things. Exercise can actually make you gain weight via muscle/bone gains. You can't spot-reduce fat, or really tone one area vs another. You can only gain muscle from working them, or lose fat in the areas your genes want you to at that point. Generally, everyone is different, but the last place you gained a spot of fat is the first place you'll lose it and vice-versa. Fat loss is mostly diet. If you get that squared away, then will exercise help more. You sound like you're getting a handle on this. Can't go wrong with protein and veggies, avoiding large servings of processed foods and sugar/starch/white flour. As for an exercise program: Cardio burns calories while you're doing it, and then mostly stops. Lifting weight burns fewer calories during the gym session, but if you lift (heavy), you burn a TON of calories for the 48 hour recovery period (especially the larger leg/hip muscles). Mostly in your sleep, as that's where a body does the most healing in general. Increasing muscle mass helps burn a little more, but its mostly the recovery period thing for calorie burning. So for fat loss, many people combine cardio and lifting, lifting as many times per week as recovery will allow. Generally each muscle can be worked every other day with heavy lifting. If you want to hit the gym and lift really heavy for maximum fat loss, try [Starting Strength](http://startingstrength.com/) or [The New Rules of Lifting for Women](http://www.amazon.com/New-Rules-Lifting-Women-Goddess/dp/1583332944). If you still want to work out at home, then also look into [Ross Enamait's ideas](http://rosstraining.com/blog/), particularly his article section and his book Never Gymless. Good ideas on what to do and what gear to get. Lots of ideas on how to make some of your own gear cheaply, as well If you're worried about bulking up from lifting heavy weights, read [this article](http://www.stumptuous.com/lies-in-the-gym). 
Will put your mind at ease", 'Congratulations on getting rid of all the junk food and soda in your life, I am sure you will notice the benefits immediately. Keep choosing healthy foods making sure you hit your protein/carb/fat needs while eating smaller portions than you would before. Cardio is a great activity for your heart and for your body but it is not required for weight loss; in any case, go out of your way to walk places rather than drive. Dieting and fitness do not have to be immediate changes; continue to slowly change little aspects and it will eventually add up', 'Congrats for deciding to get healthy! 1) How much cardio should you do? How much can you do? See how long you can run on that treadmill going 5 miles per hour. Then slow it down to a walk. Then see how long you can do it again! Get comfortable jogging. If you decide you like it, try a couch to 5k. Seriously, google "Couch to 5k." 2) Great, you have some hand weights! Here are some things you can do: Arms: Dumbbell curls: http://www.youtube.com/watch?v=ggSmQiAfyd0 Legs: Lunges: http://www.youtube.com/watch?v=dJ95qwNaD78 Butt: Body weight squats: http://www.youtube.com/watch?v=Up_48p-BMug Also something to remember: when it comes to overall fitness, don\'t think of anything as a "problem area." No amount of ab exercise can get you washboard abs until you have basically no body fat. Concentrate on making yourself stronger, and you\'ll be happy with the results. ', 'The easy answer is "whatever exercise you will actually *do* 5 days a week." The hard answer is to do a 1-2 hours of strength training a week, 1-2 hours of cardio a week, and an 0.5-1 hours of flexibility training a week. It\'s up to you whether you want to make each of those it\'s own workout or whether you prefer to do some of everything each day.', "Sorry, when you start to loss weight, you're going to loss your D's. They'll probably become C's. Which is still glorious :P For cardio I'd just do a 20 minute walk 5-7 days a week to start. Strength I'd do bodyweight exercise, I like convict conditioning personally.. boils down to progressive movements for squats, push ups, pull ups, leg raises, etc. Lowest step is manageable for almost anyone. Only equipment necessary is a pullup bar, could get a door mount one.", "There's a good selection of starter stuff in the [FAQ](http://www.reddit.com/help/faqs/Fitness#Exercise1), I'd start there and look through the recommendations and see if anything sounds interesting/fun.", 'Most girls I see at the gym working on their cardio are on the elliptical machines. They make you sweat your ass off and render a nice butt in return. ', 'Why use a treadmill when you can go for a run. I bet if you run around where you live you will be able to find some interesting places / routes. BONUS FUN: running *really* late at night (even better after rain), everything is quiet, still and cool.... aaaah Best rule for foods ever: the less processed it is, the better it is. Obviously not hard and fast but it helps you when your in the supermarket choosing what to have for dinner.', '**READ THE FAQ** Mods need to make the FAQ a permanent post. Most self.fitness posts are sounding more and more like Livejournal entries.', "You can not lose fat in just the areas you want. I've lost about 15 pounds in the past few months and went from a DD to a D. If it is going to happen, it will happen. ", 'Deadlift. 
Makes me sad that I failed a 1 rep 10kg less than my PR because I was sleep deprived and had been away from the gym for a few days.', 'All exercises involving dumbbells are my favorite, except anything involving legs.', "Cleans by far are my favorite, just because it can be used for everything. Great movement overall. I can't say I really hate anything in the gym accept steady state cardio...I've learned to love all kinds of exercises and their purposes...cardio though...unless it's sprints, it gets so boring to me. ", 'used to hate squat and deadlift, dont know why i love em now', 'Kayak row gives me an actual high if I push myself right', 'Deadlift.', "If I love any exercise, I stop doing it, as it tends to mean it's not very effective. The exercise I hate the most is safety squat bar squats, and holy crap are they effective.", 'pull ups', 'standing barbell OHP, weighted pullups on good days, db lateral raises.', 'Deadlifts is the only correct answer, but I also find standing ab whele rollouts makes me feel like a beast', "I've been loving front squats lately. They just feel so right! And deadlifts of course. Bent over rows with snatch grip. And incline bench. ", 'Smith machine quarter squats.', 'Freestanding Handstand holds. ', 'Deadlift and Rows. ', 'Pull ups. I can barely do two, but I feel a boss when do them. =P', 'Weighted pull-ups and dips.', 'Favourite exercises are squats and OHP ', 'Heavy squats absolutely suck but I feel like $1,000,000 if I nail a new 5x5 PR', 'Read the wiki. Work out TDEE, eat accordingly, start a compound total body routine.', "I'd say two instrumental things that got me into a regular routine for working out were 1. making it part of my every day routine. I go to class, then I go to the gym, then I go home for dinner etc. When it's part of your routine it's harder to not workout and get out of your program. (also seems to work best for me if I go somewhere for a workout, as working out at home always becomes me skipping workouts regularly) 2. Do research on what to do for a program. If you don't know what you're doing it's easier to just not do anything. Get on a schedule and try to keep up with it for a while and you'll see great results, especially if you're eating well.", 'This submission has been removed. Violation: ## [Rule 0: No threads that are answered by the Wiki, Searching r/Fitness, or Google](https://www.reddit.com/r/Fitness/wiki/rules#wiki_rule_.230) ##### Please review the [Getting Started page](https://thefitness.wiki/getting-started-with-fitness/) from our Wiki. *** IMPORTANT: **This is not an automatic removal by a bot.** Your thread was reviewed and removed by command from a human moderator. Please click the link above and read the full text of the rule if you have any questions about the removal.', 'Something else to keep in mind (this comes from judging by your name): there are no "hacks" or cheat codes or short cuts. You have to find a program that seems like you\'re willing to stick to and do it. If you switch programs a few times or make minor tweaks, that\'s fine. Just know that consistency and effort are the only 2 things that will make the most difference.', '1.) determine what your goal is. 2.) find a program that will lead you to that goal 3.) Commit to it, and begin. Finally, realize that this is a marathon, not a sprint. Your current situation did not develop overnight, so neither will any positive changes. View your commitment as a lifestyle change, not a temporary fix.', 'This thead has been locked. 
Reason: code1554-savage-22', 'Read the wiki... ', 'The Bro Split', "Read the wiki. If you can't even put in the work to do the initial research, then you're not ready at all. Come back when you're older."]
comments_df = pd.DataFrame({'body': comments})
loader = DataFrameLoader(comments_df, page_content_column="body")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=10000, chunk_overlap=500)
texts = text_splitter.split_documents(documents)
docs = []
counter = 0
content = ""
content_array = []
for text in texts:
if counter < 20:
counter += 1
content += text.page_content
content += '\n'
else:
counter = 0
content_array.append(content)
content = ""
content += text.page_content
content += '\n'
content_array.append(content)
docs = [Document(page_content=t) for t in content_array]
# query langchain for questions to get the desired result
prompt_template_p1 = f"Given the following Reddit comments in response to the search query {query}, "
#prompt_template_p2 = """identify the most frequently occuring key claims (2-6 claims) found in the comments and output it as a list of key claims."
#prompt_template_p2 = """list the key claims for each comment in the format [claim1,claim2, etc.]"""
#prompt_template_p2 = """list the key claims for each comment. If a
#comment makes an irrelevant or troll claim in response to the question ignore it.
prompt_template_p2 = """identify the most frequently occurring key claims (2-6 claims) found in the comments
that directly answer the search query. Output a list of key claims."
```{text}```
"""
prompt_template_formatted = prompt_template_p1 + prompt_template_p2
PROMPT = PromptTemplate(template=prompt_template_formatted, input_variables=["text"])
combine_template_p1 = f"Given the relevant key claims extracted from reddit comments made in response to the search query {query} , "
combine_template_p2 = """consolidate and identify the relevant and unique claims that are mentioned most frequently in the list of claims.
Provide a list of between 1 to 7 key distinct and unique claims that directly answer the question and rank them based on frequency of occurrence.
Your output should be an array of strings where each item in the array is a string of the claims you found. An example is below.
Only return the array and nothing else
EXAMPLE OUTPUT STRUCTURE:
["Claim 1", "Claim 2", ...]
Claims extracted from Reddit Comments in the OUTPUT STRUCTURE given (only the array and nothing else):
```{text}```
"""
combine_template = combine_template_p1 + combine_template_p2
combine_prompt = PromptTemplate(
input_variables=["text"],
template=combine_template,
)
chain = load_summarize_chain(llm=OpenAI(temperature=0, batch_size=60) ,chain_type="map_reduce", map_prompt=PROMPT, combine_prompt=combine_prompt)
llm_resp = chain.run(docs)
start_array_index = 0
end_array_index = 0
for i in range(len(llm_resp)):
if llm_resp[i] == '[':
start_array_index = i
if llm_resp[i] == ']':
end_array_index = i
llm_resp = llm_resp[start_array_index + 1: end_array_index]
llm_resp_array = llm_resp.split(',')
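# Illustrative parse (hypothetical LLM output): if llm_resp were '["Squats", "Deadlifts"]',
# the slicing above keeps '"Squats", "Deadlifts"' and split(',') yields
# ['"Squats"', ' "Deadlifts"'], so each claim string may still carry quotes and leading whitespace.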
print(get_key_claims_hash(comments, llm_resp_array))
| [
"PLACEHOLDERPLACEHOLDER",
"Given the following Reddit comments in response to the search query PLACEHOLDER, ",
"identify the most frequently occurring key claims (2-6 claims) found in the comments\n that directly answer the search query. Output a list of key claims.\"\n \n ```{text}```\n \n ",
"consolidate and identify the relevant and unique claims that are mentioned most frequently in the list of claims.\n Provide a list of between 1 to 7 key distinct and unique claims that directly answer the question and rank them based on frequency of occurrence.\n Your output should be an array of strings where each item in the array is a string of the claims you found. An example is below. \n Only return the array and nothing else\n \n EXAMPLE OUTPUT STRUCTURE:\n [\"Claim 1\", \"Claim 2\", ...]\n \n Claims extracted from Reddit Comments in the OUTPUT STRUCTURE given (only the array and nothing else):\n ```{text}```\n ",
"Given the relevant key claims extracted from reddit comments made in response to the search query PLACEHOLDER , "
] |
2024-01-10 | mshroff123/consensus | pinecone-inserts~scrape-csv.py | import csv
import os
from tqdm.auto import tqdm
import pinecone
import json, codecs
import numpy as np
from langchain.document_loaders import DataFrameLoader
from langchain.llms import OpenAI
from langchain.chains.question_answering import load_qa_chain
from langchain.chains.summarize import load_summarize_chain
from langchain.prompts import PromptTemplate
from langchain.embeddings.openai import OpenAIEmbeddings
import datetime
import openai
openai.api_key = "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"
file_path = './submissions_filtered.csv'
questions = []
with open(file_path, 'r') as csv_file:
csv_reader = csv.reader(csv_file)
# Skip the header row if present
next(csv_reader, None)
# Read and print the first 5 lines
for row in csv_reader:
# add the question to the questions dataset
questions.append(row[1])
# remove duplicates
questions = list(set(questions))
print('\n'.join(questions[:5]))
print(len(questions))
# model = SentenceTransformer('all-MiniLM-L6-v2', device=device)
# init pinecone
PINECONE_API_KEY = '974f9758-d34f-4083-b82d-a05e3b1742ae'
PINECONE_ENV = 'us-central1-gcp'
pinecone.init(
api_key=PINECONE_API_KEY,
environment=PINECONE_ENV
)
pinecone.init(
api_key=PINECONE_API_KEY,
environment=PINECONE_ENV
)
# create an index
index_name = 'semantic-search-6998'
# only create index if it doesn't exist
if index_name not in pinecone.list_indexes():
pinecone.create_index(
name=index_name,
dimension=1536,
metric='cosine'
)
# now connect to the index
index = pinecone.Index(index_name)
batch_size = 128
for i in tqdm(range(0, len(questions), batch_size)):
# find end of batch
i_end = min(i+batch_size, len(questions))
# create IDs batch
ids = [str(x) for x in range(i, i_end)]
# create metadata batch
metadatas = [{'text': text} for text in questions[i:i_end]]
# create embeddings
# xc = model.encode(questions[i:i_end])
openai_embed = OpenAIEmbeddings(openai_api_key=openai.api_key)
xc = openai_embed.embed_documents(questions[i:i_end])
# create records list for upsert
records = []
for i in range(len(ids)):
record = (ids[i], xc[i], metadatas[i])
records.append(record)
# upsert to Pinecone
print("uploading")
index.upsert(vectors=records)
# check number of records in the index
print(index.describe_index_stats()) | [] |
2024-01-10 | zhuohan123/ray | rllib~contrib~maddpg~maddpg.py | """Contributed port of MADDPG from OpenAI baselines.
The implementation has a couple assumptions:
- The number of agents is fixed and known upfront.
- Each agent is bound to a policy of the same name.
- Discrete actions are sent as logits (pre-softmax).
For a minimal example, see twostep_game.py, and the README for how to run
with the multi-agent particle envs.
"""
import logging
from ray.rllib.agents.trainer import COMMON_CONFIG, with_common_config
from ray.rllib.agents.dqn.dqn import GenericOffPolicyTrainer
from ray.rllib.contrib.maddpg.maddpg_policy import MADDPGTFPolicy
from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch
from ray.rllib.utils import merge_dicts
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
# === Settings for each individual policy ===
# ID of the agent controlled by this policy
"agent_id": None,
# Use a local critic for this policy.
"use_local_critic": False,
# === Evaluation ===
# Evaluation interval
"evaluation_interval": None,
# Number of episodes to run per evaluation period.
"evaluation_num_episodes": 10,
# === Model ===
# Apply a state preprocessor with spec given by the "model" config option
# (like other RL algorithms). This is mostly useful if you have a weird
# observation shape, like an image. Disabled by default.
"use_state_preprocessor": False,
# Postprocess the policy network model output with these hidden layers. If
# use_state_preprocessor is False, then these will be the *only* hidden
# layers in the network.
"actor_hiddens": [64, 64],
# Hidden layers activation of the postprocessing stage of the policy
# network
"actor_hidden_activation": "relu",
# Postprocess the critic network model output with these hidden layers;
# again, if use_state_preprocessor is True, then the state will be
# preprocessed by the model specified with the "model" config option first.
"critic_hiddens": [64, 64],
# Hidden layers activation of the postprocessing stage of the critic.
"critic_hidden_activation": "relu",
# N-step Q learning
"n_step": 1,
# Algorithm for good policies
"good_policy": "maddpg",
# Algorithm for adversary policies
"adv_policy": "maddpg",
# === Replay buffer ===
# Size of the replay buffer. Note that if async_updates is set, then
# each worker will have a replay buffer of this size.
"buffer_size": int(1e6),
# Observation compression. Note that compression makes simulation slow in
# MPE.
"compress_observations": False,
# If set, this will fix the ratio of sampled to replayed timesteps.
# Otherwise, replay will proceed at the native ratio determined by
# (train_batch_size / rollout_fragment_length).
"training_intensity": None,
# Force lockstep replay mode for MADDPG.
"multiagent": merge_dicts(COMMON_CONFIG["multiagent"], {
"replay_mode": "lockstep",
}),
# === Optimization ===
# Learning rate for the critic (Q-function) optimizer.
"critic_lr": 1e-2,
# Learning rate for the actor (policy) optimizer.
"actor_lr": 1e-2,
# Update the target network every `target_network_update_freq` steps.
"target_network_update_freq": 0,
# Update the target by \tau * policy + (1-\tau) * target_policy
"tau": 0.01,
# Weights for feature regularization for the actor
"actor_feature_reg": 0.001,
# If not None, clip gradients during optimization at this value
"grad_norm_clipping": 0.5,
# How many steps of the model to sample before learning starts.
"learning_starts": 1024 * 25,
# Update the replay buffer with this many samples at once. Note that this
# setting applies per-worker if num_workers > 1.
"rollout_fragment_length": 100,
# Size of a batched sampled from replay buffer for training. Note that
# if async_updates is set, then each worker returns gradients for a
# batch of this size.
"train_batch_size": 1024,
# Number of env steps to optimize for before returning
"timesteps_per_iteration": 0,
# === Parallelism ===
# Number of workers for collecting samples with. This only makes sense
# to increase if your environment is particularly slow to sample, or if
# you're using the Async or Ape-X optimizers.
"num_workers": 1,
# Prevent iterations from going lower than this time span
"min_iter_time_s": 0,
})
# __sphinx_doc_end__
# yapf: enable
def before_learn_on_batch(multi_agent_batch, policies, train_batch_size):
samples = {}
# Modify keys.
for pid, p in policies.items():
i = p.config["agent_id"]
keys = multi_agent_batch.policy_batches[pid].data.keys()
keys = ["_".join([k, str(i)]) for k in keys]
samples.update(
dict(
zip(keys,
multi_agent_batch.policy_batches[pid].data.values())))
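# e.g. (illustrative): for the policy whose agent_id is 0, the "obs" column is
# stored under "obs_0", "actions" under "actions_0", and so on for every key.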
# Make ops and feed_dict to get "new_obs" from target action sampler.
new_obs_ph_n = [p.new_obs_ph for p in policies.values()]
new_obs_n = list()
for k, v in samples.items():
if "new_obs" in k:
new_obs_n.append(v)
target_act_sampler_n = [p.target_act_sampler for p in policies.values()]
feed_dict = dict(zip(new_obs_ph_n, new_obs_n))
new_act_n = p.sess.run(target_act_sampler_n, feed_dict)
samples.update(
{"new_actions_%d" % i: new_act
for i, new_act in enumerate(new_act_n)})
# Share samples among agents.
policy_batches = {pid: SampleBatch(samples) for pid in policies.keys()}
return MultiAgentBatch(policy_batches, train_batch_size)
def add_maddpg_postprocessing(config):
"""Add the before learn on batch hook.
This hook is called explicitly prior to TrainOneStep() in the execution
setups for DQN and APEX.
"""
def f(batch, workers, config):
policies = dict(workers.local_worker()
.foreach_trainable_policy(lambda p, i: (i, p)))
return before_learn_on_batch(batch, policies,
config["train_batch_size"])
config["before_learn_on_batch"] = f
return config
MADDPGTrainer = GenericOffPolicyTrainer.with_updates(
name="MADDPG",
default_config=DEFAULT_CONFIG,
default_policy=MADDPGTFPolicy,
get_policy_class=None,
validate_config=add_maddpg_postprocessing)
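# A minimal usage sketch (assumptions only, not taken from twostep_game.py): each agent
# is bound to a policy of the same name and receives its integer "agent_id" in the
# per-policy config, as the module docstring assumes. The env name, spaces, and policy
# IDs below are hypothetical.
#
#   trainer = MADDPGTrainer(env="two_agent_env", config={
#       "multiagent": {
#           "policies": {
#               "agent_0": (None, obs_space, act_space, {"agent_id": 0}),
#               "agent_1": (None, obs_space, act_space, {"agent_id": 1}),
#           },
#           "policy_mapping_fn": lambda agent_id: agent_id,
#       },
#   })
#   trainer.train()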
| [] |
2024-01-10 | shelbyt/hd-pipeline | src~pipeline.py | # Input: CSV file
# Operations:
# 1. Run each row through gpt to create a sentence (OAI GPT4 API)
# 2. Convert each sentence to an embedding vector (1536) (OAI ADA-EMBED API)
# 3. Batch Upsert the embedding vector into Pinecone using the index, vector, and column info as metadata (Pinecone API)
import os
import pinecone
import csv
from openai import OpenAI
from dotenv import load_dotenv
import argparse
load_dotenv()
client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))
# Initialize APIs
pinecone.init(api_key=os.getenv('PINECONE_API_KEY'), environment='gcp-starter')
# Function to generate sentence using GPT-4
def generate_sentence(input_row, columns=None):
model = "gpt-4-1106-preview"
prompt = f"convert this row from a dataset into a 100 word concise but descriptive paragraph with all the technical specs that I can convert into an embedding. Here are the columns for the dataset. Please ensure data from each available column must included: {columns} -> {input_row}"
response = client.chat.completions.create(
model=model,
messages=[
{"role": "system", "content": "You are an advanced high iq human who follows instructions exactly."},
{"role": "user", "content": prompt},
],
max_tokens=3000)
return {"prompt": prompt, "response": response.choices[0].message.content, "model": model}
# Function to convert sentence to embedding
def convert_to_embedding(sentence):
response = client.embeddings.create(
model="text-embedding-ada-002",
input=sentence
)
return response.data[0].embedding
# Function to upsert into Pinecone
def upsert_to_pinecone(id, vector, metadata):
index = pinecone.Index('home-depot')
index.upsert ([(id, vector, metadata)])
def main():
pinecone.describe_index("home-depot")
index = pinecone.Index("home-depot")
parser = argparse.ArgumentParser(description='Process a CSV file.')
parser.add_argument('csvfile', type=str, help='The CSV file to process')
args = parser.parse_args()
with open(args.csvfile, newline='') as csvfile:
reader = csv.DictReader(csvfile)
columns = " ".join(reader.fieldnames) # Get headers as a space-separated sentence
for i,row in enumerate(reader):
row_str = " ".join([str(val) for val in row.values()])
generated_sentence_info = generate_sentence(row_str, columns)
embedding_vector = convert_to_embedding(generated_sentence_info['response'])
metadata = {key: row[key] for key in row}
metadata['generated_sentence'] = str(generated_sentence_info['response'])
metadata['prompt'] = str(generated_sentence_info['prompt'])
metadata['model'] = str(generated_sentence_info['model'])
upsert_to_pinecone(row['id'], embedding_vector, metadata)
print(i, row_str, generated_sentence_info['prompt'], generated_sentence_info['model'], generated_sentence_info['response'])
if __name__ == "__main__":
main()
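# Example invocation (hypothetical file name; the CSV is expected to contain an
# "id" column plus the product attribute columns used as metadata):
#   python pipeline.py products.csv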
| [
"You are an advanced high iq human who follows instructions exactly.",
"convert this row from a dataset into a 100 word concise but descriptive paragraph with all the technical specs that I can convert into an embedding. Here are the columns for the dataset. Please ensure data from each available column must included: PLACEHOLDER -> PLACEHOLDER"
] |
2024-01-10 | MasterMilkX/BMO_chatbot_prototype | Python~gpt_augment2.py | import openai
import sys
from tqdm import tqdm
openai.api_key=open('../data/gpt3_api_key.txt', "r").read().strip() # enter open ai API key here :)
# augment the character description labels
labels = open((sys.argv[1] if len(sys.argv) > 1 else "../data/rip_data/character_desc.txt"), "r").read().split('\n')
def passDesc(desc):
response = openai.Completion.create(
engine="text-davinci-003",
prompt=f"""
Can you give me 4 different ways to say this phrase describing a video game character's appearance in a similar format: \"{desc}\"?
""",
temperature=0.7,
max_tokens=200,
)
return response.choices[0].text
#test case
# print(passDesc("a white rabbit with a blue vest and a pocket watch"))
gpt_labels = {}
with tqdm(total=len(labels)) as pbar:
for i,label in enumerate(labels):
pbar.set_description(f"> {label}")
gpt_labels[i] = [label]
gpt_labels[i].append(passDesc(label))
pbar.update(1)
# write to file
with open("../data/rip_data/character_desc_gpt.txt", "w+") as f:
for key, value in gpt_labels.items():
for l in value:
f.write(l + "\n")
f.write("&\n")
| [
"\n Can you give me 4 different ways to say this phrase describing a video game character's appearance in a similar format: \"PLACEHOLDER\"?\n "
] |
2024-01-10 | grauvictor/ragas | src~ragas~llama_index~evaluation.py | from __future__ import annotations
import typing as t
from datasets import Dataset
from ragas import evaluate as ragas_evaluate
from ragas.evaluation import Result
from ragas.metrics.base import Metric
if t.TYPE_CHECKING:
from llama_index.indices.query.base import BaseQueryEngine
def evaluate(
query_engine: BaseQueryEngine,
metrics: list[Metric],
questions: list[str],
ground_truths: t.Optional[list[str]] = None,
) -> Result:
"""
Run evaluation of llama_index QueryEngine with different metrics
Parameters
----------
query_engine : BaseQueryEngine
The QueryEngine that is to be evaluated
metrics : list[Metric]
The ragas metrics to use for evaluation.
questions : list[str]
List of questions to evaluate on
ground_truths : list[str], optional
List of ground_truths answer to the question to evaluate on.
Returns
-------
Result
Result object containing the scores of each metric. You can use this do analysis
later. If the top 3 metrics are provided then it also returns the `ragas_score`
for the entire pipeline.
Raises
------
ValueError
if validation fails because the columns required for the metrics are missing or
if the columns are of the wrong format.
Examples
--------
Once you have a llama_index QueryEngine created you can use it to evaluate on a list
of questions.
Import everything you need:
>>> from ragas.metrics import faithfulness, answer_relevancy, context_precision
>>> from ragas.metrics.critique import harmfulness
>>> from ragas.llama_index import evaluate
init the query engine, get the questions and choose the metrics you want to use:
>>> query_engine = # from llamaindex
>>> questions: list[str] = [] # from somewhere
>>> metrics = [faithfulness, answer_relevancy, context_precision, harmfulness]
Run the evaluation:
>>> r = evaluate(query_engine, metrics, questions)
analysis the result:
>>> print(r) # prints the scores of each metric
>>> r.to_pandas() # returns a pandas dataframe if you want to do further analysis
"""
try:
from llama_index.async_utils import run_async_tasks
except ImportError:
raise ImportError(
"llama_index must be installed to use this function. "
"Install it with `pip install llama_index`."
)
# TODO: rate limit, error handling, retries
responses = run_async_tasks([query_engine.aquery(q) for q in questions])
answers = []
contexts = []
for r in responses:
answers.append(r.response)
contexts.append([c.node.get_content() for c in r.source_nodes])
dataset_dict = {
"question": questions,
"answer": answers,
"contexts": contexts,
}
if ground_truths is not None:
dataset_dict["ground_truths"] = ground_truths
ds = Dataset.from_dict(dataset_dict)
result = ragas_evaluate(ds, metrics)
return result
| [] |
2024-01-10 | grauvictor/ragas | src~ragas~metrics~faithfulness.py | from __future__ import annotations
import typing as t
from dataclasses import dataclass
from langchain.callbacks.manager import CallbackManager, trace_as_chain_group
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from ragas.metrics.base import EvaluationMode, MetricWithLLM
if t.TYPE_CHECKING:
from datasets import Dataset
#################
# NLI Score
#################
LONG_FORM_ANSWER_PROMPT = HumanMessagePromptTemplate.from_template(
"""\
Given a question and answer, create one or more statements from each sentence in the given answer.
question: Who was Albert Einstein and what is he best known for?
answer: He was a German-born theoretical physicist, widely acknowledged to be one of the greatest and most influential physicists of all time. He was best known for developing the theory of relativity, he also made important contributions to the development of the theory of quantum mechanics.
statements:\nAlbert Einstein was born in Germany.\nAlbert Einstein was best known for his theory of relativity.
question: Cadmium Chloride is slightly soluble in this chemical, it is also called what?
answer: alcohol
statements:\nCadmium Chloride is slightly soluble in alcohol.
question: Were Shahul and Jithin of the same nationality?
answer: They were from different countries.
statements:\nShahul and Jithin were from different countries.
question:{question}
answer: {answer}
statements:\n""" # noqa: E501
)
NLI_STATEMENTS_MESSAGE = HumanMessagePromptTemplate.from_template(
"""
Prompt: Natural language inference
Consider the given context and following statements, then determine whether they are supported by the information present in the context.Provide a brief explanation for each statement before arriving at the verdict (Yes/No). Provide a final verdict for each statement in order at the end in the given format. Do not deviate from the specified format.
Context:\nJohn is a student at XYZ University. He is pursuing a degree in Computer Science. He is enrolled in several courses this semester, including Data Structures, Algorithms, and Database Management. John is a diligent student and spends a significant amount of time studying and completing assignments. He often stays late in the library to work on his projects.
statements:\n1. John is majoring in Biology.\n2. John is taking a course on Artificial Intelligence.\n3. John is a dedicated student.\n4. John has a part-time job.\n5. John is interested in computer programming.\n
Answer:
1. John is majoring in Biology.
Explanation: John's major is explicitly mentioned as Computer Science. There is no information suggesting he is majoring in Biology. Verdict: No.
2. John is taking a course on Artificial Intelligence.
Explanation: The context mentions the courses John is currently enrolled in, and Artificial Intelligence is not mentioned. Therefore, it cannot be deduced that John is taking a course on AI. Verdict: No.
3. John is a dedicated student.
Explanation: The prompt states that he spends a significant amount of time studying and completing assignments. Additionally, it mentions that he often stays late in the library to work on his projects, which implies dedication. Verdict: Yes.
4. John has a part-time job.
Explanation: There is no information given in the context about John having a part-time job. Therefore, it cannot be deduced that John has a part-time job. Verdict: No.
5. John is interested in computer programming.
Explanation: The context states that John is pursuing a degree in Computer Science, which implies an interest in computer programming. Verdict: Yes.
Final verdict for each statement in order: No. No. Yes. No. Yes.
context:\n{context}
statements:\n{statements}
Answer:
""" # noqa: E501
)
@dataclass
class Faithfulness(MetricWithLLM):
name: str = "faithfulness"
evaluation_mode: EvaluationMode = EvaluationMode.qac
batch_size: int = 15
def _score_batch(
self: t.Self,
ds: Dataset,
callbacks: t.Optional[CallbackManager] = None,
callback_group_name: str = "batch",
) -> list[float]:
"""
returns the NLI score for each (q, c, a) pair
"""
question, answer, contexts = ds["question"], ds["answer"], ds["contexts"]
prompts = []
with trace_as_chain_group(
callback_group_name, callback_manager=callbacks
) as batch_group:
for q, a in zip(question, answer):
human_prompt = LONG_FORM_ANSWER_PROMPT.format(question=q, answer=a)
prompts.append(ChatPromptTemplate.from_messages([human_prompt]))
result = self.llm.generate(prompts, callbacks=batch_group)
list_statements: list[list[str]] = []
for output in result.generations:
# use only the first generation for each prompt
statements = output[0].text.split("\n")
list_statements.append(statements)
prompts = []
for context, statements in zip(contexts, list_statements):
statements_str: str = "\n".join(
[f"{i+1}.{st}" for i, st in enumerate(statements)]
)
contexts_str: str = "\n".join(context)
human_prompt = NLI_STATEMENTS_MESSAGE.format(
context=contexts_str, statements=statements_str
)
prompts.append(ChatPromptTemplate.from_messages([human_prompt]))
result = self.llm.generate(prompts, callbacks=batch_group)
outputs = result.generations
scores = []
final_answer = "Final verdict for each statement in order:"
final_answer = final_answer.lower()
for i, output in enumerate(outputs):
output = output[0].text.lower().strip()
if output.find(final_answer) != -1:
output = output[output.find(final_answer) + len(final_answer) :]
score = sum(
0 if "yes" in answer else 1
for answer in output.strip().split(".")
if answer != ""
)
score = score / len(list_statements[i])
else:
score = max(0, output.count("verdict: no")) / len(
list_statements[i]
)
scores.append(1 - score)
return scores
faithfulness = Faithfulness()
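# Worked example of the verdict parsing in _score_batch above (illustrative numbers):
# if the model answers "Final verdict for each statement in order: No. No. Yes." for
# three generated statements, the "No" count is 2, score = 2/3, and the reported
# faithfulness for that row is 1 - 2/3, about 0.33.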
| [
"Given a question and answer, create one or more statements from each sentence in the given answer.\nquestion: Who was Albert Einstein and what is he best known for?\nanswer: He was a German-born theoretical physicist, widely acknowledged to be one of the greatest and most influential physicists of all time. He was best known for developing the theory of relativity, he also made important contributions to the development of the theory of quantum mechanics.\nstatements:\nAlbert Einstein was born in Germany.\nAlbert Einstein was best known for his theory of relativity.\nquestion: Cadmium Chloride is slightly soluble in this chemical, it is also called what?\nanswer: alcohol\nstatements:\nCadmium Chloride is slightly soluble in alcohol.\nquestion: Were Shahul and Jithin of the same nationality?\nanswer: They were from different countries.\nstatements:\nShahul and Jithin were from different countries.\nquestion:{question}\nanswer: {answer}\nstatements:\n",
"[]",
"\nPrompt: Natural language inference\nConsider the given context and following statements, then determine whether they are supported by the information present in the context.Provide a brief explanation for each statement before arriving at the verdict (Yes/No). Provide a final verdict for each statement in order at the end in the given format. Do not deviate from the specified format.\n\nContext:\nJohn is a student at XYZ University. He is pursuing a degree in Computer Science. He is enrolled in several courses this semester, including Data Structures, Algorithms, and Database Management. John is a diligent student and spends a significant amount of time studying and completing assignments. He often stays late in the library to work on his projects.\nstatements:\n1. John is majoring in Biology.\n2. John is taking a course on Artificial Intelligence.\n3. John is a dedicated student.\n4. John has a part-time job.\n5. John is interested in computer programming.\n\nAnswer:\n1. John is majoring in Biology.\nExplanation: John's major is explicitly mentioned as Computer Science. There is no information suggesting he is majoring in Biology. Verdict: No.\n2. John is taking a course on Artificial Intelligence.\nExplanation: The context mentions the courses John is currently enrolled in, and Artificial Intelligence is not mentioned. Therefore, it cannot be deduced that John is taking a course on AI. Verdict: No.\n3. John is a dedicated student.\nExplanation: The prompt states that he spends a significant amount of time studying and completing assignments. Additionally, it mentions that he often stays late in the library to work on his projects, which implies dedication. Verdict: Yes.\n4. John has a part-time job.\nExplanation: There is no information given in the context about John having a part-time job. Therefore, it cannot be deduced that John has a part-time job. Verdict: No.\n5. John is interested in computer programming.\nExplanation: The context states that John is pursuing a degree in Computer Science, which implies an interest in computer programming. Verdict: Yes.\nFinal verdict for each statement in order: No. No. Yes. No. Yes.\ncontext:\n{context}\nstatements:\n{statements}\nAnswer:\n"
] |
2024-01-10 | grauvictor/ragas | src~ragas~langchain~evalchain.py | from __future__ import annotations
import typing as t
from collections import defaultdict
from datasets import Dataset
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.schema import RUN_KEY
from langsmith.evaluation import EvaluationResult, RunEvaluator
from langsmith.schemas import Example, Run
from ragas.metrics.base import EvaluationMode, Metric
from ragas.validation import EVALMODE_TO_COLUMNS
if t.TYPE_CHECKING:
from langchain.callbacks.base import Callbacks
class RagasEvaluatorChain(Chain, RunEvaluator):
"""
Wrapper around ragas Metrics to use them with langsmith.
"""
metric: Metric
def __init__(self, **kwargs: t.Any):
super().__init__(**kwargs)
self.metric.init_model()
@property
def input_keys(self) -> list[str]:
keys = ["query", "result"]
if self.metric.evaluation_mode in [EvaluationMode.qac, EvaluationMode.qc]:
keys += ["source_documents"]
if self.metric.evaluation_mode in [EvaluationMode.gc]:
keys += ["ground_truths"]
return keys
@property
def output_keys(self) -> list[str]:
return [f"{self.metric.name}_score"]
def _call(
self,
inputs: dict[str, t.Any],
run_manager: t.Optional[CallbackManagerForChainRun] = None,
) -> dict[str, t.Any]:
"""
Call the evaluation chain.
"""
self._validate(inputs)
contexts = []
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
if "source_documents" in inputs:
for document in inputs["source_documents"]:
if isinstance(document, dict):
contexts.append(document["page_content"])
else:
contexts.append(document.page_content)
ground_truths = []
if "ground_truths" in inputs:
ground_truths = inputs["ground_truths"]
question = inputs["query"]
answer = inputs["result"]
score = self.metric.score_single(
{
"question": question,
"answer": answer,
"contexts": contexts,
"ground_truths": ground_truths,
},
callbacks=callbacks,
)
return {f"{self.metric.name}_score": score}
def _validate(
self,
input: dict[str, t.Any],
question_key: str = "query",
prediction_key: str = "result",
context_key: str = "source_documents",
) -> None:
...
# validate each example
required_columns = EVALMODE_TO_COLUMNS[self.metric.evaluation_mode]
if "question" in required_columns and question_key not in input:
raise ValueError(
f'"{question_key}" is required in each example'
f"for the metric[{self.metric.name}] you have chosen."
)
if "answer" in required_columns and prediction_key not in input:
raise ValueError(
f'"{prediction_key}" is required in each prediction'
f"for the metric[{self.metric.name}] you have chosen."
)
if "contexts" in required_columns and context_key not in input:
raise ValueError(
f'"{context_key}" is required in each prediction for the '
f"metric[{self.metric.name}] you have chosen."
)
if "ground_truths" in required_columns and "ground_truths" not in input:
raise ValueError(
f'"ground_truths" is required in each prediction for the '
f"metric[{self.metric.name}] you have chosen."
)
def evaluate(
self,
examples: t.Sequence[dict],
predictions: t.Sequence[dict],
question_key: str = "query",
prediction_key: str = "result",
context_key: str = "source_documents",
ground_truths_key: str = "ground_truths",
*,
callbacks: Callbacks = None,
) -> list[dict]:
"""Evaluate question answering examples and predictions."""
dataset_dict = defaultdict(list)
# validation
if len(examples) != len(predictions):
raise ValueError(
"number of examples and predictions must be same. Got "
f"len(examples)={len(examples)} and len(predictions)={len(predictions)}"
)
for i, example in enumerate(examples):
self._validate(
{**example, **predictions[i]}, question_key, prediction_key, context_key
)
# transform into Dataset that is supported by ragas
if self.metric.evaluation_mode in [
EvaluationMode.qac,
EvaluationMode.qc,
EvaluationMode.qa,
]:
dataset_dict["question"].append(example[question_key])
if self.metric.evaluation_mode in [EvaluationMode.qac, EvaluationMode.qa]:
dataset_dict["answer"].append(predictions[i][prediction_key])
if self.metric.evaluation_mode in [
EvaluationMode.qac,
EvaluationMode.qc,
EvaluationMode.gc,
]:
dataset_dict["contexts"].append(
[d.page_content for d in predictions[i][context_key]]
)
if self.metric.evaluation_mode == EvaluationMode.gc:
if isinstance(example["ground_truths"], list):
dataset_dict["ground_truths"].append(example["ground_truths"])
else:
dataset_dict["ground_truths"].append([example["ground_truths"]])
dataset = Dataset.from_dict(dataset_dict)
# evaluate
dataset_with_scores = self.metric.score(dataset, callbacks=callbacks)
scores = [
{f"{self.metric.name}_score": score}
for score in dataset_with_scores[self.metric.name]
]
return scores
def evaluate_run(
self, run: Run, example: t.Optional[Example] = None
) -> EvaluationResult:
"""
Evaluate a langsmith run
"""
if run.outputs is None:
raise ValueError("The chain should return results and service_document.")
if example is None:
raise ValueError("Examples have to be provided.")
chain_eval = run.outputs
chain_eval["query"] = run.inputs["query"]
if self.metric.evaluation_mode == EvaluationMode.gc:
if example.outputs is None or "ground_truths" not in example.outputs:
raise ValueError("expected `ground_truths` in example outputs.")
chain_eval["ground_truths"] = example.outputs["ground_truths"]
eval_output = self(chain_eval, include_run_info=True)
score_name = f"{self.metric.name}_score"
evaluation_result = EvaluationResult(
key=f"{self.metric.name}_score", score=eval_output[score_name]
)
if RUN_KEY in eval_output:
evaluation_result.evaluator_info[RUN_KEY] = eval_output[RUN_KEY]
return evaluation_result
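# A minimal usage sketch (hypothetical values; the underlying metric still needs a
# configured LLM, and Document is assumed to be langchain.schema.Document):
#
#   from ragas.metrics import faithfulness
#   faithfulness_chain = RagasEvaluatorChain(metric=faithfulness)
#   result = faithfulness_chain({
#       "query": "How many people live in Berlin?",
#       "result": "Roughly 3.5 million people live in Berlin.",
#       "source_documents": [Document(page_content="Berlin has about 3.5 million inhabitants.")],
#   })
#   print(result["faithfulness_score"])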
| [] |
2024-01-10 | thelostpotato27/drive_assist | google_auth.py | from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build
# import openai
# openai.organization = "org-Q5blmpJY4csYpHCZjCDruM3H"
# openai.api_key = os.getenv("OPENAI_API_KEY")
# openai.Model.list()
# pipenv shell
import os
# Scopes define the level of access you need from the user.
SCOPES = ['https://www.googleapis.com/auth/drive']
def authenticate_google_drive():
# If the token.json file exists, load the existing credentials from it.
if os.path.exists('token.json'):
creds = Credentials.from_authorized_user_file('token.json', SCOPES)
else:
# If no valid credentials are found, start the OAuth flow to obtain them.
flow = InstalledAppFlow.from_client_secrets_file('credentials.json', SCOPES, redirect_uri='http://localhost:8000/')
creds = flow.run_local_server(port=8000)
# Save the credentials for future use.
with open('token.json', 'w') as token:
token.write(creds.to_json())
# Check if the credentials have a valid access token or refresh token.
if not creds.valid:
# If the credentials are not valid, initiate the OAuth flow to obtain new tokens.
flow = InstalledAppFlow.from_client_secrets_file('credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the new credentials in the token.json file.
with open('token.json', 'w') as token:
token.write(creds.to_json())
return creds
def retrieve_google_drive_files(creds):
# Build the service object for calling the Drive API.
drive_service = build('drive', 'v3', credentials=creds)
# Call the Drive API to retrieve a list of files with the given search query.
response = drive_service.files().list(
q="mimeType='application/vnd.google-apps.document'",
fields="files(name, id)"
).execute()
# Get the list of files from the response.
files = response.get('files', [])
# Print the files.
if not files:
print('No files found.')
else:
print('Files:')
for file in files:
print(f"{file['name']} ({file['id']})")
# def chatgpt_requests(prompt):
# #this function will be called by the chatgpt app to get the response from the chatgpt api
# curl https://api.openai.com/v1/chat/completions \
# -H "Content-Type: application/json" \
# -H "Authorization: Bearer $OPENAI_API_KEY" \
# -d {
# "model": "gpt-3.5-turbo",
# "messages": [{"role": "user", "content": "Say this is a test!"}],
# "temperature": 0.7
# }
def main():
# Authenticate and get the credentials object.
creds = authenticate_google_drive()
# Now, you can use the "creds" object to make authorized API requests to Google Drive.
# For example, you can list files in Google Drive using the Google Drive API.
retrieve_google_drive_files(creds)
if __name__ == '__main__':
main()
| [] |
2024-01-10 | Rui-Bebiano/openai-cookbook | solutions~web_crawl_Q%26A~web-qa.py | ################################################################################
### Step 1
################################################################################
import requests
import re
import urllib.request
from bs4 import BeautifulSoup
from collections import deque
from html.parser import HTMLParser
from urllib.parse import urlparse
import os
import pandas as pd
import tiktoken
import openai
from openai.embeddings_utils import distances_from_embeddings
import numpy as np
from openai.embeddings_utils import distances_from_embeddings, cosine_similarity
# Regex pattern to match a URL
HTTP_URL_PATTERN = r'^http[s]*://.+'
# Define root domain to crawl
domain = "openai.com"
full_url = "https://openai.com/"
# Create a class to parse the HTML and get the hyperlinks
class HyperlinkParser(HTMLParser):
def __init__(self):
super().__init__()
# Create a list to store the hyperlinks
self.hyperlinks = []
# Override the HTMLParser's handle_starttag method to get the hyperlinks
def handle_starttag(self, tag, attrs):
attrs = dict(attrs)
# If the tag is an anchor tag and it has an href attribute, add the href attribute to the list of hyperlinks
if tag == "a" and "href" in attrs:
self.hyperlinks.append(attrs["href"])
################################################################################
### Step 2
################################################################################
# Function to get the hyperlinks from a URL
def get_hyperlinks(url):
# Try to open the URL and read the HTML
try:
# Open the URL and read the HTML
with urllib.request.urlopen(url) as response:
# If the response is not HTML, return an empty list
if not response.info().get('Content-Type').startswith("text/html"):
return []
# Decode the HTML
html = response.read().decode('utf-8')
except Exception as e:
print(e)
return []
# Create the HTML Parser and then Parse the HTML to get hyperlinks
parser = HyperlinkParser()
parser.feed(html)
return parser.hyperlinks
################################################################################
### Step 3
################################################################################
# Function to get the hyperlinks from a URL that are within the same domain
def get_domain_hyperlinks(local_domain, url):
clean_links = []
for link in set(get_hyperlinks(url)):
clean_link = None
# If the link is a URL, check if it is within the same domain
if re.search(HTTP_URL_PATTERN, link):
# Parse the URL and check if the domain is the same
url_obj = urlparse(link)
if url_obj.netloc == local_domain:
clean_link = link
# If the link is not a URL, check if it is a relative link
else:
if link.startswith("/"):
link = link[1:]
elif link.startswith("#") or link.startswith("mailto:"):
continue
clean_link = "https://" + local_domain + "/" + link
if clean_link is not None:
if clean_link.endswith("/"):
clean_link = clean_link[:-1]
clean_links.append(clean_link)
# Return the list of hyperlinks that are within the same domain
return list(set(clean_links))
################################################################################
### Step 4
################################################################################
def crawl(url):
# Parse the URL and get the domain
local_domain = urlparse(url).netloc
# Create a queue to store the URLs to crawl
queue = deque([url])
# Create a set to store the URLs that have already been seen (no duplicates)
seen = set([url])
# Create a directory to store the text files
if not os.path.exists("text/"):
os.mkdir("text/")
if not os.path.exists("text/"+local_domain+"/"):
os.mkdir("text/" + local_domain + "/")
# Create a directory to store the csv files
if not os.path.exists("processed"):
os.mkdir("processed")
# While the queue is not empty, continue crawling
while queue:
# Get the next URL from the queue
url = queue.pop()
print(url) # for debugging and to see the progress
# Save text from the url to a <url>.txt file
with open('text/'+local_domain+'/'+url[8:].replace("/", "_") + ".txt", "w", encoding="UTF-8") as f:
# Get the text from the URL using BeautifulSoup
soup = BeautifulSoup(requests.get(url).text, "html.parser")
# Get the text but remove the tags
text = soup.get_text()
# If the crawler gets to a page that requires JavaScript, it will stop the crawl
if ("You need to enable JavaScript to run this app." in text):
print("Unable to parse page " + url + " due to JavaScript being required")
# Otherwise, write the text to the file in the text directory
f.write(text)
# Get the hyperlinks from the URL and add them to the queue
for link in get_domain_hyperlinks(local_domain, url):
if link not in seen:
queue.append(link)
seen.add(link)
crawl(full_url)
################################################################################
### Step 5
################################################################################
def remove_newlines(serie):
serie = serie.str.replace('\n', ' ')
serie = serie.str.replace('\\n', ' ')
serie = serie.str.replace(' ', ' ')
serie = serie.str.replace(' ', ' ')
return serie
################################################################################
### Step 6
################################################################################
# Create a list to store the text files
texts=[]
# Get all the text files in the text directory
for file in os.listdir("text/" + domain + "/"):
# Open the file and read the text
with open("text/" + domain + "/" + file, "r", encoding="UTF-8") as f:
text = f.read()
# Omit the first 11 lines and the last 4 lines, then replace -, _, and #update with spaces.
texts.append((file[11:-4].replace('-',' ').replace('_', ' ').replace('#update',''), text))
# Create a dataframe from the list of texts
df = pd.DataFrame(texts, columns = ['fname', 'text'])
# Set the text column to be the raw text with the newlines removed
df['text'] = df.fname + ". " + remove_newlines(df.text)
df.to_csv('processed/scraped.csv')
df.head()
################################################################################
### Step 7
################################################################################
# Load the cl100k_base tokenizer which is designed to work with the ada-002 model
tokenizer = tiktoken.get_encoding("cl100k_base")
df = pd.read_csv('processed/scraped.csv', index_col=0)
df.columns = ['title', 'text']
# Tokenize the text and save the number of tokens to a new column
df['n_tokens'] = df.text.apply(lambda x: len(tokenizer.encode(x)))
# Visualize the distribution of the number of tokens per row using a histogram
df.n_tokens.hist()
################################################################################
### Step 8
################################################################################
max_tokens = 500
# Function to split the text into chunks of a maximum number of tokens
def split_into_many(text, max_tokens = max_tokens):
# Split the text into sentences
sentences = text.split('. ')
# Get the number of tokens for each sentence
n_tokens = [len(tokenizer.encode(" " + sentence)) for sentence in sentences]
chunks = []
tokens_so_far = 0
chunk = []
# Loop through the sentences and tokens joined together in a tuple
for sentence, token in zip(sentences, n_tokens):
# If the number of tokens so far plus the number of tokens in the current sentence is greater
# than the max number of tokens, then add the chunk to the list of chunks and reset
# the chunk and tokens so far
if tokens_so_far + token > max_tokens:
chunks.append(". ".join(chunk) + ".")
chunk = []
tokens_so_far = 0
# If the number of tokens in the current sentence is greater than the max number of
# tokens, go to the next sentence
if token > max_tokens:
continue
# Otherwise, add the sentence to the chunk and add the number of tokens to the total
chunk.append(sentence)
tokens_so_far += token + 1
return chunks
shortened = []
# Loop through the dataframe
for row in df.iterrows():
# If the text is None, go to the next row
if row[1]['text'] is None:
continue
# If the number of tokens is greater than the max number of tokens, split the text into chunks
if row[1]['n_tokens'] > max_tokens:
shortened += split_into_many(row[1]['text'])
# Otherwise, add the text to the list of shortened texts
else:
shortened.append( row[1]['text'] )
################################################################################
### Step 9
################################################################################
df = pd.DataFrame(shortened, columns = ['text'])
df['n_tokens'] = df.text.apply(lambda x: len(tokenizer.encode(x)))
df.n_tokens.hist()
################################################################################
### Step 10
################################################################################
df['embeddings'] = df.text.apply(lambda x: openai.Embedding.create(input=x, engine='text-embedding-ada-002')['data'][0]['embedding'])
df.to_csv('processed/embeddings.csv')
df.head()
################################################################################
### Step 11
################################################################################
df=pd.read_csv('processed/embeddings.csv', index_col=0)
df['embeddings'] = df['embeddings'].apply(eval).apply(np.array)
df.head()
################################################################################
### Step 12
################################################################################
def create_context(
question, df, max_len=1800, size="ada"
):
"""
Create a context for a question by finding the most similar context from the dataframe
"""
# Get the embeddings for the question
q_embeddings = openai.Embedding.create(input=question, engine='text-embedding-ada-002')['data'][0]['embedding']
# Get the distances from the embeddings
df['distances'] = distances_from_embeddings(q_embeddings, df['embeddings'].values, distance_metric='cosine')
returns = []
cur_len = 0
# Sort by distance and add the text to the context until the context is too long
for i, row in df.sort_values('distances', ascending=True).iterrows():
# Add the length of the text to the current length
cur_len += row['n_tokens'] + 4
# If the context is too long, break
if cur_len > max_len:
break
# Else add it to the text that is being returned
returns.append(row["text"])
# Return the context
return "\n\n###\n\n".join(returns)
def answer_question(
df,
model="text-davinci-003",
question="Am I allowed to publish model outputs to Twitter, without a human review?",
max_len=1800,
size="ada",
debug=False,
max_tokens=150,
stop_sequence=None
):
"""
Answer a question based on the most similar context from the dataframe texts
"""
context = create_context(
question,
df,
max_len=max_len,
size=size,
)
# If debug, print the raw model response
if debug:
print("Context:\n" + context)
print("\n\n")
try:
# Create a completions using the questin and context
response = openai.Completion.create(
prompt=f"Answer the question based on the context below, and if the question can't be answered based on the context, say \"I don't know\"\n\nContext: {context}\n\n---\n\nQuestion: {question}\nAnswer:",
temperature=0,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=stop_sequence,
model=model,
)
return response["choices"][0]["text"].strip()
except Exception as e:
print(e)
return ""
################################################################################
### Step 13
################################################################################
print(answer_question(df, question="What day is it?", debug=False))
print(answer_question(df, question="What is our newest embeddings model?"))
| [
"Answer the question based on the context below, and if the question can't be answered based on the context, say \"I don't know\"\n\nContext: PLACEHOLDER\n\n---\n\nQuestion: PLACEHOLDER\nAnswer:"
] |
2024-01-10 | ChrisLiang33/movie-semantic-search | movie_recs2.py | import pymongo
import openai
client = pymongo.MongoClient("2")
db = client.sample_mflix
collection = db.movies
openai.api_key = '2'
def generate_embedding(text: str) -> list[float]:
response = openai.Embedding.create(
model="text-embedding-ada-002",
input=text
)
return response['data'][0]['embedding']
query = "imaginary characters from outer space at war"
results = collection.aggregate([
{"$vectorSearch": {
"queryVector": generate_embedding(query),
"path": "plot_embedding",
"numCandidates": 100,
"limit": 4,
"index": "PlotSemanticSearch",
}}
]);
for document in results:
print(f'Movie Name: {document["title"]},\nMovie Plot: {document["plot"]}\n') | [] |
2024-01-10 | JorgeChiozzini/pipeline-ETL-ChatGPT | Programa.py | # Extração: Extrair as perguntas e respostas de um arquivo CSV.
import pandas as pd
def extract_data(file_path):
data = pd.read_csv(file_path)
return data
input_file_path = 'faq_data.csv'
data = extract_data(input_file_path)
# Transformação: Preparar os dados para alimentar o modelo de ChatGPT.
# Nenhuma transformação específica é necessária aqui, já que estamos usando perguntas e respostas diretamente.
# Carga: Criar um modelo de ChatGPT e disponibilizá-lo como um serviço de chatbot.
import openai
# Configure sua chave da API do OpenAI
openai.api_key = "SUA_CHAVE_DE_API_AQUI"
def generate_response(question):
prompt = f"Pergunta: {question}\nResposta:"
response = openai.Completion.create(
engine="text-davinci-003", # Escolha o mecanismo de acordo com suas necessidades
prompt=prompt,
max_tokens=50 # Ajuste este valor para controlar o tamanho da resposta
)
return response.choices[0].text.strip()
# Exemplo de interação com o chatbot
while True:
user_input = input("Usuário: ")
if user_input.lower() == "sair":
print("Chatbot encerrado.")
break
else:
response = generate_response(user_input)
print("Chatbot:", response)
# Certifique-se de ter um arquivo CSV chamado "faq_data.csv" contendo perguntas e respostas. Substitua "SUA_CHAVE_DE_API_AQUI" pela sua chave de API do OpenAI.
| [
"Pergunta: PLACEHOLDER\nResposta:"
] |
2024-01-10 | wykswr/AnyLLM | server~app~services.py | import re
from typing import Callable
from langchain.chains import LLMChain
from langchain.llms.ai21 import AI21
from langchain.llms.base import BaseLLM
from langchain.llms.openai import OpenAI
from langchain.memory import ConversationBufferWindowMemory
from langchain.prompts.prompt import PromptTemplate
class ChatBot:
def __init__(self, llm: BaseLLM):
self.instruction = """
Your name is Alice, and your creator is Yukai.
The following is a friendly conversation between a human and you.
You is talkative and provides lots of specific details from its context.
Remember don't predict what the human will say, and don't generate the conversation with human,
only provide a response to the human's latest message.
"""
template = """
{instruction}
{chat_history}
Human: {human_input}
Alice:"""
prompt = PromptTemplate(
input_variables=["chat_history", "human_input", "instruction"],
template=template
)
memory = ConversationBufferWindowMemory(memory_key="chat_history", ai_prefix="Alice", input_key="human_input",
k=6)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
memory=memory,
)
self.llm_chain = llm_chain
def chat(self, human_input):
reply = self.llm_chain.predict(human_input=human_input, instruction=self.instruction)
return harmless_reply(reply)
def make_ai21_chatbot(api_key: str) -> ChatBot:
llm = AI21(ai21_api_key=api_key, temperature=.4)
return ChatBot(llm=llm)
def make_openai_chatbot(api_key: str) -> ChatBot:
llm = OpenAI(openai_api_key=api_key)
return ChatBot(llm=llm)
def get_chatbot_maker() -> dict[str, Callable]:
return {"AI21": make_ai21_chatbot,
"openAI": make_openai_chatbot}
def harmless_reply(text: str) -> str:
pattern = r"(Human|Alice): .*"
cleaned_text = re.sub(pattern, "", text)
return cleaned_text.strip() | [
"chat_history",
"\n {instruction}\n\n {chat_history}\n\n Human: {human_input}\n Alice:",
"instruction",
"human_input"
] |
2024-01-10 | Larlar03/slr-genai-chatbot | backend~server~scripts~scraper.py | import os
from dotenv import load_dotenv
from langchain.document_loaders import UnstructuredPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from pathlib import Path
# get api key
current_dir = os.getcwd()
dotenv_path = os.path.join(current_dir, ".env")
_ = load_dotenv(dotenv_path)
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
output_directory = "documents/faiss_db"
embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
loader = UnstructuredPDFLoader("documents/pdfs/photography.pdf")
pages = loader.load()
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=350,
chunk_overlap=25,
)
docs = text_splitter.split_documents(pages)
folder = Path(output_directory)
if folder.exists():
for file in folder.glob("*"):
file.unlink() # remove all files and subdirectories
else:
folder.mkdir(parents=True, exist_ok=True)
vectordb = FAISS.from_documents(
docs,
embeddings,
)
vectordb.save_local(output_directory)
print(f"{len(docs)} docs saved to vector store")
| [] |
2024-01-10 | cmmolanos1/holbertonschool-machine_learning | reinforcement_learning~0x00-q_learning~0-load_env.py | #!/usr/bin/env python3
from gym.envs.toy_text.frozen_lake import FrozenLakeEnv
def load_frozen_lake(desc=None, map_name=None, is_slippery=False):
"""Loads the pre-made FrozenLakeEnv environment from OpenAI’s gym.
Args:
desc (list): is either None or a list of lists containing a custom
description of the map to load for the environment.
map_name (str): is either None or a string containing the pre-made
map to load.
is_slippery (bool): determine if the ice is slippery.
Returns:
the environment.
"""
env = FrozenLakeEnv(desc, map_name, is_slippery)
return env
| [] |
2024-01-10 | Parelho/PI_1_Semestre | Game.py | import pygame
import pygame_textinput
import time
import psycopg
import os
import random
import openai
import re
from dotenv import load_dotenv
# Inserir no arquivo .env a chave de api da openai para o nivel infinito funcionar
load_dotenv()
openai.api_key = os.getenv("api_key")
# Inicializa o pygame
pygame.init()
# Deixa o nome da janela como CodeQuiz
pygame.display.set_caption('CodeQuiz')
# Utilizados como workaround de um bug que estava impedindo a classe Login de pegar os valores atualizados de acertos, level e streak, se conseguir resolver o bug irei remover essa mostruosidade
acertos = 0
level = 0
streak = 0
coins = 0
logoff = False
boost = False
boost_ok = False
shield = False
shield_ok = False
fechar = False
cosmetico1_desbloqueado = False
cosmetico2_desbloqueado = False
cosmetico3_desbloqueado = False
cosmetico1_ok = False
cosmetico2_ok = False
cosmetico3_ok = False
mascote = pygame.image.load(os.path.join("imgs", "Mascote.png"))
# Gera o input do chatgpt pra gerar as pergutas e respostas do nivel infinito
def gerar_texto_chatgpt():
try:
global completion
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": "Uma pergunta simples com respostas curtas sobre programar em Python. de 4 alternativas, sendo apenas 1 correta informe a resposta correta na primeira alternativa"}
]
)
except:
print("erro de conexao a api da openai")
# Constantes
win = pygame.display.set_mode((900,600))
clock = pygame.time.Clock()
FONT = pygame.font.SysFont("timesnewroman", 50)
FONT_LOGIN = pygame.font.SysFont("timesnewroman", 30)
FONT_MOEDAS = pygame.font.SysFont("comicsans", 35)
FONT_MASCOTE = pygame.font.SysFont("comicsans", 20)
FONT_PERGUNTA = pygame.font.SysFont("arial", 20)
FONT_NIVEL = pygame.font.SysFont("arial", 100)
# Classes
class Jogador:
def __init__(self):
self.tema = "white"
self.tema_rect = pygame.Rect(675, 100, 200, 100)
self.tema_azul_rect = pygame.Rect(675, 250, 200, 100)
self.engrenagem_rect = pygame.Rect(20, 500, 100, 100)
self.loja_rect = pygame.Rect(120, 500, 100, 100)
self.voltar_rect = pygame.Rect(400, 500, 100, 30)
self.boost_rect = pygame.Rect(20, 420, 128, 128)
self.shield_rect = pygame.Rect(20, 220, 128, 128)
self.logout_rect = pygame.Rect(20, 450, 128, 128)
self.cosmetico1_rect = pygame.Rect(750, 100, 64, 64)
self.cosmetico2_rect = pygame.Rect(750, 200, 64, 64)
self.cosmetico3_rect = pygame.Rect(750, 300, 64, 64)
self.opcoes_aberto = False
self.loja_aberta = False
self.login = Login()
def menu_principal(self):
#Loja
loja = pygame.image.load(os.path.join("imgs", "Loja.png"))
win.blit(loja, (120, 500))
mpos = pygame.mouse.get_pos()
if self.loja_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
self.loja_aberta = True
#Mascote
global mascote
win.blit(mascote, (0, 50))
mensagem = FONT_MASCOTE.render("Bem Vindo ao CodeQuiz!", True, "black")
win.blit(mensagem, (0, 0))
#Opcoes
engrenagem = pygame.image.load(os.path.join("imgs", "engrenagem.png"))
win.blit(engrenagem, (20, 500))
if self.engrenagem_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
self.opcoes_aberto = True
time.sleep(0.2)
def opcoes(self):
mpos = pygame.mouse.get_pos()
if self.tema_rect.collidepoint(mpos):
if pygame.mouse.get_pressed()[0]:
self.tema = "white"
elif self.tema_azul_rect.collidepoint(mpos):
if pygame.mouse.get_pressed()[0]:
self.tema = "cornflowerblue"
pygame.draw.rect(win, "black",[670, 95, 210, 110])
pygame.draw.rect(win, "white",[675, 100, 200, 100])
pygame.draw.rect(win, "black",[670, 245, 210, 110])
pygame.draw.rect(win, "cornflowerblue",[675, 250, 200, 100])
temas = FONT_LOGIN.render("Clique para mudar de tema", True, "black")
win.blit(temas,(450, 0))
pygame.draw.rect(win, "black",[398, 498, 79, 34], 0, 3)
pygame.draw.rect(win, "burlywood2",[400, 500, 75, 30], 0, 3)
# Define o valor true que mantem uma tela nova aberta como false para voltar para a anterior
voltar = FONT_LOGIN.render("Voltar", True, "white")
win.blit(voltar,(400, 500))
if self.voltar_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
self.opcoes_aberto = False
logout = pygame.image.load(os.path.join("imgs", "Exit.png"))
win.blit(logout, (20, 450))
if self.logout_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
self.opcoes_aberto = False
global logoff
logoff = True
global mascote
mascote = pygame.image.load(os.path.join("imgs", "Mascote.png"))
global cosmetico1_ok
global cosmetico2_ok
global cosmetico3_ok
cosmetico1_ok = False
cosmetico2_ok = False
cosmetico3_ok = False
time.sleep(0.2)
def loja(self):
mpos = pygame.mouse.get_pos()
texto = FONT_LOGIN.render("Clique em um item para comprar", True, "black")
win.blit(texto,(300, 0))
powerups = FONT_LOGIN.render("Powerup = 100 moedas", True, "black")
win.blit(powerups,(0, 50))
cosmeticos = FONT_LOGIN.render("Cosmeticos = 200 moedas", True, "black")
win.blit(cosmeticos,(550, 50))
pygame.draw.rect(win, "black",[398, 498, 79, 34], 0, 3)
pygame.draw.rect(win, "burlywood2",[400, 500, 75, 30], 0, 3)
voltar = FONT_LOGIN.render("Voltar", True, "white")
win.blit(voltar,(400, 500))
if self.voltar_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
self.loja_aberta = False
bonus = pygame.image.load(os.path.join("imgs", "Boost.png"))
win.blit(bonus, (20, 420))
bonus_texto = FONT_LOGIN.render("Boost de Pontos", True, "black")
win.blit(bonus_texto,(150, 420))
global boost
if self.boost_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0] and boost == False:
boost = True
protecao = pygame.image.load(os.path.join("imgs", "shield.png"))
win.blit(protecao, (20, 220))
protecao_texto = FONT_LOGIN.render("Proteção de Streak", True, "black")
win.blit(protecao_texto, (150, 220))
global shield
if self.shield_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0] and shield == False:
shield = True
global mascote
win.blit(mascote, (450, 200))
cosmetico1 = pygame.image.load(os.path.join("imgs", "cosmetic1.png"))
cosmetico2 = pygame.image.load(os.path.join("imgs", "cosmetic2.png"))
cosmetico3 = pygame.image.load(os.path.join("imgs", "cosmetic3.png"))
win.blit(cosmetico1, (750, 100))
win.blit(cosmetico2, (750, 200))
win.blit(cosmetico3, (750, 300))
global cosmetico1_desbloqueado
global cosmetico2_desbloqueado
global cosmetico3_desbloqueado
global cosmetico1_ok
global cosmetico2_ok
global cosmetico3_ok
if self.cosmetico1_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0] and cosmetico1_desbloqueado == False and cosmetico1_ok == False:
cosmetico1_desbloqueado = True
elif self.cosmetico1_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0] and cosmetico1_ok == True:
mascote = pygame.image.load(os.path.join("imgs", "Mascote1.png"))
if self.cosmetico2_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0] and cosmetico2_desbloqueado == False and cosmetico2_ok == False:
cosmetico2_desbloqueado = True
elif self.cosmetico2_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0] and cosmetico2_ok == True:
mascote = pygame.image.load(os.path.join("imgs", "Mascote2.png"))
if self.cosmetico3_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0] and cosmetico3_desbloqueado == False and cosmetico3_ok == False:
cosmetico3_desbloqueado = True
elif self.cosmetico3_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0] and cosmetico3_ok == True:
mascote = pygame.image.load(os.path.join("imgs", "Mascote3.png"))
class SeletorDeNivel():
def __init__(self):
self.voltar_rect_pergunta = pygame.Rect(400, 500, 100, 30)
self.lv1 = pygame.Rect(270, 70, 160, 160)
self.lv2 = pygame.Rect(470, 70, 160, 160)
self.lv3 = pygame.Rect(270, 245, 160, 160)
self.lv_endless = pygame.Rect(470, 245, 160, 160)
self.lv1_aberto = False
self.lv2_aberto = False
self.lv3_aberto = False
self.lv_endless_aberto = False
self.lv_aberto = False
self.lv2_desbloqueado = False
self.lv3_desbloqueado = False
def selecionar_nivel(self, xp):
# Desbloquea os niveis caso o jogador possua xp o suficiente
if xp >= 1000:
self.lv2_desbloqueado = True
if xp >= 2000:
self.lv3_desbloqueado = True
# Cadeado é colocado em cima da bolha de um nível caso ele não esteja desbloqueado
cadeado = pygame.image.load(os.path.join("imgs", "Lock.png"))
pygame.draw.rect(win, "dimgrey",[250, 0, 5 ,600])
pygame.draw.rect(win, "dimgrey",[650, 0, 5 ,600])
win.blit(FONT_LOGIN.render("Selecionar nivel", True, "black"), (350, 0))
pygame.draw.circle(win, "burlywood2",[350, 150], 80)
win.blit(FONT_NIVEL.render("1", True, "white"), (325, 90))
pygame.draw.circle(win, "burlywood2",[550, 325], 80)
openai = pygame.image.load(os.path.join("imgs", "OPENAI.png"))
win.blit(openai, (480, 255))
# Verifica se o nivel esta desbloqueado para mostrar o cadeado ou o nivel aberto
if self.lv2_desbloqueado:
pygame.draw.circle(win, "burlywood2",[550, 150], 80)
win.blit(FONT_NIVEL.render("2", True, "white"), (525, 90))
else:
pygame.draw.circle(win, "azure4",[550, 150], 80)
win.blit(cadeado, (525, 125))
if self.lv3_desbloqueado:
pygame.draw.circle(win, "burlywood2",[350, 325], 80)
win.blit(FONT_NIVEL.render("3", True, "white"), (325, 265))
else:
pygame.draw.circle(win, "azure4",[350, 325], 80)
win.blit(cadeado, (325, 300))
mpos = pygame.mouse.get_pos()
# Verifica qual nivel esta sendo aberto
if self.lv1.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
self.lv_aberto = True
self.lv1_aberto = True
elif self.lv2.collidepoint(mpos) and pygame.mouse.get_pressed()[0] and xp >= 1000:
self.lv_aberto = True
self.lv2_aberto = True
elif self.lv3.collidepoint(mpos) and pygame.mouse.get_pressed()[0] and xp >= 2000:
self.lv_aberto = True
self.lv3_aberto = True
elif self.lv_endless.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
self.lv_aberto = True
self.lv_endless_aberto = True
# Deixa os valores de nivel aberto false caso o nivel seja fechado para parar de mostrar a tela do nivel em cima da tela do menu principal
if self.lv_aberto == False:
self.lv1_aberto = False
self.lv2_aberto = False
self.lv3_aberto = False
self.lv_endless_aberto = False
class Pergunta(SeletorDeNivel, Jogador):
def __init__(self):
self.voltar_ok = False
# Define as listas com as perguntas
self.perguntas_lv1 = ["7 // 2 vale quanto?", "print 'Hello, ', 'world', tera qual resultado no console?'", "10 % 2 vale quanto?", "Qual o simbolo utilizado para adicionar comentarios?", "100 / 0 vale quanto?"]
# Escolhe uma pergunta aleatória da lista
self.lv1_index = random.randint(0, len(self.perguntas_lv1) - 1)
self.perguntas_lv2 = ["print('Hello' + 'world') terá qual resultado?", "idade = 7 + 5 = 4, idade terá qual valor?", "7.5 // 2 vale quanto", "Como posso criar uma função em Python?", "Como posso contar a frequência de elementos em uma lista em Python?"]
self.lv2_index = random.randint(0, len(self.perguntas_lv2) - 1)
self.perguntas_lv3 = ["Como posso verificar se uma lista está vazia em Python?", "Como posso converter uma string em maiúsculas em Python?", "Como posso criar um dicionário vazio em Python?", "Como posso criar uma classe em python?", "Como faço para instalar um pacote externo em Python usando o pip?"]
self.lv3_index = random.randint(0, len(self.perguntas_lv3) - 1)
self.resp1 = pygame.Rect(10, 170, 200, 100)
self.resp2 = pygame.Rect(250, 170, 200, 100)
self.resp3 = pygame.Rect(10, 300, 200, 100)
self.resp4 = pygame.Rect(250, 300, 200, 100)
self.nova_pergunta = pygame.Rect(325, 425, 250, 30)
self.resposta = Resposta()
self.respostas_ok = False
self.pergunta_ok = False
self.correta = 0
self.acerto = False
self.erro = False
self.shuffle_ok = False
self.resp_certa = ""
self.respostas = []
def nivel(self, lv1_aberto, lv2_aberto, lv3_aberto, lv_endless_aberto, voltar_rect_pergunta, lv_aberto):
troca_ok = False
global level
global acertos
global streak
global shield
win.blit(FONT_LOGIN.render("Streak: " + str(streak), True, "black"), (600, 0))
mpos = pygame.mouse.get_pos()
if lv1_aberto:
level = 1
pygame.draw.rect(win, "azure4",[10, 170, 200, 100])
pygame.draw.rect(win, "azure4",[250, 170, 200, 100])
pygame.draw.rect(win, "azure4",[10, 300, 200, 100])
pygame.draw.rect(win, "azure4",[250, 300, 200, 100])
win.blit(FONT_LOGIN.render("Nivel 1", True, "black"), (400, 0))
win.blit(FONT_PERGUNTA.render(self.perguntas_lv1[self.lv1_index], True, "black"), (20, 40))
if self.perguntas_lv1[self.lv1_index] == "7 // 2 vale quanto?":
win.blit(FONT_PERGUNTA.render("3.5", True, "black"), (10, 170))
win.blit(FONT_PERGUNTA.render("3", True, "black"), (250, 170))
win.blit(FONT_PERGUNTA.render("Vai dar erro de compilação", True, "black"), (10, 300))
win.blit(FONT_PERGUNTA.render("4", True, "black"), (250, 300))
if self.resp1.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp2.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
acertos += 1
streak += 1
while troca_ok == False:
self.lv1_index = random.randint(0, len(self.perguntas_lv1) - 1)
if self.lv1_index != 0:
troca_ok = True
time.sleep(0.5)
elif self.resp3.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp4.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.perguntas_lv1[self.lv1_index] == "print 'Hello, ', 'world', tera qual resultado no console?'":
win.blit(FONT_PERGUNTA.render("Hello, world", True, "black"), (10, 170))
win.blit(FONT_PERGUNTA.render("Hello, ", True, "black"), (250, 170))
win.blit(FONT_PERGUNTA.render("Vai dar erro de compilação", True, "black"), (10, 300))
win.blit(FONT_PERGUNTA.render("world", True, "black"), (250, 300))
if self.resp1.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp2.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp3.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
while troca_ok == False:
self.lv1_index = random.randint(0, len(self.perguntas_lv1) - 1)
if self.lv1_index != 1:
troca_ok = True
time.sleep(0.5)
acertos += 1
streak += 1
elif self.resp4.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.perguntas_lv1[self.lv1_index] == "10 % 2 vale quanto?":
win.blit(FONT_PERGUNTA.render("0", True, "black"), (10, 170))
win.blit(FONT_PERGUNTA.render("5, ", True, "black"), (250, 170))
win.blit(FONT_PERGUNTA.render("0.2", True, "black"), (10, 300))
win.blit(FONT_PERGUNTA.render("Vai dar erro de compilação", True, "black"), (250, 300))
if self.resp1.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
acertos += 1
streak += 1
while troca_ok == False:
self.lv1_index = random.randint(0, len(self.perguntas_lv1) - 1)
if self.lv1_index != 2:
troca_ok = True
time.sleep(0.5)
elif self.resp2.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp3.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp4.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.perguntas_lv1[self.lv1_index] == "Qual o simbolo utilizado para adicionar comentarios?":
win.blit(FONT_PERGUNTA.render("#", True, "black"), (10, 170))
win.blit(FONT_PERGUNTA.render("//", True, "black"), (250, 170))
win.blit(FONT_PERGUNTA.render("/* */", True, "black"), (10, 300))
win.blit(FONT_PERGUNTA.render("<!-- -->", True, "black"), (250, 300))
if self.resp1.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
acertos += 1
streak += 1
while troca_ok == False:
self.lv1_index = random.randint(0, len(self.perguntas_lv1) - 1)
if self.lv1_index != 3:
troca_ok = True
time.sleep(0.5)
elif self.resp2.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp3.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp4.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.perguntas_lv1[self.lv1_index] == "100 / 0 vale quanto?":
win.blit(FONT_PERGUNTA.render("0", True, "black"), (10, 170))
win.blit(FONT_PERGUNTA.render("100", True, "black"), (250, 170))
win.blit(FONT_PERGUNTA.render("Vai dar erro de compilação", True, "black"), (10, 300))
win.blit(FONT_PERGUNTA.render("False", True, "black"), (250, 300))
if self.resp1.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp2.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp3.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
while troca_ok == False:
self.lv1_index = random.randint(0, len(self.perguntas_lv1) - 1)
if self.lv1_index != 4:
troca_ok = True
time.sleep(0.5)
acertos += 1
streak += 1
elif self.resp4.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif lv2_aberto:
level = 2
pygame.draw.rect(win, "azure4",[10, 170, 200, 100])
pygame.draw.rect(win, "azure4",[250, 170, 200, 100])
pygame.draw.rect(win, "azure4",[10, 300, 200, 100])
pygame.draw.rect(win, "azure4",[250, 300, 200, 100])
win.blit(FONT_LOGIN.render("Nivel 2", True, "black"), (400, 0))
win.blit(FONT_PERGUNTA.render(self.perguntas_lv2[self.lv2_index], True, "black"), (20, 40))
if self.perguntas_lv2[self.lv2_index] == "print('Hello' + 'world') terá qual resultado?":
win.blit(FONT_PERGUNTA.render("Hello world", True, "black"), (10, 170))
win.blit(FONT_PERGUNTA.render("Helloworld", True, "black"), (250, 170))
win.blit(FONT_PERGUNTA.render("Vai dar erro de compilação", True, "black"), (10, 300))
win.blit(FONT_PERGUNTA.render("Hello+world", True, "black"), (250, 300))
if self.resp1.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp2.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
acertos += 1
streak += 1
while troca_ok == False:
self.lv2_index = random.randint(0, len(self.perguntas_lv2) - 1)
if self.lv2_index != 0:
troca_ok = True
time.sleep(0.5)
elif self.resp3.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp4.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.perguntas_lv2[self.lv2_index] == "idade = 7 + 5 = 4, idade terá qual valor?":
win.blit(FONT_PERGUNTA.render("idade = 4", True, "black"), (10, 170))
win.blit(FONT_PERGUNTA.render("idade = 12", True, "black"), (250, 170))
win.blit(FONT_PERGUNTA.render("Vai dar erro de sintaxe", True, "black"), (10, 300))
win.blit(FONT_PERGUNTA.render("idade = 8", True, "black"), (250, 300))
if self.resp1.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp2.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp3.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
while troca_ok == False:
self.lv2_index = random.randint(0, len(self.perguntas_lv2) - 1)
if self.lv2_index != 1:
troca_ok = True
time.sleep(0.5)
acertos += 1
streak += 1
elif self.resp4.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.perguntas_lv2[self.lv2_index] == "7.5 // 2 vale quanto":
win.blit(FONT_PERGUNTA.render("3", True, "black"), (10, 170))
win.blit(FONT_PERGUNTA.render("3.5, ", True, "black"), (250, 170))
win.blit(FONT_PERGUNTA.render("4", True, "black"), (10, 300))
win.blit(FONT_PERGUNTA.render("Vai dar erro de compilação", True, "black"), (250, 300))
if self.resp1.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
acertos += 1
streak += 1
while troca_ok == False:
self.lv2_index = random.randint(0, len(self.perguntas_lv2) - 1)
if self.lv2_index != 2:
troca_ok = True
time.sleep(0.5)
elif self.resp2.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp3.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp4.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.perguntas_lv2[self.lv2_index] == "Como posso criar uma função em Python?":
win.blit(FONT_PERGUNTA.render("def", True, "black"), (10, 170))
win.blit(FONT_PERGUNTA.render("func", True, "black"), (250, 170))
win.blit(FONT_PERGUNTA.render("method", True, "black"), (10, 300))
win.blit(FONT_PERGUNTA.render("class", True, "black"), (250, 300))
if self.resp1.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
acertos += 1
streak += 1
while troca_ok == False:
self.lv2_index = random.randint(0, len(self.perguntas_lv2) - 1)
if self.lv2_index != 3:
troca_ok = True
time.sleep(0.5)
elif self.resp2.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp3.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp4.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.perguntas_lv2[self.lv2_index] == "Como posso contar a frequência de elementos em uma lista em Python?":
win.blit(FONT_PERGUNTA.render("len()", True, "black"), (10, 170))
win.blit(FONT_PERGUNTA.render("sum()", True, "black"), (250, 170))
win.blit(FONT_PERGUNTA.render("count()", True, "black"), (10, 300))
win.blit(FONT_PERGUNTA.render("find()", True, "black"), (250, 300))
if self.resp1.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp2.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp3.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
while troca_ok == False:
self.lv2_index = random.randint(0, len(self.perguntas_lv2) - 1)
if self.lv2_index != 4:
troca_ok = True
time.sleep(0.5)
acertos += 1
streak += 1
elif self.resp4.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif lv3_aberto:
level = 3
pygame.draw.rect(win, "azure4",[10, 170, 200, 100])
pygame.draw.rect(win, "azure4",[250, 170, 200, 100])
pygame.draw.rect(win, "azure4",[10, 300, 200, 100])
pygame.draw.rect(win, "azure4",[250, 300, 200, 100])
win.blit(FONT_LOGIN.render("Nivel 3", True, "black"), (400, 0))
win.blit(FONT_PERGUNTA.render(self.perguntas_lv3[self.lv3_index], True, "black"), (20, 40))
if self.perguntas_lv3[self.lv3_index] == "Como posso verificar se uma lista está vazia em Python?":
win.blit(FONT_PERGUNTA.render("is_empty()", True, "black"), (10, 170))
win.blit(FONT_PERGUNTA.render("len()", True, "black"), (250, 170))
win.blit(FONT_PERGUNTA.render("check_empty()", True, "black"), (10, 300))
win.blit(FONT_PERGUNTA.render("empty()", True, "black"), (250, 300))
if self.resp1.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp2.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
acertos += 1
streak += 1
while troca_ok == False:
self.lv3_index = random.randint(0, len(self.perguntas_lv3) - 1)
if self.lv3_index != 0:
troca_ok = True
time.sleep(0.5)
elif self.resp3.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp4.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.perguntas_lv3[self.lv3_index] == "Como posso converter uma string em maiúsculas em Python?":
win.blit(FONT_PERGUNTA.render("uppercase()", True, "black"), (10, 170))
win.blit(FONT_PERGUNTA.render("convert_upper()", True, "black"), (250, 170))
win.blit(FONT_PERGUNTA.render("upper()", True, "black"), (10, 300))
win.blit(FONT_PERGUNTA.render("to_upper()", True, "black"), (250, 300))
if self.resp1.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp2.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp3.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
while troca_ok == False:
self.lv3_index = random.randint(0, len(self.perguntas_lv3) - 1)
if self.lv3_index != 1:
troca_ok = True
time.sleep(0.5)
acertos += 1
streak += 1
elif self.resp4.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.perguntas_lv3[self.lv3_index] == "Como posso criar um dicionário vazio em Python?":
win.blit(FONT_PERGUNTA.render("dicionario = {}", True, "black"), (10, 170))
win.blit(FONT_PERGUNTA.render("dicionario = dict", True, "black"), (250, 170))
win.blit(FONT_PERGUNTA.render("dicionario = dict()", True, "black"), (10, 300))
win.blit(FONT_PERGUNTA.render("dicionario = []", True, "black"), (250, 300))
if self.resp1.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
acertos += 1
streak += 1
while troca_ok == False:
self.lv3_index = random.randint(0, len(self.perguntas_lv3) - 1)
if self.lv3_index != 2:
troca_ok = True
time.sleep(0.5)
elif self.resp2.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp3.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp4.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.perguntas_lv3[self.lv3_index] == "Como posso criar uma classe em python?":
win.blit(FONT_PERGUNTA.render("class", True, "black"), (10, 170))
win.blit(FONT_PERGUNTA.render("def", True, "black"), (250, 170))
win.blit(FONT_PERGUNTA.render("public class", True, "black"), (10, 300))
win.blit(FONT_PERGUNTA.render("<nome_da_classe>", True, "black"), (250, 300))
if self.resp1.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
acertos += 1
streak += 1
while troca_ok == False:
self.lv3_index = random.randint(0, len(self.perguntas_lv3) - 1)
if self.lv3_index != 3:
troca_ok = True
time.sleep(0.5)
elif self.resp2.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp3.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp4.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.perguntas_lv3[self.lv3_index] == "Como faço para instalar um pacote externo em Python usando o pip?":
win.blit(FONT_PERGUNTA.render("pip install 'nomedopacote'", True, "black"), (10, 170))
win.blit(FONT_PERGUNTA.render("python -m pip install nomedopacote", True, "black"), (250, 170))
win.blit(FONT_PERGUNTA.render("pip install nomedopacote", True, "black"), (10, 300))
win.blit(FONT_PERGUNTA.render("pip install nomedopacote==versao", True, "black"), (250, 300))
if self.resp1.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp2.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif self.resp3.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
while troca_ok == False:
self.lv3_index = random.randint(0, len(self.perguntas_lv3) - 1)
if self.lv3_index != 4:
troca_ok = True
time.sleep(0.5)
acertos += 1
streak += 1
elif self.resp4.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if shield:
shield = False
time.sleep(0.5)
else:
streak = 0
elif lv_endless_aberto:
win.blit(FONT_LOGIN.render("Nivel INF", True, "black"), (400, 0))
win.blit(FONT_LOGIN.render("Gerar outra pergunta", True, "black"), (325, 425))
pygame.draw.rect(win, "azure4",[10, 170, 200, 100])
pygame.draw.rect(win, "azure4",[250, 170, 200, 100])
pygame.draw.rect(win, "azure4",[10, 300, 200, 100])
pygame.draw.rect(win, "azure4",[250, 300, 200, 100])
# Gera uma nova pergunta
if self.nova_pergunta.collidepoint(mpos) and pygame.mouse.get_pressed()[0] or self.pergunta_ok == False:
self.pergunta_ok = True
self.shuffle_ok = False
gerar_texto_chatgpt()
# Tratamento de dados enviado pelo chatgpt
global completion
pattern = r"\n|\?|a\)|b\)|c\)|d\)"
string = completion.choices[0].message.content
elementos = re.split(pattern, string)
elementos = [element for element in elementos if element.strip()]
# Muda de ordem as respotas para a correta não ficar sempre como primeira
if not self.shuffle_ok:
self.resp_certa = elementos[1]
self.respostas.clear()
self.respostas.append(elementos[1])
self.respostas.append(elementos[2])
self.respostas.append(elementos[3])
self.respostas.append(elementos[4])
random.shuffle(self.respostas)
if self.resp_certa in self.respostas[0]:
self.correta = 1
elif self.resp_certa in self.respostas[1]:
self.correta = 2
elif self.resp_certa in self.respostas[2]:
self.correta = 3
elif self.resp_certa in self.respostas[3]:
self.correta = 4
self.shuffle_ok = True
pergunta = elementos[0]
win.blit(FONT_PERGUNTA.render(pergunta, True, "black"), (0, 50))
win.blit(FONT_PERGUNTA.render(self.respostas[0], True, "black"), (10, 170))
win.blit(FONT_PERGUNTA.render(self.respostas[1], True, "black"), (250, 170))
win.blit(FONT_PERGUNTA.render(self.respostas[2], True, "black"), (10, 300))
win.blit(FONT_PERGUNTA.render(self.respostas[3], True, "black"), (250, 300))
if self.resp1.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if self.correta == 1:
self.acerto = True
else:
self.erro = True
elif self.resp2.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if self.correta == 2:
self.acerto = True
else:
self.erro = True
elif self.resp3.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if self.correta == 3:
self.acerto = True
else:
self.erro = True
elif self.resp4.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
if self.correta == 4:
self.acerto = True
else:
self.erro = True
if self.acerto and self.erro == False:
msg = FONT_MOEDAS.render("Acertou :)", True, "black")
win.blit(msg, (720, 110))
self.erro = False
if self.erro and self.acerto == False:
msg = FONT_MOEDAS.render("Errou :(", True, "black")
win.blit(msg, (720, 110))
self.acerto = False
# Caso os dois ficassem verdadeiros por um bug, eles são definidos como False para arrumar o bug
if self.erro == True and self.acerto == True:
self.erro = False
self.acerto = False
if lv_aberto:
voltar = FONT_LOGIN.render("Voltar", True, "black")
win.blit(voltar,(400, 500))
if voltar_rect_pergunta.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
self.voltar_ok = True
# Abre o metodo do banco de dados para poder atualizar as moedas do jogador
login.banco_de_dados(login.moedas, login.xp)
acertos = 0
streak = 0
level = 0
# Escolhe uma nova pergunta aleatória caso o jogador saia do nível para não ficar na mesma
self.lv1_index = random.randint(0, len(self.perguntas_lv1) - 1)
self.lv2_index = random.randint(0, len(self.perguntas_lv2) - 1)
self.lv3_index = random.randint(0, len(self.perguntas_lv3) - 1)
self.respostas_ok = False
class Resposta(Pergunta):
def __init__(self):
pass
def calcular_pontos(self, acertos, streak, level):
formula = acertos * 100 * level * (1 + streak / 10)
global boost
global boost_ok
if boost and formula != 0:
pontos = formula * 1.25
boost = False
boost_ok = False
else:
pontos = formula
return pontos
def calcular_xp(self):
xp_novo = self.calcular_pontos(acertos, streak, level) / 10
return xp_novo
def calcular_moedas(self):
moedas_novo = self.calcular_pontos(acertos, streak, level) / 4
return moedas_novo
class Login(Pergunta):
global coins
# Método utilizado para permitir a sobrecarga de métodos no Python
def __init__(self):
self.inicio = False
self.login = False
self.cadastro = False
self.sair_rect = pygame.Rect(830, 0, 64, 64)
self.cadastrar_rect = pygame.Rect(500, 300, 200, 50)
self.login_rect = pygame.Rect(200, 300, 125, 50)
self.usuario_rect = pygame.Rect(90, 92, 600, 40)
self.senha_rect = pygame.Rect(90, 192, 600, 40)
self.voltar_rect = pygame.Rect(400, 500, 100, 30)
self.enviar_rect = pygame.Rect(400, 400, 100 , 30)
self.entrar_rect = pygame.Rect(350, 400, 150, 50)
self.usuario_click = False
self.senha_click = False
self.login_pronto = False
self.cadastro_pronto = False
self.entrar = True
self.senha = ""
self.usuario = ""
self.pergunta = Pergunta()
self.resposta = Resposta()
self.moedas = coins
self.xp = 0
def mostrar_xpmoedas(self):
xp = FONT_MOEDAS.render(str(self.xp), True, "black")
win.blit(xp, (765, 110))
moedas = FONT_MOEDAS.render(str(self.moedas), True, "black")
win.blit(moedas, (765, 210))
xp_img = pygame.image.load(os.path.join("imgs", "Xp.png"))
win.blit(xp_img, (700, 100))
moedas_img = pygame.image.load(os.path.join("imgs", "Coin.png"))
win.blit(moedas_img, (700, 200))
def banco_de_dados(self, moedas, xp):
# Conecta no banco de dados
try:
with psycopg.connect(
dbname="neondb",
user="Parelho",
password="ns3Nolki1RzC",
host="ep-little-field-610508.us-east-2.aws.neon.tech",
port= '5432'
) as db:
# Abre o cursor para verificar os valores das tabelas
with db.cursor() as cursor:
# Insere o cadastro no banco de dados
if self.cadastro_pronto == True:
add_usuario = "INSERT INTO Usuario VALUES(%s, %s, %s, %s);"
data_usuario = (self.usuario, self.senha, xp, moedas)
cursor.execute(add_usuario, data_usuario)
db.commit()
add_loja = "INSERT INTO Loja VALUES(%s);"
data_loja = (self.usuario,)
cursor.execute(add_loja, data_loja)
db.commit()
self.cadastro_pronto = False
# Verifica se o usuario e senha inseridos existem no banco de dados
if self.login_pronto:
query = "SELECT * FROM Usuario"
cursor.execute(query)
rows = cursor.fetchall()
usuario_encontrado = False
for row in rows:
if self.usuario == row[0] and self.senha == row[1]:
print("Usuario encontrado")
self.xp = int(row[2])
self.moedas = int(row[3])
self.login_pronto = False
self.inicio = False
self.login = False
usuario_encontrado = True
break
else:
if not usuario_encontrado:
print("Usuario nao encontrado")
self.login_pronto = False
# Caso o usuario sai de algum nível calcula a xp e moedas novas e atualiza no banco de dados
if pergunta.voltar_ok:
global acertos
global level
global streak
xp_nova = int(self.xp + self.resposta.calcular_xp())
query = f"UPDATE usuario SET xp = '{xp_nova}' WHERE username = '{self.usuario}';"
cursor.execute(query)
self.xp = xp_nova
moedas_nova = int(self.moedas + self.resposta.calcular_moedas())
query = f"UPDATE usuario SET moedas = '{moedas_nova}' WHERE username = '{self.usuario}';"
cursor.execute(query)
self.moedas = moedas_nova
# Desbloquea os cosmeticos e remove o custo do banco de dados
global cosmetico1_desbloqueado
if cosmetico1_desbloqueado:
if self.moedas < 200:
cosmetico1_desbloqueado = False
else:
coins_novo = self.moedas - 200
query = f"UPDATE usuario SET moedas = '{coins_novo}' WHERE username = '{self.usuario}';"
cursor.execute(query)
query = "UPDATE loja SET cosmetico1 = %s WHERE username = %s;"
data = (True, self.usuario)
cursor.execute(query, data)
self.moedas = coins_novo
cosmetico1_desbloqueado = False
global cosmetico2_desbloqueado
if cosmetico2_desbloqueado:
if self.moedas < 200:
cosmetico2_desbloqueado = False
else:
coins_novo = self.moedas - 200
query = f"UPDATE usuario SET moedas = '{coins_novo}' WHERE username = '{self.usuario}';"
cursor.execute(query)
query = "UPDATE loja SET cosmetico2 = %s WHERE username = %s;"
data = (True, self.usuario)
cursor.execute(query, data)
self.moedas = coins_novo
cosmetico2_desbloqueado = False
global cosmetico3_desbloqueado
if cosmetico3_desbloqueado:
if self.moedas < 200:
cosmetico3_desbloqueado = False
else:
coins_novo = self.moedas - 200
query = f"UPDATE usuario SET moedas = '{coins_novo}' WHERE username = '{self.usuario}';"
cursor.execute(query)
query = "UPDATE loja SET cosmetico3 = %s WHERE username = %s;"
data = (True, self.usuario)
cursor.execute(query, data)
self.moedas = coins_novo
cosmetico3_desbloqueado = False
global cosmetico1_ok
global cosmetico2_ok
global cosmetico3_ok
username = self.usuario
consulta_cosmetico1 = "SELECT cosmetico1 FROM loja WHERE username = %s;"
cursor.execute(consulta_cosmetico1, (username,))
resultado_cosmetico1 = cursor.fetchone()
# Executar a consulta para o cosmetico2
consulta_cosmetico2 = "SELECT cosmetico2 FROM loja WHERE username = %s;"
cursor.execute(consulta_cosmetico2, (username,))
resultado_cosmetico2 = cursor.fetchone()
# Executar a consulta para o cosmetico3
consulta_cosmetico3 = "SELECT cosmetico3 FROM loja WHERE username = %s;"
cursor.execute(consulta_cosmetico3, (username,))
resultado_cosmetico3 = cursor.fetchone()
# Verificar o resultado do cosmetico1
if resultado_cosmetico1 and resultado_cosmetico1[0] is True:
cosmetico1_ok = True
# Verificar o resultado do cosmetico2
if resultado_cosmetico2 and resultado_cosmetico2[0] is True:
cosmetico2_ok = True
# Verificar o resultado do cosmetico3
if resultado_cosmetico3 and resultado_cosmetico3[0] is True:
cosmetico3_ok = True
# Disconta o valor dos powerups do jogador no banco de dados
global boost
if boost:
if self.moedas < 100:
boost = False
else:
coins_novo = self.moedas - 100
query = f"UPDATE usuario SET moedas = '{coins_novo}' WHERE username = '{self.usuario}';"
cursor.execute(query)
self.moedas = coins_novo
global shield
global shield_ok
if shield:
if self.moedas < 100:
shield = False
else:
coins_novo = self.moedas - 100
query = f"UPDATE usuario SET moedas = '{coins_novo}' WHERE username = '{self.usuario}';"
cursor.execute(query)
self.moedas = coins_novo
shield_ok = True
except:
print("Erro de conexao com o banco de dados")
def fazer_login(self):
        # Show the username and password fields to the player
tela = FONT.render("Login", True, "black")
win.blit(tela, (400, 10))
pygame.draw.rect(win, "black",[90, 92, 600, 40], 2, 3)
usuario = FONT_LOGIN.render("Usuario: ", True, "black")
win.blit(usuario, (95, 92))
pygame.draw.rect(win, "black",[90, 192, 600, 40], 2, 3)
senha = FONT_LOGIN.render("Senha: ", True, "black")
win.blit(senha, (100, 192))
pygame.draw.rect(win, "black",[398, 498, 79, 34], 0, 3)
pygame.draw.rect(win, "burlywood2",[400, 500, 75, 30], 0, 3)
voltar = FONT_LOGIN.render("Voltar", True, "white")
win.blit(voltar,(400, 500))
pygame.draw.rect(win, "black",[398, 398, 84, 34], 0, 3)
pygame.draw.rect(win, "burlywood2",[400, 400, 80, 30], 0, 3)
enviar = FONT_LOGIN.render("Enviar", True, "white")
win.blit(enviar, (400, 400))
mpos = pygame.mouse.get_pos()
        # Check whether the mouse is over the back text and the player clicked the left mouse button
if self.voltar_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
self.inicio = True
self.login = False
        # Check whether the mouse is over the username text and the player clicked the left mouse button
if self.usuario_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
            # usuario_click is used so the player does not have to keep holding the left mouse button to type; there is probably a better solution
self.usuario_click = True
        # If the player clicks outside the box, it stops accepting input
elif not self.usuario_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
self.usuario_click = False
textinput_usuario.cursor_visible = False
if self.usuario_click:
            # Checks every frame whether the string has changed
textinput_usuario.update(events)
            # Draw the string on the screen
win.blit(textinput_usuario.surface, (200, 100))
        # Check whether the mouse is over the password text and the player clicked the left mouse button
if self.senha_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
            # senha_click is used so the player does not have to keep holding the left mouse button to type; there is probably a better solution
self.senha_click = True
        # If the player clicks outside the box, it stops accepting input
elif not self.senha_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
self.senha_click = False
textinput_senha.cursor_visible = False
if self.senha_click:
            # Checks every frame whether the string has changed
textinput_senha.update(events)
            # Draw the string on the screen
win.blit(textinput_senha.surface, (200, 200))
if self.enviar_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
time.sleep(1)
self.login_pronto = True
self.usuario = textinput_usuario.value
self.senha = textinput_senha.value
textinput_usuario.value = ""
textinput_senha.value = ""
    # Similar to the login method, apart from the commented parts
def fazer_cadastro(self):
tela = FONT.render("Cadastro", True, "black")
win.blit(tela, (350, 10))
pygame.draw.rect(win, "black",[90, 92, 600, 40], 2, 3)
usuario = FONT_LOGIN.render("Usuario: ", True, "black")
win.blit(usuario, (95, 92))
pygame.draw.rect(win, "black",[90, 192, 600, 40], 2, 3)
senha = FONT_LOGIN.render("Senha: ", True, "black")
win.blit(senha, (100, 192))
pygame.draw.rect(win, "black",[398, 498, 79, 34], 0, 3)
pygame.draw.rect(win, "burlywood2",[400, 500, 75, 30], 0, 3)
voltar = FONT_LOGIN.render("Voltar", True, "white")
win.blit(voltar,(400, 500))
pygame.draw.rect(win, "black",[398, 398, 84, 34], 0, 3)
pygame.draw.rect(win, "burlywood2",[400, 400, 80, 30], 0, 3)
enviar = FONT_LOGIN.render("Enviar", True, "white")
win.blit(enviar, (400, 400))
mpos = pygame.mouse.get_pos()
if self.voltar_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
self.inicio = True
self.cadastro = False
if self.usuario_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
self.usuario_click = True
elif not self.usuario_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
self.usuario_click = False
textinput_usuario.cursor_visible = False
if self.usuario_click:
textinput_usuario.update(events)
win.blit(textinput_usuario.surface, (200, 100))
if self.senha_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
self.senha_click = True
elif not self.senha_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
self.senha_click = False
textinput_senha.cursor_visible = False
if self.senha_click:
textinput_senha.update(events)
win.blit(textinput_senha.surface, (200, 200))
if self.enviar_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
time.sleep(1)
self.cadastro_pronto = True
self.usuario = textinput_usuario.value
self.senha = textinput_senha.value
self.cadastro = False
self.inicio = True
textinput_usuario.value = ""
textinput_senha.value = ""
def tela_inicio(self):
mpos = pygame.mouse.get_pos()
sair = pygame.image.load(os.path.join("imgs", "Close.png"))
win.blit(sair, (830, 0))
if self.sair_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
global fechar
fechar = True
bem_vindo = FONT.render("Efetue seu login ou cadastro", True, "black")
win.blit(bem_vindo, (165, 100))
pygame.draw.rect(win, "black",[498, 298, 199, 54], 0, 3)
pygame.draw.rect(win, "burlywood2",[500, 300, 195, 50], 0, 3)
cadastrar = FONT.render("Cadastrar", True, "white")
win.blit(cadastrar, (500, 300))
pygame.draw.rect(win, "black",[198, 298, 129, 64], 0, 3)
pygame.draw.rect(win, "burlywood2",[200, 300, 125, 60], 0, 3)
login = FONT.render("Login", True, "white")
win.blit(login, (200, 300))
        # Check whether the mouse is over the sign-up button
global logoff
if logoff:
self.inicio = True
logoff = False
if self.cadastrar_rect.collidepoint(mpos):
            # Check whether the player clicked the left mouse button
if pygame.mouse.get_pressed()[0]:
self.cadastro = True
self.inicio = False
        # Check whether the mouse is over the login button
elif self.login_rect.collidepoint(mpos):
            # Check whether the player clicked the left mouse button
if pygame.mouse.get_pressed()[0]:
self.login = True
self.inicio = False
def tela_boas_vindas(self):
bem_vindo = FONT.render("Bem-vindo ao CodeQuiz", True, "black")
win.blit(bem_vindo, (200, 100))
pygame.draw.rect(win, "black",[348, 398, 154, 54], 0, 3)
pygame.draw.rect(win, "burlywood2",[350, 400, 150, 50], 0, 3)
entrar = FONT.render("Entrar", True, "white")
win.blit(entrar, (360, 400))
mpos = pygame.mouse.get_pos()
if self.entrar_rect.collidepoint(mpos) and pygame.mouse.get_pressed()[0]:
time.sleep(0.2)
self.inicio = True
self.entrar = False
    # Used to create the string that will be used by pygame_textinput
textinput_usuario = pygame_textinput.TextInputVisualizer()
textinput_senha = pygame_textinput.TextInputVisualizer()
running = True
jogador = Jogador()
login = Login()
nivel = SeletorDeNivel()
pergunta = Pergunta()
while running:
        # Used to read the player's inputs
events = pygame.event.get()
        # Ends the loop if the pygame window is closed
for event in events:
if event.type == pygame.QUIT or fechar == True:
running = False
        # Draws the background theme on the screen, behind everything else that gets drawn
win.fill(jogador.tema)
if login.entrar:
login.tela_boas_vindas()
if logoff == True:
login.tela_inicio()
        # Login().inicio is used to decide whether the welcome screen should be shown or not
if login.inicio:
login.tela_inicio()
        # If login is True, the login screen is opened
elif login.login:
login.fazer_login()
if login.login_pronto:
login.banco_de_dados(login.moedas, login.xp)
        # If cadastro is True, the sign-up screen is opened
elif login.cadastro:
login.fazer_cadastro()
if login.cadastro_pronto:
login.banco_de_dados(login.moedas, login.xp)
elif login.inicio == False and login.login == False and login.cadastro == False and login.entrar == False:
if jogador.opcoes_aberto == False and jogador.loja_aberta == False and nivel.lv_aberto == False:
jogador.menu_principal()
login.mostrar_xpmoedas()
nivel.selecionar_nivel(login.xp)
elif jogador.opcoes_aberto:
jogador.opcoes()
elif jogador.loja_aberta:
jogador.loja()
if boost == True and login.moedas >= 100 and boost_ok == False:
login.banco_de_dados(login.moedas, login.xp)
boost_ok = True
elif shield == True and login.moedas >= 100 and shield_ok == False:
login.banco_de_dados(login.moedas, login.xp)
elif cosmetico1_desbloqueado == True and login.moedas >= 200:
login.banco_de_dados(login.moedas, login.xp)
elif cosmetico2_desbloqueado == True and login.moedas >= 200:
login.banco_de_dados(login.moedas, login.xp)
elif cosmetico3_desbloqueado == True and login.moedas >= 200:
login.banco_de_dados(login.moedas, login.xp)
elif nivel.lv_aberto:
pergunta.nivel(nivel.lv1_aberto, nivel.lv2_aberto, nivel.lv3_aberto, nivel.lv_endless_aberto , nivel.voltar_rect_pergunta, nivel.lv_aberto)
if pergunta.voltar_ok:
nivel.lv_aberto = False
pergunta.voltar_ok = False
time.sleep(0.5)
        # Update the pygame display
pygame.display.update()
        # Frame rate cap, currently 30 FPS
clock.tick(30)
    # Stop pygame operations to make sure the code finishes executing
pygame.quit() | [
"Uma pergunta simples com respostas curtas sobre programar em Python. de 4 alternativas, sendo apenas 1 correta informe a resposta correta na primeira alternativa"
] |
2024-01-10 | Eylon12345/ArticlesGPT. | main_embed.py | import arxiv
import requests
import PyPDF2
from io import BytesIO
import tiktoken
import os
import openai
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity
openai.api_key = os.getenv("OPENAI_API_KEY")
def count_tokens(text):
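    # Count tokens with the tiktoken encoding used by gpt-3.5-turbo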
encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
return len(encoding.encode(text))
def sanitize_filename(filename):
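    # Strip characters that are invalid in filenames and replace whitespace with underscores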
invalid_chars = set(r'\/:*?"<>|')
sanitized_filename = "".join(c for c in filename if c not in invalid_chars)
sanitized_filename = "_".join(sanitized_filename.split())
return sanitized_filename
def create_directory(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def download_article_pdf(url):
response = requests.get(url)
pdf = PyPDF2.PdfReader(BytesIO(response.content))
text = ""
for page in pdf.pages:
text += page.extract_text()
return text
def sanitize_article_text(text):
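    # Cut the text off at the REFERENCES section so the bibliography is not summarized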
references_index = text.upper().find("REFERENCES")
if references_index != -1:
text = text[:references_index]
return text
def save_article(save_path, text):
with open(save_path, "w", encoding="utf-8", errors="ignore") as f:
f.write(text)
def summarize_article(text):
# Check the number of tokens in the text
num_tokens = count_tokens(text)
    # If the token count exceeds the limit, roughly cap the input by truncating to the first 15,000 characters
if num_tokens > 15000:
text = text[:15000]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k-0613",
stream=True,
max_tokens=800,
messages=[
{"role": "system", "content": "You are a high-level AI assistant capable of comprehending and summarizing complex scientific content. Your task is to digest this scientific paper and present the information in an accessible, understandable manner. Bear in mind the need to translate technical language into layman's terms wherever possible, and to prioritize the main findings, implications, and novelty of the work."},
{"role": "user", "content": f"Here is a scientific paper that requires your expertise for short and clear summary, 5 uniqe bullet points, and the top 5 relevant keywords: {text}"}
]
)
responses = ''
for chunk in response:
if "content" in chunk["choices"][0]["delta"]:
r_text = chunk["choices"][0]["delta"]["content"]
responses += r_text
print(r_text, end='', flush=True)
return responses
def get_embedding(text):
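    # Embed the text with OpenAI's text-embedding-ada-002 model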
response = openai.Embedding.create(
input=text,
model="text-embedding-ada-002"
)
return response["data"][0]["embedding"]
def search_similar_articles(query, df):
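    # Embed the query and return the stored article with the highest cosine similarity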
query_embedding = get_embedding(query)
similarities = cosine_similarity([query_embedding], df["embedding"].tolist())
top_index = similarities[0].argmax()
return df.iloc[top_index]
def main(keyword, n, save_directory):
print(f"MAIN! {keyword}")
create_directory(save_directory)
saved_filenames = set(os.listdir(save_directory))
search = arxiv.Search(
query=keyword,
max_results=n,
sort_by=arxiv.SortCriterion.SubmittedDate
)
df_old = pd.DataFrame()
# if csv file exists, read it in
if os.path.exists("summary_embeddings.csv"):
df_old = pd.read_csv("summary_embeddings.csv")
df_new = pd.DataFrame(columns=["title", "summary", "url", "embedding"])
for i, result in enumerate(search.results()):
filename = sanitize_filename(result.title) + ".txt"
print(f"TITLE: {result.title}")
if filename in saved_filenames:
print(f"Article {i+1} already saved.")
continue
text = download_article_pdf(result.pdf_url)
# print the token count of the article
print(f"Article {i+1} has {count_tokens(text)} tokens.")
text = sanitize_article_text(text)
# print the token count of the article after sanitization
print(f"Article {i+1} has {count_tokens(text)} tokens after sanitization.")
save_path = os.path.join(save_directory, filename)
save_article(save_path, text)
summary = summarize_article(text)
embedding = get_embedding(summary)
# append each new article to the df_new dataframe
        new_row = pd.DataFrame([{"title": result.title, "summary": summary, "url": result.entry_id, "embedding": embedding}])
        df_new = pd.concat([df_new, new_row], ignore_index=True)
print(f"\nSummary of article {i+1}:\n{summary}\n")
summary_filename = filename.replace(".txt", "_summary.txt")
summary_save_path = os.path.join(save_directory, summary_filename)
save_article(summary_save_path, summary)
# concatenate new dataframe (df_new) with old dataframe (df_old), with new data on top
df_combined = pd.concat([df_new, df_old], ignore_index=True)
df_combined.to_csv("summary_embeddings.csv", index=False)
if __name__ == "__main__":
keyword = "Brain"
n = 100
save_directory = "saved_articles"
main(keyword, n, save_directory)
| [
"You are a high-level AI assistant capable of comprehending and summarizing complex scientific content. Your task is to digest this scientific paper and present the information in an accessible, understandable manner. Bear in mind the need to translate technical language into layman's terms wherever possible, and to prioritize the main findings, implications, and novelty of the work.",
"Here is a scientific paper that requires your expertise for short and clear summary, 5 uniqe bullet points, and the top 5 relevant keywords: "
] |
2024-01-10 | patterns-app/patterns-components | patterns_templates~Featured~AskHN~compute_embeddings.py | from patterns import (
Parameter,
State,
Table,
Connection,
)
import pinecone
import openai
import pickle
import time
from itertools import islice
import openai
from openai.error import RateLimitError
from patterns import Parameter, Table, State
state = State()
api_key = Parameter("pinecone_api_key")
openai_conn = Parameter("openai_api_key", type=Connection("openai"))
openai.api_key = openai_conn["api_key"]
docs = Table("docs")
pinecone.init(api_key=api_key, environment="us-east1-gcp")
pc_index_name = "hn-sample"
if pc_index_name not in pinecone.list_indexes():
try:
pinecone.create_index(pc_index_name, dimension=1536)
except Exception as e:
print(e)
# connect to index
index = pinecone.Index(pc_index_name)
def get_embedding(input_text: str) -> list[float]:
result = openai.Embedding.create(model="text-embedding-ada-002", input=input_text)
return result["data"][0]["embedding"]
inputs_stream = docs.as_stream()
inputs_iter = iter(inputs_stream)
retry_count = 0
while state.should_continue():
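    # Embed documents in batches of 10, sleeping longer on each retry when the OpenAI rate limit is hit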
records: list[dict] = list(islice(inputs_iter, 10))
if not records:
break
try:
try:
response = openai.Embedding.create(
model="text-embedding-ada-002",
input=[r["doc"] for r in records],
)
except RateLimitError as e:
inputs_stream.rollback()
retry_count += 1
if retry_count > 30:
raise
print(f"Hit Rate limit, sleeping for {retry_count}s: {e}")
time.sleep(retry_count)
continue
vectors = [(str(records[data.index]["id"]), data.embedding) for data in response.data]
index.upsert(vectors)
inputs_stream.checkpoint()
except Exception:
inputs_stream.rollback()
raise
else:
state.request_new_run()
| [] |
2024-01-10 | patterns-app/patterns-components | patterns_templates~Featured%20and%20Starter~discord-search-with-openai-embed~find_matching_messages.py | import openai
from numpy import dot
from numpy.linalg import norm
from patterns import Parameter, State, Table, Connection
import json
discord_embeddings = Table("discord_embeddings", "r")
docs_embeddings = Table("docs_embeddings", "r")
issues_embeddings = Table("issues_embeddings", "r")
search_text_table = Table("search_text", "r")
search_text_stream = search_text_table.as_stream()
most_relevant_records = Table("most_relevant_records", "w")
all_relevant_records = Table("all_relevant_records", "w")
most_relevant_records.init(schema_hints={'id': 'Text', 'application_id': 'Text'})
n = Parameter("n", type=int, default=5)
model = Parameter("model", type=str, default="text-embedding-ada-002")
connection = Parameter("connection", type=Connection("openai"))
openai.api_key = connection.get("api_key")
record_sets = {
'discord': discord_embeddings,
'docs': docs_embeddings,
'issues': issues_embeddings,
}
def cosine_similarity(a, b):
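    # Cosine similarity between two embedding vectors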
return dot(a, b)/(norm(a)*norm(b))
for search_text_record in search_text_stream.consume_records():
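    # Embed each incoming search text and score it against every stored record embedding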
search_text = search_text_record['search_text']
search_embedding = openai.Embedding.create(input=search_text, engine=model)['data'][0]['embedding']
updated_records = []
for record_type, record_set in record_sets.items():
records = record_set.read()
for record in records:
record['similarity'] = cosine_similarity(record['embedding'], search_embedding)
record['application_id'] = search_text_record['application_id']
record['interaction_token'] = search_text_record['interaction_token']
record['search_text'] = search_text
record['type'] = record_type
updated_records.append(record)
similarities = sorted(updated_records, key=lambda x: x['similarity'], reverse=True)
most_relevant_records.replace(similarities[:n])
all_relevant_records.replace(similarities)
| [] |
2024-01-10 | patterns-app/patterns-components | patterns_templates~Featured~algolia-gpt3-slackbot~text_complete.py | from patterns import (
Parameter,
State,
Table,
)
import openai
open_ai_api_key = Parameter('open_ai_api_key')
openai.api_key = open_ai_api_key
search_results = Table("search_results")
completions = Table("completions", "w")
answers = []
for match in search_results.as_stream().consume_records():
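    # Collect the matched content and thread metadata used to build the completion prompt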
answers.append(match["content"])
question = match['thread']
slack_channel = match['slack_channel']
ts = match['ts']
# Feel free to tweak this how every you'd like:
prompt_template = (
"{answer}\n"
"{question}?\n"
)
prompt = prompt_template.format(question=question, answer="\n".join(answers))
completion = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
max_tokens=200,
temperature=0.75
)
completion['question'] = question
completion['slack_channel'] = slack_channel
completion['ts'] = ts
completion['completion'] = completion.choices[0].text
completions.append(completion)
| [
"\n",
"{answer}\n{question}?\n"
] |
2024-01-10 | patterns-app/patterns-components | patterns_templates~Featured~AskHN~query_embeddings.py | from patterns import (
Parameter,
State,
Table,
Connection,
)
import pinecone
import openai
discord_messages_filtered = Table("discord_messages_filtered", "r")
api_key = Parameter("pinecone_api_key")
openai_conn = Parameter("openai_api_key", type=Connection("openai"))
openai.api_key = openai_conn["api_key"]
docs = Table("docs")
discord_hits = Table("discord_hits", "w")
pinecone.init(api_key=api_key, environment="us-east1-gcp")
pc_index_name = "hn-sample"
index = pinecone.Index(pc_index_name)
def query(text):
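    # Embed the question with text-embedding-ada-002 and fetch the top 3 nearest vectors from the Pinecone index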
response = openai.Embedding.create(
input=text,
model="text-embedding-ada-002"
)
embed = response['data'][0]['embedding']
resp = index.query(
vector=embed,
top_k=3,
include_values=False
)
return resp, embed
for msg in discord_messages_filtered.as_stream():
question = msg["question"]
resp, embed = query(question)
msg["matches"] = [{"id": m["id"], "score": m["score"]} for m in resp["matches"]]
msg["question_embedding"] = {"embedding": embed}
discord_hits.append(msg)
| [] |
2024-01-10 | ChanukyaVardhan/cloud-and-ml-RAG | gcp~python~instructor.py | from enum import Enum
from InstructorEmbedding import INSTRUCTOR
from langchain.text_splitter import RecursiveCharacterTextSplitter
import numpy as np
import torch
class InstructorModelType(Enum):
BASE = 'hkunlp/instructor-base'
LARGE = 'hkunlp/instructor-large'
XL = 'hkunlp/instructor-xl'
class Instructor:
def __init__(self, instructor_model_type: str, instruction: str, device: str = 'cpu'):
self.device = device
self.model = INSTRUCTOR(instructor_model_type).to(self.device)
self.instruction = instruction
self.text_splitter = RecursiveCharacterTextSplitter(
separators=[". ", "\n", " ", ""],
chunk_size=1800,
chunk_overlap=360, # 20% overlap
length_function=len,
)
self.chunk_batch_size = 5
def get_embedding(self, text, split_chunks = True):
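        # Split long text into overlapping chunks and embed them in batches; otherwise embed the text in a single call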
if split_chunks:
chunks = self.text_splitter.split_text(text)
all_embeddings = []
for i in range(0, len(chunks), self.chunk_batch_size):
batch = chunks[i:i + self.chunk_batch_size]
embeddings = self._get_batch_embedding(batch, self.instruction)
all_embeddings.extend(embeddings)
return chunks, np.array(all_embeddings)
else:
with torch.no_grad():
outputs = self.model.encode([[self.instruction, text]])
return text, outputs
def _get_batch_embedding(self, texts, instruction):
# Pair each text with the common instruction
text_instruction_pairs = [{"instruction": instruction, "text": text} for text in texts]
# Prepare texts with instructions for the model
texts_with_instructions = [[pair["instruction"], pair["text"]] for pair in text_instruction_pairs]
with torch.no_grad():
customized_embeddings = self.model.encode(texts_with_instructions)
return customized_embeddings
| [] |
2024-01-10 | weisisheng/nesta | nesta~packages~cordis~cordis_api.py | """
Cordis API
==========
Extract all Cordis data via the API, by project.
"""
import requests
import pandas as pd
import json
from retrying import retry
from nesta.packages.decorators.ratelimit import ratelimit
from nesta.packages.misc_utils.camel_to_snake import camel_to_snake
from json.decoder import JSONDecodeError
from requests.exceptions import HTTPError
TOP_PREFIX = 'http://cordis.europa.eu/{}'
CSV_URL = TOP_PREFIX.format('data/cordis-{}projects.csv')
INFO_FIELDS = ['rcn', 'acronym', 'startDateCode',
'endDateCode', 'framework',
'fundedUnder', 'status', 'title',
'ecContribution', 'totalCost', 'website']
OBJS_FIELDS = ['fundingScheme', 'objective', 'projectDescription',
'topics', 'proposalCall']
REPS_FIELDS = ['rcn', 'finalResults', 'workPerformed',
'teaser', 'summary', 'title']
ORGS_FIELDS = ['activityType', 'address', 'contribution',
'country', 'name', 'organizationId',
'type', 'website']
USER_AGENT = ('Mozilla/5.0 (Linux; Android 6.0; '
'Nexus 5 Build/MRA58N) AppleWebKit/537.36 '
'(KHTML, like Gecko) Chrome/81.0.4044.92 '
'Mobile Safari/537.36')
def generate_id(text):
"""Deterministically generates an ID from a given text.
NOT guaranteed to be unique, but the alternative was to
either drop some data for not having IDs, or
generating uniquely on the fly: which is hard to do on
a batch system.
A negative integer is returned to avoid conflicts
with the data which have ids already. 8 digits are
returned, since 9 are the maximum allowed in the schema.
Args:
text (str): Text to convert to a negative 8-digit integer
Returns:
_id (int): A negative 8-digit integer.
"""
# Start from the second digit to allow for
_id = str(int.from_bytes(text.encode(), 'big',
signed=False))
end = 9 if len(_id) > 8 else None
start = 1 if len(_id) > 8 else None
return -int(_id[start:end])
@retry(stop_max_attempt_number=10)
@ratelimit(max_per_second=10)
def hit_api(api='', rcn=None, content_type=None):
"""
Hit the Cordis API by project code
Args:
api (str): Assumed to support '' (cordis) or 'openaire'.
rcn (str): RCN id of the project or entity to find.
content_type (str): contenttype argument for Cordis API
Returns:
data (json)
"""
url = TOP_PREFIX.format('api/details')
if api is not None:
url = f'{url}/{api}'
r = requests.get(url, params={'lang': 'en',
'rcn': rcn,
'paramType': 'rcn',
'contenttype': content_type},
headers={'User-Agent':USER_AGENT})
# Not all projects have data, so this is not an error
if r.status_code in (404, 500):
p = r.json()['payload']
if p['errorType'] == 'ica' or 'does not exist!' in p['message']:
return None
r.raise_for_status()
return r.json()['payload']
def extract_fields(data, fields):
"""
Extract specific fields and flatten data from Cordis API.
Args:
data (dict): A row of data to be processed.
fields (list): A list of fields to be extracted.
Returns:
out_data (dict): Flatter data, with specific fields extracted.
"""
out_data = {}
for field in fields:
if field not in data:
continue
value = data[field]
if type(value) is list:
value = [{k: _row[k] for k in ['title', 'rcn']}
for _row in value]
snake_field = camel_to_snake(field)
out_data[snake_field] = value
return out_data
def get_framework_ids(framework, nrows=None):
"""
Get all IDs of projects by funding framework.
Args:
framework (str): 'fp7' or 'h2020'
Returns:
ids (list)
"""
df = pd.read_csv(CSV_URL.format(framework),
nrows=nrows,
engine='c',
decimal=',', sep=';',
error_bad_lines=False,
warn_bad_lines=True,
encoding='latin')
col = 'rcn' if 'rcn' in df.columns else 'projectRcn'
return list(df[col])
def filter_pubs(pubs):
"""Remove publications without links, and merge
datasets and publications data together.
Also deduplicates publications based on pids.
Args:
pubs (dict): Publication data from OpenAIRE.
Returns:
_pubs (list): Flattened list of input data.
"""
_pubs, pids = [], set()
for p in pubs['datasets'] + pubs['publications']:
if 'pid' not in p:
continue
already_found = any(id in pids for id in p['pid'])
pids = pids.union(p['pid'])
if already_found or len(p['pid']) == 0:
continue
_pubs.append(dict(id=p['pid'][0], **p))
return _pubs
def fetch_data(rcn):
"""
Fetch all data (project, reports, orgs, publications)
for a given project id.
Args:
rcn (str): Project id.
Returns:
data (tuple): project, orgs, reports, pubs
"""
# Collect project info
_project = hit_api(rcn=rcn, content_type='project')
if _project is None:
return (None,None,None,None)
info = _project['information']
project = {**extract_fields(info, INFO_FIELDS),
**extract_fields(_project['objective'],
OBJS_FIELDS)}
# Collect organisations
orgs = []
oid_field = 'organizationId'
for _orgs in _project['organizations'].values():
for org in _orgs:
no_id_found = (oid_field not in org or
org[oid_field] == '')
if 'name' not in org and no_id_found:
continue
elif no_id_found:
org[oid_field] = generate_id(org['name'])
orgs.append(extract_fields(org, ORGS_FIELDS))
# Collect result reports
_reports = [hit_api(rcn=report['rcn'],
content_type='result')
for report in info['relatedResultsReport']]
reports = []
if _reports is not None:
reports = [extract_fields(rep, REPS_FIELDS)
for rep in _reports]
# Collect publications via OpenAIRE
try:
pubs = hit_api(api='openaire', rcn=rcn)
if pubs is None:
raise HTTPError
except (HTTPError, JSONDecodeError):
pubs = []
else:
pubs = filter_pubs(pubs)
return project, orgs, reports, pubs
| [] |
2024-01-10 | apirrone/ffmpeg_gpt | ffmpeg_gpt~ffmpeg_gpt.py | import openai
def get_bash_script(prompt, model="gpt-4"):
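    # Prepend the fixed instructions, then ask the chat model for a bash script that uses ffmpeg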
preprompt = """
You are a bash and ffmpeg expert, you never make mistakes.
Write a bash script that uses ffmpeg to perform the following actions.
Do not explain yourself, just write the script.
Be sure the script is working.
If not specified, the input file is named input.mp4 and the output file is named output.mp4.
Here are the actions requested :
"""
prompt = preprompt + prompt
res = openai.ChatCompletion.create(
model=model, messages=[{"role": "user", "content": prompt}]
)
return res.choices[0].message.content
| [
"\n You are a bash and ffmpeg expert, you never make mistakes. \n Write a bash script that uses ffmpeg to perform the following actions.\n Do not explain yourself, just write the script.\n Be sure the script is working. \n If not specified, the input file is named input.mp4 and the output file is named output.mp4.\n Here are the actions requested : \n\n\n prompt432ddccd-b341-4e70-92c1-b4b5680113ed",
"\n You are a bash and ffmpeg expert, you never make mistakes. \n Write a bash script that uses ffmpeg to perform the following actions.\n Do not explain yourself, just write the script.\n Be sure the script is working. \n If not specified, the input file is named input.mp4 and the output file is named output.mp4.\n Here are the actions requested : \n\n\n ",
"\n You are a bash and ffmpeg expert, you never make mistakes. \n Write a bash script that uses ffmpeg to perform the following actions.\n Do not explain yourself, just write the script.\n Be sure the script is working. \n If not specified, the input file is named input.mp4 and the output file is named output.mp4.\n Here are the actions requested : \n\n\n \n You are a bash and ffmpeg expert, you never make mistakes. \n Write a bash script that uses ffmpeg to perform the following actions.\n Do not explain yourself, just write the script.\n Be sure the script is working. \n If not specified, the input file is named input.mp4 and the output file is named output.mp4.\n Here are the actions requested : \n\n\n prompt34ef4462-ce5d-4f2d-8984-24c394bbf4b9"
] |
2024-01-10 | TylerRLowe/Fix-Numpy-Issues | platform~reworkd_platform~web~api~agent~tools~reason.py | from fastapi.responses import StreamingResponse as FastAPIStreamingResponse
from lanarky.responses import StreamingResponse
from langchain import LLMChain
from reworkd_platform.web.api.agent.model_settings import create_model
from reworkd_platform.web.api.agent.tools.tool import Tool
class Reason(Tool):
description = (
"Reason about task via existing information or understanding. "
"Make decisions / selections from options."
)
async def call(
self, goal: str, task: str, input_str: str
) -> FastAPIStreamingResponse:
from reworkd_platform.web.api.agent.prompts import execute_task_prompt
llm = create_model(self.model_settings, streaming=True)
chain = LLMChain(llm=llm, prompt=execute_task_prompt)
return StreamingResponse.from_chain(
chain,
{"goal": goal, "language": "English", "task": task},
media_type="text/event-stream",
)
| [] |
2024-01-10 | TylerRLowe/Fix-Numpy-Issues | platform~reworkd_platform~web~api~agent~agent_service~open_ai_agent_service.py | from typing import List, Optional
from lanarky.responses import StreamingResponse
from langchain.chains import LLMChain
from langchain.output_parsers import PydanticOutputParser
from loguru import logger
from reworkd_platform.schemas import ModelSettings
from reworkd_platform.web.api.agent.agent_service.agent_service import AgentService
from reworkd_platform.web.api.agent.analysis import Analysis
from reworkd_platform.web.api.agent.helpers import (
call_model_with_handling,
parse_with_handling,
)
from reworkd_platform.web.api.agent.model_settings import create_model
from reworkd_platform.web.api.agent.prompts import (
analyze_task_prompt,
create_tasks_prompt,
start_goal_prompt,
)
from reworkd_platform.web.api.agent.task_output_parser import TaskOutputParser
from reworkd_platform.web.api.agent.tools.tools import (
get_tool_from_name,
get_tools_overview,
get_user_tools,
)
from reworkd_platform.web.api.memory.memory import AgentMemory
class OpenAIAgentService(AgentService):
def __init__(self, model_settings: ModelSettings, agent_memory: AgentMemory):
self.model_settings = model_settings
self.agent_memory = agent_memory
self._language = model_settings.language or "English"
async def start_goal_agent(self, *, goal: str) -> List[str]:
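        # Ask the model for an initial task list, then reset and seed agent memory with it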
completion = await call_model_with_handling(
self.model_settings,
start_goal_prompt,
{"goal": goal, "language": self._language},
)
task_output_parser = TaskOutputParser(completed_tasks=[])
tasks = parse_with_handling(task_output_parser, completion)
with self.agent_memory as memory:
memory.reset_class()
memory.add_tasks(tasks)
return tasks
async def analyze_task_agent(
self, *, goal: str, task: str, tool_names: List[str]
) -> Analysis:
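        # Have the model choose a tool/action for the task and parse the reply into an Analysis object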
llm = create_model(self.model_settings)
chain = LLMChain(llm=llm, prompt=analyze_task_prompt)
pydantic_parser = PydanticOutputParser(pydantic_object=Analysis)
print(get_tools_overview(get_user_tools(tool_names)))
completion = await chain.arun(
{
"goal": goal,
"task": task,
"language": self._language,
"tools_overview": get_tools_overview(get_user_tools(tool_names)),
}
)
print("Analysis completion:\n", completion)
try:
return pydantic_parser.parse(completion)
except Exception as error:
print(f"Error parsing analysis: {error}")
return Analysis.get_default_analysis()
async def execute_task_agent(
self,
*,
goal: str,
task: str,
analysis: Analysis,
) -> StreamingResponse:
print("Execution analysis:", analysis)
tool_class = get_tool_from_name(analysis.action)
return await tool_class(self.model_settings).call(goal, task, analysis.arg)
async def create_tasks_agent(
self,
*,
goal: str,
tasks: List[str],
last_task: str,
result: str,
completed_tasks: Optional[List[str]] = None,
) -> List[str]:
llm = create_model(self.model_settings)
chain = LLMChain(llm=llm, prompt=create_tasks_prompt)
completion = await chain.arun(
{
"goal": goal,
"language": self._language,
"tasks": tasks,
"lastTask": last_task,
"result": result,
}
)
previous_tasks = (completed_tasks or []) + tasks
task_output_parser = TaskOutputParser(completed_tasks=previous_tasks)
tasks = task_output_parser.parse(completion)
if not tasks:
logger.info(f"No additional tasks created: '{completion}'")
return tasks
unique_tasks = []
with self.agent_memory as memory:
for task in tasks:
similar_tasks = memory.get_similar_tasks(
task, score_threshold=0.98 # TODO: Once we use ReAct, revisit
)
# Check if similar tasks are found
if len(similar_tasks) == 0:
unique_tasks.append(task)
else:
logger.info(f"Similar tasks to '{task}' found: {similar_tasks}")
memory.add_tasks(unique_tasks)
return unique_tasks
| [] |
2024-01-10 | TylerRLowe/Fix-Numpy-Issues | platform~reworkd_platform~web~api~agent~model_settings.py | import openai
from langchain.chat_models import ChatOpenAI
from reworkd_platform.schemas import ModelSettings
from reworkd_platform.settings import settings
from reworkd_platform.web.api.agent.api_utils import rotate_keys
openai.api_base = settings.openai_api_base
def create_model(model_settings: ModelSettings, streaming: bool = False) -> ChatOpenAI:
return ChatOpenAI(
client=None, # Meta private value but mypy will complain its missing
openai_api_key=rotate_keys(
gpt_3_key=settings.openai_api_key,
gpt_4_key=settings.secondary_openai_api_key,
model=model_settings.model,
),
temperature=model_settings.temperature,
model=model_settings.model,
max_tokens=model_settings.max_tokens,
streaming=streaming,
)
| [] |
2024-01-10 | TylerRLowe/Fix-Numpy-Issues | platform~reworkd_platform~web~api~memory~weaviate.py | from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, cast
import numpy as np
import weaviate # type: ignore
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Weaviate
from loguru import logger
from weaviate import UnexpectedStatusCodeException
from reworkd_platform.settings import settings
from reworkd_platform.web.api.memory.memory import AgentMemory
def _default_schema(index_name: str, text_key: str) -> Dict[str, Any]:
return {
"class": index_name,
"properties": [
{
"name": text_key,
"dataType": ["text"],
}
],
}
CLASS_PREFIX = "Reworkd_AgentGPT_"
class WeaviateMemory(AgentMemory):
"""
Wrapper around the Weaviate vector database
"""
db: Optional[Weaviate] = None
def __init__(self, index_name: str):
self.index_name = CLASS_PREFIX + index_name
self.text_key = "agent_memory"
def __enter__(self) -> AgentMemory:
# If the database requires authentication, retrieve the API key
auth = (
weaviate.auth.AuthApiKey(api_key=settings.vector_db_api_key)
if settings.vector_db_api_key is not None
and settings.vector_db_api_key != ""
else None
)
self.client = weaviate.Client(settings.vector_db_url, auth_client_secret=auth)
self._create_class()
# Instantiate client with embedding provider
self.embeddings = OpenAIEmbeddings(
client=None, # Meta private value but mypy will complain its missing
openai_api_key=settings.openai_api_key,
)
self.db = Weaviate(
self.client,
self.index_name,
self.text_key,
embedding=self.embeddings,
by_text=False,
)
return self
def _create_class(self) -> None:
# Create the schema if it doesn't already exist
schema = _default_schema(self.index_name, self.text_key)
if not self.client.schema.contains(schema):
self.client.schema.create_class(schema)
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
self.client.__del__()
def add_tasks(self, tasks: List[str]) -> List[str]:
if self.db is None:
raise Exception("WeaviateMemory not initialized")
return self.db.add_texts(tasks)
def get_similar_tasks(self, query: str, score_threshold: float) -> List[str]:
# Get similar tasks
results = self._similarity_search_with_score(query)
def get_score(result: Tuple[str, float]) -> float:
return result[1]
results.sort(key=get_score, reverse=True)
# Return formatted response
return [text for [text, score] in results if score >= score_threshold]
def reset_class(self) -> None:
try:
self.client.schema.delete_class(self.index_name)
self._create_class()
except UnexpectedStatusCodeException as error:
logger.error(error)
def _similarity_search_with_score(
self, query: str, k: int = 4
) -> List[Tuple[str, float]]:
"""
A remake of _similarity_search_with_score from langchain to use a near vector
"""
# Build query
query_obj = self.client.query.get(self.index_name, [self.text_key])
embedding = self.embeddings.embed_query(query)
vector = {"vector": embedding}
result = (
query_obj.with_near_vector(vector)
.with_limit(k)
.with_additional("vector")
.do()
)
if "errors" in result:
raise ValueError(f"Error during query: {result['errors']}")
docs_and_scores: list[tuple[str, float]] = []
for res in result["data"]["Get"][self.index_name]:
text = cast(str, res.pop(self.text_key))
score = float(np.dot(res["_additional"]["vector"], embedding))
docs_and_scores.append((text, score))
return docs_and_scores
| [] |
2024-01-10 | dmvassallo/EmbeddingScratchwork | tests~test_keys.py | #!/usr/bin/env python
"""Tests for the ``api_key`` property of the ``embed`` module."""
import contextlib
import logging
import os
from pathlib import Path
import string
import sys
from tempfile import TemporaryDirectory
import unittest
from unittest.mock import patch
import dulwich.porcelain
import openai
from parameterized import parameterized
import embed
from embed._keys import _get_key_if_available
from tests import _bases
if sys.version_info < (3, 11):
@contextlib.contextmanager
def _chdir(path):
"""Trivial non-reentrant version of ``contextlib.chdir`` for < 3.11."""
_old_pwd = Path().absolute()
os.chdir(path)
try:
yield
finally:
os.chdir(_old_pwd)
else:
_chdir = contextlib.chdir
class TestApiKey(_bases.TestBase):
"""Tests for ``embed.api_key``."""
def setUp(self):
"""
Save ``api_key`` attributes. Also pre-patch them, for log redaction.
"""
super().setUp()
# This cannot be done straightforwardly with unittest.mock.patch
# because that expects to be able to delete attributes, and the
# embed.api_key property (deliberately) has no deleter.
self._real_key_openai = openai.api_key
self._real_key_embed = embed.api_key
openai.api_key = 'sk-fake_redact_outer'
embed.api_key = 'sk-fake_redact_inner'
def tearDown(self):
"""Unpatch ``api_key`` attributes."""
embed.api_key = self._real_key_embed
        openai.api_key = self._real_key_openai
super().tearDown()
@parameterized.expand([
('str', 'sk-fake_setting_sets'),
('none', None),
])
def test_setting_on_embed_sets_on_openai(self, _name, pretend_key):
"""Setting ``embed.api_key`` sets both it and ``openai.api_key``."""
embed.api_key = pretend_key
with self.subTest('embed.api_key'):
self.assertEqual(embed.api_key, pretend_key)
with self.subTest('openai.api_key'):
self.assertEqual(openai.api_key, pretend_key)
@parameterized.expand([
('str', 'sk-fake_setting_does_not_set'),
('none', None),
])
def test_setting_on_openai_does_not_set_on_embed(self, _name, pretend_key):
"""Setting ``open.api_key`` does not change ``embed.api_key``."""
openai.api_key = pretend_key
self.assertNotEqual(embed.api_key, pretend_key)
_ONE_LETTER_DIR_NAMES = tuple(string.ascii_lowercase)
"""Directory names for testing, for below the point of interest."""
_TWO_CAP_LETTER_DIR_NAMES = tuple(ch * 2 for ch in string.ascii_uppercase)
"""Directory names for testing, for above the point of greatest interest."""
_THREE_LETTER_DIR_NAMES = tuple(ch * 3 for ch in string.ascii_lowercase)
"""Directory names for testing, higher above the point of greatest interest."""
def _create_and_enter_single_directory(name):
"""``mkdir`` one new subdirectory and ``cd`` into it."""
subdir = Path(name)
subdir.mkdir()
os.chdir(subdir)
def _create_and_enter_directories(names):
"""``mkdir`` each new subdirectory and ``cd`` into it. Builds a chain."""
for name in names:
_create_and_enter_single_directory(name)
def _write_fake_key_file(fake_key):
"""Write a ``.api_key`` file with a fake key, in the current directory."""
Path('.api_key').write_text(fake_key, encoding='utf-8')
class TestGetKeyIfAvailable(_bases.TestBase):
"""
Tests for the non-public ``embed._keys._get_key_if_available`` function.
These tests test the code that is used to determine the automatic initial
value of ``embed.api_key``.
The implementation logs extensively, but this does not currently test that.
"""
def setUp(self):
"""
Use a temporary directory; patch ``OPENAI_API_KEY``; quiet some logs.
"""
super().setUp()
# Create a temporary directory (that will be cleaned up) and cd to it.
# pylint: disable-next=consider-using-with
self.enterContext(_chdir(self.enterContext(TemporaryDirectory())))
# Patch OPENAI_API_KEY to a fake value in the environment.
environ_fragment = {'OPENAI_API_KEY': 'sk-fake_from_env'}
self.enterContext(patch.dict(os.environ, environ_fragment))
# Temporarily suppress embed._keys log messages less severe than error.
logger = logging.getLogger(embed._keys.__name__)
self.enterContext(patch.object(logger, 'level', logging.ERROR))
# pylint: disable=missing-function-docstring # Tests' names describe them.
def test_uses_env_var_when_no_key_file(self):
result = _get_key_if_available()
self.assertEqual(result, 'sk-fake_from_env')
def test_uses_env_var_instead_of_key_file(self):
_write_fake_key_file('sk-fake_from_file')
result = _get_key_if_available()
self.assertEqual(result, 'sk-fake_from_env')
def test_uses_key_file_in_cwd_when_no_env_var(self):
del os.environ['OPENAI_API_KEY']
_write_fake_key_file('sk-fake_from_file')
result = _get_key_if_available()
self.assertEqual(result, 'sk-fake_from_file')
def test_none_found_when_no_env_var_nor_key_file(self):
del os.environ['OPENAI_API_KEY']
result = _get_key_if_available()
self.assertIsNone(result)
def test_key_file_in_parent_when_no_repo_not_used(self):
del os.environ['OPENAI_API_KEY']
_write_fake_key_file('sk-fake_from_file')
_create_and_enter_single_directory('subdir')
result = _get_key_if_available()
self.assertIsNone(result)
@parameterized.expand([
(f'{above}above_{below}below', above, below)
for below in (0, 1, 2, 5) for above in (1, 2)
])
def test_key_file_outside_repo_not_used(self, _name, above, below):
del os.environ['OPENAI_API_KEY']
_write_fake_key_file('sk-fake_from_file')
_create_and_enter_directories(_TWO_CAP_LETTER_DIR_NAMES[:above])
dulwich.porcelain.init()
_create_and_enter_directories(_ONE_LETTER_DIR_NAMES[:below])
result = _get_key_if_available()
self.assertIsNone(result)
@parameterized.expand([
(f'{above}above_{below}below', above, below)
for below in (0, 1, 3) for above in (0, 1, 3)
])
def test_key_file_inside_repo_used_when_no_env_var(self, _name,
above, below):
del os.environ['OPENAI_API_KEY']
dulwich.porcelain.init()
_create_and_enter_directories(_TWO_CAP_LETTER_DIR_NAMES[:above])
_write_fake_key_file('sk-fake_from_file')
_create_and_enter_directories(_ONE_LETTER_DIR_NAMES[:below])
result = _get_key_if_available()
self.assertEqual(result, 'sk-fake_from_file')
@parameterized.expand([
(f'{above}above_{between}between_{below}below', above, between, below)
for below in (0, 1, 3) for between in (1, 2, 4) for above in (0, 1, 3)
])
def test_key_file_in_outer_nested_repo_not_used(self, _name,
above, between, below):
del os.environ['OPENAI_API_KEY']
_write_fake_key_file('sk-fake_from_file')
_create_and_enter_directories(_THREE_LETTER_DIR_NAMES[:above])
dulwich.porcelain.init() # Outer enclosing repo.
_create_and_enter_directories(_TWO_CAP_LETTER_DIR_NAMES[:between])
dulwich.porcelain.init() # Inner enclosed ("current") repo.
_create_and_enter_directories(_ONE_LETTER_DIR_NAMES[:below])
result = _get_key_if_available()
self.assertIsNone(result)
@parameterized.expand([
(f'{above}above_{between}between_{below}below', above, between, below)
for below in (0, 1, 3) for between in (0, 1, 3) for above in (1, 2, 4)
])
def test_key_file_in_inner_nested_repo_used_when_no_env_var(
self, _name, above, between, below,
):
del os.environ['OPENAI_API_KEY']
dulwich.porcelain.init() # Outer enclosing repo.
_create_and_enter_directories(_THREE_LETTER_DIR_NAMES[:above])
dulwich.porcelain.init() # Inner enclosed ("current") repo.
_create_and_enter_directories(_TWO_CAP_LETTER_DIR_NAMES[:between])
_write_fake_key_file('sk-fake_from_file')
_create_and_enter_directories(_ONE_LETTER_DIR_NAMES[:below])
result = _get_key_if_available()
self.assertEqual(result, 'sk-fake_from_file')
def test_prefers_env_var_to_any_files_in_repo(self):
_write_fake_key_file('sk-fake_from_file_parent')
dulwich.porcelain.init()
_create_and_enter_single_directory('subdir')
_write_fake_key_file('sk-fake_from_file_current')
result = _get_key_if_available()
self.assertEqual(result, 'sk-fake_from_env')
def test_prefers_current_dir_to_parent_in_repo(self):
del os.environ['OPENAI_API_KEY']
_write_fake_key_file('sk-fake_from_file_parent')
dulwich.porcelain.init()
_create_and_enter_single_directory('subdir')
_write_fake_key_file('sk-fake_from_file_current')
result = _get_key_if_available()
self.assertEqual(result, 'sk-fake_from_file_current')
@parameterized.expand([
('no_prefix', 'fake_from_file'),
('non_word_chars', 'sk-fake+from+file'),
])
def test_malformed_key_not_used_from_key_file(self, _name, malformed_key):
del os.environ['OPENAI_API_KEY']
_write_fake_key_file(malformed_key)
result = _get_key_if_available()
self.assertIsNone(result)
@parameterized.expand([
('no_prefix', 'fake_from_file_current'),
('non_word_chars', 'sk-fake+from+file+current'),
])
def test_skips_malformed_key_file_falls_back_to_ancestor_in_repo(
self, _name, malformed_key,
):
del os.environ['OPENAI_API_KEY']
_write_fake_key_file('sk-fake_from_file_parent')
dulwich.porcelain.init()
_create_and_enter_single_directory('subdir')
_write_fake_key_file(malformed_key)
result = _get_key_if_available()
self.assertEqual(result, 'sk-fake_from_file_parent')
if __name__ == '__main__':
unittest.main()
| [] |
2024-01-10 | dmvassallo/EmbeddingScratchwork | embed~_keys.py | """
API key helpers.
This enables the ``embed`` module to have an ``api_key`` property that, when
set, sets ``openai.api_key``. This is useful because code that consumes this
project's ``embed`` module shouldn't have to use ``openai`` directly or know
about ``openai.api_key``. (Setting ``openai.api_key`` to use ``requests``-based
functions, which don't use ``openai``, would be especially unintuitive.)
Code within the ``embed`` module itself may access ``_keys.api_key``.
"""
__all__ = ['api_key', 'initialize']
import logging
import os
from pathlib import Path
import re
import sys
import types
from typing import Any
import dulwich.repo
import openai
_API_KEY_REGEX = re.compile(r'sk-\w+')
"""Regular expression to check if file contents are an API key."""
_logger = logging.getLogger(__name__)
"""Logger for messages from this submodule (``embed._keys``)."""
api_key: Any = None
"""OpenAI API key. This should only be accessed from ``__init__.py``."""
def initialize(module_or_name):
"""
Give the module an ``api_key`` property and set it from the environment.
Setting the property sets ``openai.api_key`` (including this first time).
"""
if isinstance(module_or_name, str): # Because no match-case before 3.10.
module = sys.modules[module_or_name]
elif isinstance(module_or_name, types.ModuleType):
module = module_or_name
else:
raise TypeError(f'module_or_name is {type(module_or_name).__name__!r}')
# Give the module an api_key property that updates openai.api_key when set.
module.__class__ = _ModuleWithApiKeyProperty
# Try to set the property from the environment or a key file.
module.api_key = _get_key_if_available()
class _ModuleWithApiKeyProperty(types.ModuleType):
"""A module whose ``api_key`` property also sets ``openai.api_key``."""
@property
def api_key(self):
"""OpenAI API key."""
return api_key
@api_key.setter
def api_key(self, value):
# We really do want to write this submodule's api_key attribute.
# pylint: disable-next=global-statement
global api_key
api_key = openai.api_key = value
def _get_key_if_available():
"""
Get a reasonable initial value for the ``api_key`` property.
This checks for the ``OPENAI_API_KEY`` variable, an ``.api_key`` file in
the current directory, and ``.api_key`` files in higher directories within
the same Git repository (if the current directory is inside a Git
repository), in that order. If no key is found, ``None`` is returned.
"""
if key := os.getenv('OPENAI_API_KEY', '').rstrip():
_logger.info('API key found in OPENAI_API_KEY environment variable')
return key
_logger.debug('API key not found in OPENAI_API_KEY environment variable')
if key := _read_key_from_file(Path().absolute()):
return key
if key := _read_key_from_ancestors_within_repo():
return key
_logger.info('API key not set automatically (no key found)')
return None
def _read_key_from_file(directory):
"""Try to read an API key from an ``.api_key`` file in ``dir``."""
try:
key = (directory / '.api_key').read_text(encoding='utf-8').rstrip()
except FileNotFoundError:
_logger.debug('No API key file in: %s', directory)
return None
except OSError as error:
_logger.warning('%s', error)
return None
if _API_KEY_REGEX.fullmatch(key):
_logger.info('API key read from file in: %s', directory)
return key
_logger.warning('Malformed API key file in: %s', directory)
return None
def _read_key_from_ancestors_within_repo():
"""Try to read an API key in higher directories inside a Git repository."""
try:
repo = dulwich.repo.Repo.discover()
except dulwich.repo.NotGitRepository:
_logger.debug('Not in Git repository, stopping key file search')
return None
repo_directory = Path(repo.path).absolute()
directory = Path().absolute()
if directory == repo_directory:
return None # Searching this directory itself is done separately.
if repo_directory not in directory.parents:
_logger.error('Git repo status unclear for directory: %s', directory)
return None
while directory != repo_directory:
directory = directory.parent
if key := _read_key_from_file(directory):
return key
return None
| [] |
2024-01-10 | YJiangcm/FollowBench | code~llm_eval.py | import argparse
import json
import os
from tqdm import tqdm
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
from gpt4_based_evaluation import acquire_discriminative_eval_input
from openai import OpenAI
MAX_API_RETRY = 5
def get_eval(user_prompt: str, max_tokens: int, api_key: str):
logging.basicConfig(level=logging.INFO)
for i in range(MAX_API_RETRY):
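        # Retry the GPT-4 call up to MAX_API_RETRY times before giving up and returning 'error'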
try:
client = OpenAI(api_key=api_key)
response = client.chat.completions.create(
model='gpt-4',
max_tokens=max_tokens,
temperature=0.0,
messages=[{
'role': 'user',
'content': user_prompt,
}],
)
            content = response.choices[0].message.content
logger.info(content)
return content
except Exception as e:
logger.error(e)
logger.error(f'Failed after {MAX_API_RETRY} retries.')
return 'error'
def get_json_list(file_path):
file_path = os.path.expanduser(file_path)
with open(file_path, 'r') as f:
json_list = []
for line in f:
json_list.append(json.loads(line))
return json_list
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='LLM-based evaluation.')
parser.add_argument('--api_key', type=str, required=True)
parser.add_argument('--max_tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
parser.add_argument("--model_path", type=str, required=True)
parser.add_argument("--constraint_types", nargs='+', type=str, default=['content', 'situation', 'style', 'format', 'mixed'])
parser.add_argument("--data_path", type=str, default="data")
parser.add_argument("--api_output_path", type=str, default="api_output")
parser.add_argument("--gpt4_discriminative_eval_input_path", type=str, default="gpt4_discriminative_eval_input")
parser.add_argument("--data_gpt4_discriminative_eval_input_path", type=str, default="data_gpt4_discriminative_eval_input")
parser.add_argument("--gpt4_discriminative_eval_output_path", type=str, default="gpt4_discriminative_eval_output")
args = parser.parse_args()
### convert api_output to LLM_based_eval_input
for constraint_type in args.constraint_types:
acquire_discriminative_eval_input(
data_path=args.data_path,
api_output_path=args.api_output_path,
constraint_type=constraint_type,
model_name=args.model_path,
data_gpt4_discriminative_eval_input_path=args.data_gpt4_discriminative_eval_input_path,
gpt4_discriminative_eval_input_path=args.gpt4_discriminative_eval_input_path
)
### LLM-based evaluation
if not os.path.exists(args.gpt4_discriminative_eval_output_path):
os.makedirs(args.gpt4_discriminative_eval_output_path)
for constraint_type in args.constraint_types:
eval_input = get_json_list(os.path.join(args.gpt4_discriminative_eval_input_path, "{0}_{1}_constraint.jsonl".format(args.model_path, constraint_type)))
with open(os.path.join(args.gpt4_discriminative_eval_output_path, "{0}_{1}_constraint.jsonl".format(args.model_path, constraint_type)), 'w') as output_file:
for idx in tqdm(range(len(eval_input))):
response = get_eval(eval_input[idx]['prompt_new'], args.max_tokens, args.api_key)
output_file.write(json.dumps({'prompt_new': eval_input[idx]['prompt_new'], "choices": [{"message": {"content": response}}]}) + '\n')
| [] |
2024-01-10 | YJiangcm/FollowBench | code_zh~llm_eval.py | import argparse
import json
import os
from tqdm import tqdm
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
from gpt4_based_evaluation import acquire_discriminative_eval_input
from openai import OpenAI
MAX_API_RETRY = 5
def get_eval(user_prompt: str, max_tokens: int, api_key: str):
logging.basicConfig(level=logging.INFO)
for i in range(MAX_API_RETRY):
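        # Retry the GPT-4 call up to MAX_API_RETRY times before giving up and returning 'error'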
try:
client = OpenAI(api_key=api_key)
response = client.chat.completions.create(
model='gpt-4',
max_tokens=max_tokens,
temperature=0.0,
messages=[{
'role': 'user',
'content': user_prompt,
}],
)
            content = response.choices[0].message.content
logger.info(content)
return content
except Exception as e:
logger.error(e)
logger.error(f'Failed after {MAX_API_RETRY} retries.')
return 'error'
def get_json_list(file_path):
file_path = os.path.expanduser(file_path)
with open(file_path, 'r') as f:
json_list = []
for line in f:
json_list.append(json.loads(line))
return json_list
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='LLM-based evaluation.')
parser.add_argument('--api_key', type=str, required=True)
parser.add_argument('--max_tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
parser.add_argument("--model_path", type=str, required=True)
parser.add_argument("--constraint_types", nargs='+', type=str, default=['content', 'situation', 'style', 'format', 'mixed'])
parser.add_argument("--data_path", type=str, default="data_zh")
parser.add_argument("--api_output_path", type=str, default="api_output_zh")
parser.add_argument("--gpt4_discriminative_eval_input_path", type=str, default="gpt4_discriminative_eval_input_zh")
parser.add_argument("--data_gpt4_discriminative_eval_input_path", type=str, default="data_gpt4_discriminative_eval_input_zh")
parser.add_argument("--gpt4_discriminative_eval_output_path", type=str, default="gpt4_discriminative_eval_output_zh")
args = parser.parse_args()
### convert api_output to LLM_based_eval_input
for constraint_type in args.constraint_types:
acquire_discriminative_eval_input(
data_path=args.data_path,
api_output_path=args.api_output_path,
constraint_type=constraint_type,
model_name=args.model_path,
data_gpt4_discriminative_eval_input_path=args.data_gpt4_discriminative_eval_input_path,
gpt4_discriminative_eval_input_path=args.gpt4_discriminative_eval_input_path
)
### LLM-based evaluation
if not os.path.exists(args.gpt4_discriminative_eval_output_path):
os.makedirs(args.gpt4_discriminative_eval_output_path)
for constraint_type in args.constraint_types:
eval_input = get_json_list(os.path.join(args.gpt4_discriminative_eval_input_path, "{0}_{1}_constraint.jsonl".format(args.model_path, constraint_type)))
with open(os.path.join(args.gpt4_discriminative_eval_output_path, "{0}_{1}_constraint.jsonl".format(args.model_path, constraint_type)), 'w') as output_file:
for idx in tqdm(range(len(eval_input))):
response = get_eval(eval_input[idx]['prompt_new'], args.max_tokens, args.api_key)
output_file.write(json.dumps({'prompt_new': eval_input[idx]['prompt_new'], "choices": [{"message": {"content": response}}]}) + '\n')
| [] |
2024-01-10 | Algalyq/FastAPI | app~api~adapters~lang_service.py | from langchain.agents import load_tools
import os
from langchain.agents import initialize_agent, Tool
from langchain.agents import AgentType
from langchain.chat_models import ChatOpenAI
from langchain import LLMMathChain, OpenAI, SerpAPIWrapper, SQLDatabase, SQLDatabaseChain
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
from langchain.utilities import SerpAPIWrapper
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain import OpenAI, ConversationChain, LLMChain, PromptTemplate
from langchain.memory import ConversationBufferWindowMemory
os.environ["OPENAI_KEY"] = os.getenv("OPENAI_API_KEY")
openai_api_key = os.getenv("OPENAI_API_KEY")
os.environ["SERPAPI_API_KEY"] = os.getenv("SERPAPI_KEY")
wolf = os.getenv("WOLFRAM_ALPHA")
os.environ["WOLFRAM_ALPHA_APPID"] = os.getenv("WOLFRAM_ALPHA")
class LangService:
def __init__(self):
pass
def model(self,query: str):
# llm = OpenAI(temperature=0)
# tools = load_tools(["serpapi","llm-math","wolfram-alpha"],llm=llm,serpapi_api_key=serpapi,
# wolfram_alpha_appid=wolf)
# agent = initialize_agent(tools,llm, agent="zero-shot-react-description",verbose=True)
# return agent.run(query,length=256)
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
search = SerpAPIWrapper()
llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True)
wolfram = WolframAlphaAPIWrapper()
tools = [
Tool(
name = "Search",
func=search.run,
description="useful for when you need to answer questions about current events. You should ask targeted questions"
),
Tool(
name="ChatGPT",
func=llm_math_chain.run,
description="useful for when you need to answer questions that can answer ChatGPT"
),
Tool(
name="Wolf",
func=wolfram.run,
description="useful for when you need to answer questions about math"
)
]
agent = initialize_agent(tools, llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=True)
return agent.run(query)
| [] |
2024-01-10 | Goblincomet/GPT-documents | scripts~parser~py2doc.py | import ast
import os
from pathlib import Path
import tiktoken
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
def find_files(directory):
files_list = []
for root, dirs, files in os.walk(directory):
for file in files:
if file.endswith('.py'):
files_list.append(os.path.join(root, file))
return files_list
def extract_functions(file_path):
with open(file_path, 'r') as file:
source_code = file.read()
functions = {}
tree = ast.parse(source_code)
for node in ast.walk(tree):
if isinstance(node, ast.FunctionDef):
func_name = node.name
func_def = ast.get_source_segment(source_code, node)
functions[func_name] = func_def
return functions
def extract_classes(file_path):
with open(file_path, 'r') as file:
source_code = file.read()
classes = {}
tree = ast.parse(source_code)
for node in ast.walk(tree):
if isinstance(node, ast.ClassDef):
class_name = node.name
function_names = []
for subnode in ast.walk(node):
if isinstance(subnode, ast.FunctionDef):
function_names.append(subnode.name)
classes[class_name] = ", ".join(function_names)
return classes
def extract_functions_and_classes(directory):
files = find_files(directory)
functions_dict = {}
classes_dict = {}
for file in files:
functions = extract_functions(file)
if functions:
functions_dict[file] = functions
classes = extract_classes(file)
if classes:
classes_dict[file] = classes
return functions_dict, classes_dict
def parse_functions(functions_dict, formats, dir):
c1 = len(functions_dict)
for i, (source, functions) in enumerate(functions_dict.items(), start=1):
print(f"Processing file {i}/{c1}")
source_w = source.replace(dir + "/", "").replace("." + formats, ".md")
subfolders = "/".join(source_w.split("/")[:-1])
Path(f"outputs/{subfolders}").mkdir(parents=True, exist_ok=True)
for j, (name, function) in enumerate(functions.items(), start=1):
print(f"Processing function {j}/{len(functions)}")
prompt = PromptTemplate(
input_variables=["code"],
template="Code: \n{code}, \nDocumentation: ",
)
llm = OpenAI(temperature=0)
response = llm(prompt.format(code=function))
mode = "a" if Path(f"outputs/{source_w}").exists() else "w"
with open(f"outputs/{source_w}", mode) as f:
f.write(
f"\n\n# Function name: {name} \n\nFunction: \n```\n{function}\n```, \nDocumentation: \n{response}")
def parse_classes(classes_dict, formats, dir):
c1 = len(classes_dict)
for i, (source, classes) in enumerate(classes_dict.items()):
print(f"Processing file {i + 1}/{c1}")
source_w = source.replace(dir + "/", "").replace("." + formats, ".md")
subfolders = "/".join(source_w.split("/")[:-1])
Path(f"outputs/{subfolders}").mkdir(parents=True, exist_ok=True)
for name, function_names in classes.items():
print(f"Processing Class {i + 1}/{c1}")
prompt = PromptTemplate(
input_variables=["class_name", "functions_names"],
template="Class name: {class_name} \nFunctions: {functions_names}, \nDocumentation: ",
)
llm = OpenAI(temperature=0)
response = llm(prompt.format(class_name=name, functions_names=function_names))
with open(f"outputs/{source_w}", "a" if Path(f"outputs/{source_w}").exists() else "w") as f:
f.write(f"\n\n# Class name: {name} \n\nFunctions: \n{function_names}, \nDocumentation: \n{response}")
def transform_to_docs(functions_dict, classes_dict, formats, dir):
docs_content = ''.join([str(key) + str(value) for key, value in functions_dict.items()])
docs_content += ''.join([str(key) + str(value) for key, value in classes_dict.items()])
num_tokens = len(tiktoken.get_encoding("cl100k_base").encode(docs_content))
total_price = ((num_tokens / 1000) * 0.02)
print(f"Number of Tokens = {num_tokens:,d}")
print(f"Approx Cost = ${total_price:,.2f}")
user_input = input("Price Okay? (Y/N)\n").lower()
if user_input == "y" or user_input == "":
if not Path("outputs").exists():
Path("outputs").mkdir()
parse_functions(functions_dict, formats, dir)
parse_classes(classes_dict, formats, dir)
print("All done!")
else:
print("The API was not called. No money was spent.")
| [
"Code: \n{code}, \nDocumentation: ",
"functions_names",
"class_name",
"Class name: {class_name} \nFunctions: {functions_names}, \nDocumentation: "
] |
2024-01-10 | Goblincomet/GPT-documents | scripts~code_docs_gen.py | import ast
import json
from pathlib import Path
import dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
dotenv.load_dotenv()
ps = list(Path("inputs").glob("**/*.py"))
data = []
sources = []
for p in ps:
with open(p) as f:
data.append(f.read())
sources.append(p)
def get_functions_in_class(node):
functions = []
functions_code = []
for child in node.body:
if isinstance(child, ast.FunctionDef):
functions.append(child.name)
functions_code.append(ast.unparse(child))
return functions, functions_code
def get_classes_and_functions(source_code):
tree = ast.parse(source_code)
classes = {}
for node in tree.body:
if isinstance(node, ast.ClassDef):
class_name = node.name
function_name, function = get_functions_in_class(node)
# join function name and function code
functions = dict(zip(function_name, function))
classes[class_name] = functions
return classes
structure_dict = {}
c1 = 0
for code in data:
classes = get_classes_and_functions(ast.parse(code))
source = str(sources[c1])
structure_dict[source] = classes
c1 += 1
# save the structure dict as json
with open('structure_dict.json', 'w') as f:
json.dump(structure_dict, f)
if not Path("outputs").exists():
Path("outputs").mkdir()
c1 = len(structure_dict)
c2 = 0
for source, classes in structure_dict.items():
c2 += 1
print(f"Processing file {c2}/{c1}")
f1 = len(classes)
f2 = 0
for class_name, functions in classes.items():
f2 += 1
print(f"Processing class {f2}/{f1}")
source_w = source.replace("inputs/", "")
source_w = source_w.replace(".py", ".txt")
if not Path(f"outputs/{source_w}").exists():
with open(f"outputs/{source_w}", "w") as f:
f.write(f"Class: {class_name}")
else:
with open(f"outputs/{source_w}", "a") as f:
f.write(f"\n\nClass: {class_name}")
# append class name to the front
        b1 = len(functions)
        b2 = 0
        for function in functions:
            b2 += 1
            print(f"Processing function {b2}/{b1}")
prompt = PromptTemplate(
input_variables=["code"],
template="Code: \n{code}, \nDocumentation: ",
)
llm = OpenAI(temperature=0)
response = llm(prompt.format(code=functions[function]))
if not Path(f"outputs/{source_w}").exists():
with open(f"outputs/{source_w}", "w") as f:
f.write(f"Function: {functions[function]}, \nDocumentation: {response}")
else:
with open(f"outputs/{source_w}", "a") as f:
f.write(f"\n\nFunction: {functions[function]}, \nDocumentation: {response}")
| [
"Code: \n{code}, \nDocumentation: "
] |
2024-01-10 | Goblincomet/GPT-documents | application~parser~py2doc.py | import ast
import os
from pathlib import Path
import tiktoken
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
def find_files(directory):
files_list = []
for root, dirs, files in os.walk(directory):
for file in files:
if file.endswith('.py'):
files_list.append(os.path.join(root, file))
return files_list
def extract_functions(file_path):
with open(file_path, 'r') as file:
source_code = file.read()
functions = {}
tree = ast.parse(source_code)
for node in ast.walk(tree):
if isinstance(node, ast.FunctionDef):
func_name = node.name
func_def = ast.get_source_segment(source_code, node)
functions[func_name] = func_def
return functions
def extract_classes(file_path):
with open(file_path, 'r') as file:
source_code = file.read()
classes = {}
tree = ast.parse(source_code)
for node in ast.walk(tree):
if isinstance(node, ast.ClassDef):
class_name = node.name
function_names = []
for subnode in ast.walk(node):
if isinstance(subnode, ast.FunctionDef):
function_names.append(subnode.name)
classes[class_name] = ", ".join(function_names)
return classes
def extract_functions_and_classes(directory):
files = find_files(directory)
functions_dict = {}
classes_dict = {}
for file in files:
functions = extract_functions(file)
if functions:
functions_dict[file] = functions
classes = extract_classes(file)
if classes:
classes_dict[file] = classes
return functions_dict, classes_dict
def parse_functions(functions_dict, formats, dir):
c1 = len(functions_dict)
for i, (source, functions) in enumerate(functions_dict.items(), start=1):
print(f"Processing file {i}/{c1}")
source_w = source.replace(dir + "/", "").replace("." + formats, ".md")
subfolders = "/".join(source_w.split("/")[:-1])
Path(f"outputs/{subfolders}").mkdir(parents=True, exist_ok=True)
for j, (name, function) in enumerate(functions.items(), start=1):
print(f"Processing function {j}/{len(functions)}")
prompt = PromptTemplate(
input_variables=["code"],
template="Code: \n{code}, \nDocumentation: ",
)
llm = OpenAI(temperature=0)
response = llm(prompt.format(code=function))
mode = "a" if Path(f"outputs/{source_w}").exists() else "w"
with open(f"outputs/{source_w}", mode) as f:
f.write(
f"\n\n# Function name: {name} \n\nFunction: \n```\n{function}\n```, \nDocumentation: \n{response}")
def parse_classes(classes_dict, formats, dir):
c1 = len(classes_dict)
for i, (source, classes) in enumerate(classes_dict.items()):
print(f"Processing file {i + 1}/{c1}")
source_w = source.replace(dir + "/", "").replace("." + formats, ".md")
subfolders = "/".join(source_w.split("/")[:-1])
Path(f"outputs/{subfolders}").mkdir(parents=True, exist_ok=True)
for name, function_names in classes.items():
print(f"Processing Class {i + 1}/{c1}")
prompt = PromptTemplate(
input_variables=["class_name", "functions_names"],
template="Class name: {class_name} \nFunctions: {functions_names}, \nDocumentation: ",
)
llm = OpenAI(temperature=0)
response = llm(prompt.format(class_name=name, functions_names=function_names))
with open(f"outputs/{source_w}", "a" if Path(f"outputs/{source_w}").exists() else "w") as f:
f.write(f"\n\n# Class name: {name} \n\nFunctions: \n{function_names}, \nDocumentation: \n{response}")
def transform_to_docs(functions_dict, classes_dict, formats, dir):
docs_content = ''.join([str(key) + str(value) for key, value in functions_dict.items()])
docs_content += ''.join([str(key) + str(value) for key, value in classes_dict.items()])
num_tokens = len(tiktoken.get_encoding("cl100k_base").encode(docs_content))
total_price = ((num_tokens / 1000) * 0.02)
print(f"Number of Tokens = {num_tokens:,d}")
print(f"Approx Cost = ${total_price:,.2f}")
user_input = input("Price Okay? (Y/N)\n").lower()
if user_input == "y" or user_input == "":
if not Path("outputs").exists():
Path("outputs").mkdir()
parse_functions(functions_dict, formats, dir)
parse_classes(classes_dict, formats, dir)
print("All done!")
else:
print("The API was not called. No money was spent.")
| [
"Class name: {class_name} \nFunctions: {functions_names}, \nDocumentation: ",
"functions_names",
"class_name",
"Code: \n{code}, \nDocumentation: "
] |
2024-01-10 | ans9611/BibleInfluenceML | src~preprocessing~wordCloud.py | import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
import glob
import string
import gensim
from gensim.corpora import Dictionary
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel, Phrases, LdaModel
from gensim.models.ldamulticore import LdaMulticore
import pandas as pd
from num2words import num2words
import numpy as np
from wordcloud import WordCloud
from sklearn.feature_extraction.text import CountVectorizer
import seaborn as sbn
import matplotlib.pyplot as plt
from scipy.stats import hmean
from scipy.stats import norm
# ... (rest of your code)
data = []
ifiles = glob.glob("../../data/*.txt")
for ifile in ifiles:
book = open(ifile, "r").read().strip()
data.append(book)
stop_words = stopwords.words('english')
stop_words.extend(["thy","thou","thee", "hath", "upon", "me", "him", "them", "shall","ye", "one", "unto", "us"])
def remove_stopwords(text, stop_words):
outtext = ' '.join([word for word in text.split() if word not in stop_words])
return outtext
for i, book in enumerate(data, 0):
# remove NUMBER:NUMBER. pattern at the beginning
data[i] = re.sub(r"\d{1,}\:\d{1,}\.", "",data[i])
# remove NAME Chapter NUMBER
data[i] = re.sub(r"\w{1,} Chapter \d{1,}","",data[i] )
#lower case
data[i] = data[i].lower()
# remove punctuation
data[i] = data[i].translate(str.maketrans('', '', string.punctuation))
# remove new lines
data[i] = re.sub('\s+', " ", data[i])
# remove new line
data[i] = re.sub(r"\\n", " ", data[i])
# remove stopwords
data[i] = ' '.join([word for word in data[i].split() if word not in stop_words]) #remove_stopwords(data[i], stop_words)
big_string = ""
for book in data:
big_string += book + " "
output_file = "../../data/bible_corpus.txt"
with open(output_file, "w") as file:
file.write(big_string)
# wordcloud = WordCloud(width=1600, height=800,max_font_size=200).generate(big_string)
# plt.figure(figsize=(12,10))
# plt.imshow(wordcloud, interpolation="bilinear")
# plt.axis("off")
# plt.show() | [] |
2024-01-10 | karleeov/codex-model | codex.py | import streamlit as st
import openai
import os
# Configurations
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") # Store your API key as an environment variable for security
if not OPENAI_API_KEY:
raise ValueError("OpenAI API key not found in environment variables.")
openai.api_type = "azure"
openai.api_base = "https://example.openai.azure.com/"
openai.api_version = "2023-03-15-preview"
openai.api_key = OPENAI_API_KEY
def get_codex_response(prompt_text):
"""Get response from Codex for the given prompt."""
try:
response = openai.Completion.create(
engine="code",
prompt=prompt_text,
temperature=0.7, # Adjusted for slightly more deterministic output
max_tokens=2000, # Increased for longer responses
top_p=1,
frequency_penalty=0,
presence_penalty=0,
best_of=1,
stop=["Human:", "AI:"]
)
return response.choices[0].text.strip()
except Exception as e:
st.error(f"An error occurred: {e}")
return None
def main():
st.title("Hello karl using OpenAI Codex")
# User input
name = st.text_input("Describe the functionality you want in the code (e.g. 'a function to sort a list of numbers')")
if name:
# Provide feedback while API call is made
with st.spinner("Generating code..."):
prompt_text = f"\"\"\"\nWrite a detailed Python function for: {name}\n\"\"\""
code_response = get_codex_response(prompt_text)
if code_response:
st.code(code_response, language='python')
if __name__ == "__main__":
main()
| [
"\"\"\"\nWrite a detailed Python function for: PLACEHOLDER\n\"\"\""
] |
2024-01-10 | TestSavant/BusinessProfiler | business_profiler.py | # Importing the necessary Python libraries
import os
import yaml
import openai
import gradio as gr
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.utilities import WikipediaAPIWrapper
## OPENAI API CONNECTION
## ---------------------------------------------------------------------------------------------------------------------
# Loading the API key and organization ID from file (NOT pushed to GitHub)
# with open('../keys/openai-keys.yaml') as f:
# keys_yaml = yaml.safe_load(f)
# Setting the OpenAI API key as an environment variable
def initialize_openai_key():
with open('.env', 'r') as f:
env_file = f.readlines()
envs_dict = {key.strip("'"): value.strip("\n") for key, value in [(i.split('=')) for i in env_file]}
os.environ['OPENAI_API_KEY'] = envs_dict['OPENAI_API_KEY']
# = keys_yaml['API_KEY']
# Loading the API key from the .env file, then using LangChain to connect to OpenAI
initialize_openai_key()
openai_llm = OpenAI()
## LANGCHAIN CONFIGURATION
## ---------------------------------------------------------------------------------------------------------------------
# Setting up the prompt template for retreiving the input company's top 5 competitors
top_5_competitors_template = PromptTemplate(
input_variables = ['company_name'],
template = 'Which 5 companies are {company_name}\'s largest competitors? Give me a numbered list with a brief summary of the competitor\'s strategic advantage.'
)
# Setting up the prompt template for developing five business strategies for the input company
top_5_strategies_template = PromptTemplate(
input_variables = ['company_name'],
template = 'Give me five business strategies for {company_name} to grow their business. Provide me the answer as a bulleted list with brief explanations around each strategy. Do not provide any text before the bulleted list and do not pad with newline characters.'
)
# Setting up the prompt template for generating five specific business actions around the first strategy generated from the previous prompt template
top_5_business_actions_template = PromptTemplate(
input_variables = ['business_strategies'],
template = 'From this list of business strategies:\n {business_strategies}\n Please develop 5 specific actions to take around the first strategy. Provide me the answer as a bulleted list with brief explanations for each action. Do not provide any text before the bulleted list and do not pad with newline characters.'
)
# Setting up the prompt template to retrieve a summary of the company history from the Wikipedia information
company_history_template = PromptTemplate(
input_variables = ['wikipedia_information'],
template = 'Please provide me a summary of the company history from this body of Wikipedia information: {wikipedia_information}'
)
# Setting up the prompt template to re-write company history as Jar Jar Binks from Star Wars
jar_jar_history_template = PromptTemplate(
input_variables = ['company_history'],
template = 'Please re-write the following summary in the voice of Jar Jar Binks from Star Wars: {company_history}. Do not add any newline characters at the beginning of your response.'
)
# Setting up the prompt template for generating a catchy jingle about the company from its Wikipedia information
jingle_template = PromptTemplate(
input_variables = ['company_name', 'wikipedia_information'],
template = 'Please write a catchy jingle for {company_name}. Here is some additional information about the company to help you write it: {wikipedia_information}. Do not show me the word "Jingle" at the beginning nor pad the beginning of your response with newline characters.'
)
# Setting up the prompt template to generate HTML code to create a simple website for displaying the company's summary information
html_summary_template = PromptTemplate(
input_variables = ['company_history'],
template = 'Please provide me HTML code to display a company\'s historical summary. Give me only the HTML code and nothing else. Here is the historical summary for context: {company_history}'
)
# Instantiating the LangChains for all the prompt templates defined above
top_5_competitors_chain = LLMChain(llm = openai_llm, prompt = top_5_competitors_template, verbose = True, output_key = 'top_5_competitors')
top_5_strategies_chain = LLMChain(llm = openai_llm, prompt = top_5_strategies_template, verbose = True, output_key = 'business_strategies')
top_5_business_actions_chain = LLMChain(llm = openai_llm, prompt = top_5_business_actions_template, verbose = True, output_key = 'business_actions')
company_history_chain = LLMChain(llm = openai_llm, prompt = company_history_template, verbose = True, output_key = 'company_history')
jar_jar_history_chain = LLMChain(llm = openai_llm, prompt = jar_jar_history_template, verbose = True, output_key = 'jar_jar_history')
jingle_chain = LLMChain(llm = openai_llm, prompt = jingle_template, verbose = True, output_key = 'jingle')
html_summary_chain = LLMChain(llm = openai_llm, prompt = html_summary_template, verbose = True, output_key = 'html_code_template')
# Instantiating an object to obtain results from the Wikipedia API
wikipedia_api = WikipediaAPIWrapper(top_k_results = 1)
def clear_results():
'''
Clears the results from the page
Inputs:
- company_name (str): Name of the company
- top_5_competitors (str): A bulleted list of the top 5 competitors
- business_strategies (str): A list of the top 5 business strategies
- business_actions (str): A list of top 5 business actions to take against first business strategy defined
- company_history (str): History about the company
- jar_jar_history (str): History about the company in the voice of Jar Jar Binks
    - jingle (str): A jingle written about the company
- html_summary_code (str): Code written about the summary to be displayed in HTML
Returns:
- company_name (str): Cleared out field ready for next company
- top_5_competitors (str): Cleared out field ready for next company
- business_strategies (str): Cleared out field ready for next company
- business_actions (str): Cleared out field ready for next company
- company_history (str): Cleared out field ready for next company
- jar_jar_history (str): Cleared out field ready for next company
- jingle (str): Cleared out field ready for next company
- html_summary_code (str): Cleared out field ready for next company
'''
# Clearing out the results for each field
company_name = ''
top_5_competitors = ''
business_strategies = ''
business_actions = ''
company_history = ''
jar_jar_history = ''
jingle = ''
html_summary_code = ''
return company_name, top_5_competitors, business_strategies, business_actions, company_history, jar_jar_history, jingle, html_summary_code
## HELPER FUNCTIONS
## ---------------------------------------------------------------------------------------------------------------------
def generate_business_profile(company_name_prompt):
'''
Generates the business profile for the inputted Fortune 500 company
Inputs:
- company_name_prompt (str): The name of the Fortune 500 company submitted by the user
Returns:
- company_name_prompt (str): The cleared out company name ready to take in the next submission
'''
# Setting the name of the company being profiled
company_name = company_name_prompt
# Retrieving information about the company from Wikipedia
wikipedia_information = wikipedia_api.run(f'{company_name_prompt} (company)')
# Retrieving the list of top 5 competitors from OpenAI
top_5_competitors = top_5_competitors_chain.run(company_name_prompt)
# Retrieving the top 5 strategies for the business from OpenAI
business_strategies = top_5_strategies_chain.run(company_name_prompt)
# Retrieving the top 5 business actions per the strategies derived by OpenAI in the previous step
business_actions = top_5_business_actions_chain.run(business_strategies)
business_actions = f'For the first strategy listed in the previous box, here are five specific business actions to take to further that strategy:{business_actions}'
# Retrieving a summary of the company using the Wikipedia information
company_history = company_history_chain.run(company_name = company_name_prompt, wikipedia_information = wikipedia_information)
# Retrieving a re-written version of the company history as Jar Jar Binks
jar_jar_history = jar_jar_history_chain.run(company_history)
# Retrieving the jingle written by OpenAI using wikipedia information
jingle = jingle_chain.run(company_name = company_name_prompt, wikipedia_information = wikipedia_information)
# Retrieving an HTML code template to display the company history
html_summary_code = html_summary_chain.run(jar_jar_history)
# Clearing out the company name for the next run
company_name_prompt = ''
return company_name_prompt, company_name, top_5_competitors, business_strategies, business_actions, company_history, jar_jar_history, jingle, html_summary_code
## GRADIO UI LAYOUT & FUNCTIONALITY
## ---------------------------------------------------------------------------------------------------------------------
# Defining the building blocks that represent the form and function of the Gradio UI
with gr.Blocks(title = 'Business Profiler', theme = 'base') as business_profiler:
# Setting display into two columns
with gr.Row():
# Setting the display for the first column
with gr.Column(scale = 1):
# Displaying the header image
header_image = gr.Image('business_profiler.png', interactive = False, show_label = False)
# Displaying the interactive text box to input the company name
company_name_prompt = gr.Textbox(placeholder = 'Please type the name of the Fortune 500 company you would like profiled.', label = 'Company to Profile:')
# Creating a button to clear the results
clear_results_button = gr.Button('Clear Results')
# Setting the display for the second column
with gr.Column(scale = 3):
# Displaying all the results appropriately
company_name = gr.Textbox(label = 'Company Name', interactive = False)
top_5_competitors = gr.Textbox(label = 'Top 5 Competitors', interactive = False)
            business_strategies = gr.Textbox(label = 'Top 5 Business Strategies', interactive = False)
business_actions = gr.Textbox(label = 'Top 5 Business Actions to Support First Recommended Business Strategy', interactive = False)
company_history = gr.Textbox(label = 'Company History', interactive = False)
            jar_jar_history = gr.Textbox(label = 'Company History (Dictated by Jar Jar Binks)', interactive = False)
jingle = gr.Textbox(label = 'Company Jingle', interactive = False)
html_summary_code = gr.Code(label = 'HTML Summary Code', language = 'html', interactive = False)
# Defining the behavior for what occurs when the user hits "Enter" after typing a prompt
company_name_prompt.submit(fn = generate_business_profile,
inputs = [company_name_prompt],
outputs = [company_name_prompt, company_name, top_5_competitors, business_strategies, business_actions, company_history, jar_jar_history, jingle, html_summary_code])
# Clearing out all results when the appropriate button is clicked
clear_results_button.click(fn = clear_results, inputs = None, outputs = [company_name, top_5_competitors, business_strategies, business_actions, company_history, jar_jar_history, jingle, html_summary_code])
## SCRIPT INVOCATION
## ---------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
# Launching the Gradio interface
business_profiler.launch() | [
"Which 5 companies are {company_name}'s largest competitors? Give me a numbered list with a brief summary of the competitor's strategic advantage.",
"From this list of business strategies:\n {business_strategies}\n Please develop 5 specific actions to take around the first strategy. Provide me the answer as a bulleted list with brief explanations for each action. Do not provide any text before the bulleted list and do not pad with newline characters.",
"Please provide me HTML code to display a company's historical summary. Give me only the HTML code and nothing else. Here is the historical summary for context: {company_history}",
"Give me five business strategies for {company_name} to grow their business. Provide me the answer as a bulleted list with brief explanations around each strategy. Do not provide any text before the bulleted list and do not pad with newline characters.",
"company_name",
"Please provide me a summary of the company history from this body of Wikipedia information: {wikipedia_information}",
"wikipedia_information",
"Company to Profile:",
"Please write a catchy jingle for {company_name}. Here is some additional information about the company to help you write it: {wikipedia_information}. Do not show me the word \"Jingle\" at the beginning nor pad the beginning of your response with newline characters.",
"Please type the name of the Fortune 500 company you would like profiled.",
"business_strategies",
"Please re-write the following summary in the voice of Jar Jar Binks from Star Wars: {company_history}. Do not add any newline characters at the beginning of your response.",
"company_history"
] |
2024-01-10 | SebastianPartarrieu/paperXai | display~webapp.py | import datetime
import streamlit as st
import openai
import pandas as pd
import numpy as np
import paperxai.credentials as credentials
import paperxai.constants as constants
from paperxai.llms import OpenAI
from paperxai.papers import Arxiv
from paperxai.report.retriever import ReportRetriever
from paperxai.prompt.base import Prompt
########## set up the page ##########
st.set_page_config(
page_title="PaperXAI",
page_icon="🧙♂️",
layout="wide",
)
st.header("paperXai🧙")
st.subheader("A web app to explore recent papers")
if "create_report_button_clicked" not in st.session_state:
st.session_state.create_report_button_clicked = False
if "report" not in st.session_state:
st.session_state.report = {"topics": [], "llm_answers": [], "papers": []}
if "report_string" not in st.session_state:
st.session_state.report_string = ""
def check_session_state_key_empty(session_state: dict, state_key: str) -> bool: # will put in utils file
if state_key not in session_state:
return True
elif session_state[state_key] in ["", None]:
return True
return False
def click_button() -> None: # will put in utils file
# check that model has been selected + API key entered + webpage url entered
if (
(check_session_state_key_empty(st.session_state, "model"))
or (check_session_state_key_empty(st.session_state, "OPENAI_API_KEY"))
):
st.session_state.create_report_button_clicked = False
else:
st.session_state.create_report_button_clicked = True
def define_api_key_input() -> str:
current_key = st.session_state.get("OPENAI_API_KEY", "")
if not (current_key in [None, ""]):
return current_key
elif not (credentials.OPENAI_API_KEY in [None, ""]):
return credentials.OPENAI_API_KEY
else:
return ""
def format_topics(topics: list[str]) -> str:
formatted_topics = ""
for topic in topics:
formatted_topics += "- " + topic + "\n"
return formatted_topics
def format_html_to_markdown(html_string: str) -> str:
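    # Convert the HTML report string produced by the report retriever into markdown that st.markdown can render.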
html_string = (html_string.replace("<h2>", "###")
.replace("</h2>", "\n")
.replace("<h3>", "####")
.replace("</h3>", "\n")
.replace("<h4> ", "\n**")
.replace(" </h4>", "**\n"))
html_string = html_string.replace("<p>", "\n").replace("</p>", "\n")
html_string = html_string.replace("<ul>", "").replace("</ul>", "")
html_string = html_string.replace("<li>", "-").replace("</li>", "\n")
return html_string
if "OPENAI_API_KEY" in st.session_state:
openai.api_key = st.session_state.OPENAI_API_KEY
########## sidebar ##########
with st.sidebar:
st.markdown(
"## How to use\n"
"1. Enter your [OpenAI API key](https://platform.openai.com/account/api-keys) below🔑\n"
"2. Fill out the information you want to search for in the latest papers and model/pipeline parameters\n"
"3. Chat with the model about the papers you find most interesting 💬\n"
)
api_key_input = st.text_input(
"OpenAI API Key",
type="password",
placeholder="Enter your OpenAI API key here (sk-...)",
help="You can get your API key from https://platform.openai.com/account/api-keys.",
value=define_api_key_input(),
)
st.session_state["OPENAI_API_KEY"] = api_key_input
model = st.sidebar.selectbox(
"Select the model you want to use:",
[
"gpt-3.5-turbo",
"gpt-4",
],
index=0,
key="model",
)
max_papers = st.sidebar.number_input(
"Input the # of papers you want to search through",
step=1,
format="%i",
value=1000,
key="max_papers",
)
labels_arxiv_categories = pd.read_csv(constants.ROOT_DIR+"/data/arxiv/categories.csv", sep=";")
labels_arxiv_categories.index = labels_arxiv_categories["ID"]
st.sidebar.multiselect(
"Select the arXiv CS [categories](https://arxiv.org/category_taxonomy) used to search papers:",
options=labels_arxiv_categories["ID"],
default=["cs.AI"],
format_func=lambda x: labels_arxiv_categories.loc[x]["Name"],
key="arxiv_categories",
)
st.date_input(
"Start date for articles to include",
value=datetime.datetime.now() - datetime.timedelta(days=10),
min_value=datetime.datetime.now() - datetime.timedelta(weeks=104),
max_value=datetime.datetime.now() - datetime.timedelta(days=1),
key="start_date",
help="You should only include the last few days if you want the web app to run in reasonable time."
)
st.date_input(
"End date for articles to include",
value=datetime.datetime.now(),
min_value=datetime.datetime.now() - datetime.timedelta(weeks=103),
max_value=datetime.datetime.now(),
key="end_date"
)
st.markdown("---")
st.markdown("# About")
st.markdown(
"🧙paperXai allows you to filter through all the latest papers "
"based off your questions. You can then chat to the model about "
"the papers you find most interesting."
)
st.markdown(
"This tool is a work in progress. "
"You can contribute to the project on [GitHub](https://github.com/SebastianPartarrieu/paperXai/) "
"with your feedback and suggestions💡"
)
st.markdown("Made by [Seb](https://twitter.com/seb_partarr)")
st.markdown("---")
openai_api_key = st.session_state.get("OPENAI_API_KEY")
if not openai_api_key:
st.warning(
"Enter your OpenAI API key. You can get one from"
" https://platform.openai.com/account/api-keys."
)
########## main ##########
tab1, tab2 = st.tabs(["Define", "View"])
with tab1:
with st.form(key="qa_form"):
query = st.text_area("What topic or precise question do you want to learn about?")
col1, col2, col3 = st.columns([1, 1, 5])
with col1:
submit = st.form_submit_button("Add to report")
with col2:
clean_topics = st.form_submit_button("Remove topics")
if submit:
if not (query in st.session_state.report["topics"]):
st.session_state.report["topics"].append(query)
if clean_topics:
st.session_state.report["topics"] = []
st.markdown(f"**Current topics in the report:**\n"
f"{format_topics(st.session_state.report['topics'])}")
create_report = st.button("Create report", on_click=click_button)
if create_report:
if st.session_state.report["llm_answers"] == [] and st.session_state.report["topics"] != []:
with st.spinner("Creating your report..."):
# define language model
openai_model = OpenAI(
chat_model=st.session_state.model,
embedding_model="text-embedding-ada-002",
temperature=0.0,
max_tokens=1000,
)
# get arxiv papers
arxiv = Arxiv()
arxiv.get_papers(categories=st.session_state.arxiv_categories,
max_results=int(st.session_state.max_papers),
start_date=st.session_state.start_date,
end_date=st.session_state.end_date)
arxiv.write_papers()
# load papers and compute embeddings
df_papers = pd.read_csv(constants.ROOT_DIR + "/data/arxiv/current_papers.csv",
parse_dates=["Published Date"])
df_papers["Embeddings"] = df_papers["String_representation"].apply(
lambda x: openai_model.get_embeddings(text=x)
)
papers_embeddings = df_papers["Embeddings"].values
papers_embeddings = np.vstack(papers_embeddings)
# save embeddings
np.save(constants.ROOT_DIR + "/data/arxiv/papers_embeddings.npy", papers_embeddings)
# create report
prompter = Prompt()
# create config
report_config = {"title": "Streamlit arXiv digest",
"sections": {"section 1": {"title": "arXiv based responses",
"questions": st.session_state.report['topics']}}}
report_retriever = ReportRetriever(
language_model=openai_model,
prompter=prompter,
papers_embedding=papers_embeddings,
df_papers=df_papers,
config=report_config,
)
report = report_retriever.create_report()
st.session_state.report["llm_answers"] = report["arXiv based responses"]["chat_responses"]
st.session_state.report["papers"] = report["arXiv based responses"]["papers"]
report_string = report_retriever.format_report()
st.session_state.report_string = report_string
st.text("Report created, look at the view tab!")
with tab2:
if "report_string" in st.session_state:
if not (st.session_state.report_string in [None, ""]):
st.markdown(
                format_html_to_markdown(st.session_state.report_string)
)
else:
st.markdown(
"**Please run the report creation!**"
) | [] |