date_collected (stringclasses 1) | repo_name (stringlengths 6-116) | file_name (stringlengths 2-220) | file_contents (stringlengths 13-357k) | prompts (sequence)
---|---|---|---|---
2024-01-10 | baronrustamov/gtp3-wordpress-post-generator | gpt3_wordpress.py | import openai
import rich
import typer
from rich.progress import Progress, SpinnerColumn, TextColumn
from wordpress_xmlrpc import Client, WordPressPost
from wordpress_xmlrpc.methods.posts import NewPost
class Gpt3Wordpress:
def __init__(self, api_key: str, blog_url: str, username: str, password: str):
openai.api_key = api_key
self.blog_url = blog_url + "/xmlrpc.php"
self.username = username
self.password = password
def _gpt3_query(self, prompt: str) -> str:
"""Query the OpenAI GPT-3 API with the given prompt and return the response."""
try:
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=0.6,
max_tokens=3000,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.6,
)
return response.choices[0].text
except Exception as e:
rich.print(f"Error: {e}")
exit(1)
def _generate_loop(self, prompt: str, kind: str) -> str:
"""Generate a title or post until it meets the requirements."""
while True:
with Progress(
SpinnerColumn(),
TextColumn("[progress.description]{task.description}"),
) as progress:
progress.add_task(description=f"Generating {kind}...", total=None)
content = self._gpt3_query(prompt)
rich.print(f"\nGenerated {kind}: {content}")
confirm = typer.confirm(f"\nDo you like the generated {kind}?")
if confirm:
return content
def generate_post_title(self, topic: str) -> str:
"""Generate a post title."""
return self._generate_loop(f"Blog post title."
f"Title must be about {topic}."
f"Length must be maximum 70 characters",
"title")
def generate_post(self, title: str, tone: str, max_words: int) -> str:
"""Generate a post."""
return self._generate_loop(f"Blog post which titles {title}. "
f"Tone must be {tone}. "
f"Length must be maximum {max_words} words.",
"post")
def create_wordpress_post(self, title: str, content: str) -> None:
"""Create a WordPress post."""
client = Client(self.blog_url, self.username, self.password)
post = WordPressPost()
post.title = title
post.content = content
post.post_status = 'draft'
try:
client.call(NewPost(post))
except Exception as e:
rich.print(f"Error: {e}")
exit(1)
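# Illustrative usage sketch (not part of the original file): the credentials and
# topic below are placeholder assumptions, and the legacy openai.Completion API
# used above requires openai<1.0.
if __name__ == "__main__":
    bot = Gpt3Wordpress(
        api_key="sk-...",                # placeholder OpenAI key
        blog_url="https://example.com",  # site root; "/xmlrpc.php" is appended in __init__
        username="admin",
        password="app-password",
    )
    post_title = bot.generate_post_title("home gardening")
    post_body = bot.generate_post(post_title, "informative", 800)
    bot.create_wordpress_post(post_title, post_body)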
| [] |
2024-01-10 | Yuqifan1117/HalluciDoctor | utils~prompt_generation.py | import requests
from retrying import retry
from openai import OpenAI
@retry(stop_max_attempt_number=10, wait_fixed=2000)
def send_request(data):
# openai api for completion or chat
# if use_completion:
# data = convert_chat_to_completion(data)
# try:
# response = requests.post(url, headers=HEADER, data=json.dumps(data), verify=False, proxies=PROXY)
# response_json = json.loads(response.text)
# if "choices" in response_json and len(response_json["choices"]) > 0:
# if use_completion:
# return response.json()["choices"][0]["text"]
# else:
# return response.json()["choices"][0]["message"]["content"]
# else:
# return response_json
# except requests.exceptions.RequestException as e:
# print(f"Error: {e}")
# chinese api for completion or chat
client = OpenAI(
api_key="",
base_url=""
)
try:
# stream
response = client.chat.completions.create(
model=data['model'],
messages=data["messages"],
temperature=data["temperature"],
stream=False,
)
content = response.choices[0].message.content
return content
except requests.exceptions.Timeout as e:
print(f"Timeout Error: {e}")
except Exception as e:
print(f"Error: {e}")
return None
# Questions must be answerable and grounded in the context of the description, covering object types, object actions, object locations, relationships between objects, etc.
def qgqa_generation(caption, answer):
messages = [{"role": "user", "content": f"{caption}\nPlease give me meaningful and answerable questions corresponding to the following answers based on the given context to help me understand the context. Please ensure that each question doesn't involve 'How many' and is concise to exactly match the corresponding answer.\nAnswer: {answer}"}]
generation_data = {
"model": 'gpt-3.5-turbo',
"messages": messages,
"temperature": 1.0
}
results = send_request(generation_data)
retries = 0
while (results is None or 'error' in results) and retries < 3:
results = send_request(generation_data)
retries += 1
if results is not None and 'error' not in results:
results = results.split('\n\n')
else:
return ""
return results
def refine_passage(passage, hallucination_phrases):
PROMPT_TEMPLATE='''Given a passage and wrong phrases, you are required to remove all of them in the passage and output the refined passage in a fluent and natural style, following these rules:
1. Try to remove wrong phrases and do not use other phrases to replace
Examples:
Passage:
In addition to the sandwiches of various sizes, a bowl, a cup, and a spoon can be seen on the table, suggesting that the guests are sharing food and drinks.
Wrong phrases:
['spoon', 'drinks', 'sandwiches is various sizes']
Refined passage:
In addition to the sandwiches, a bowl and a cup can be seen on the table, suggesting that the guests are sharing food.
Passage:
The image depicts a scene of two giraffes standing on a dirt road near a fence. There are three cars parked in the background, with one on the left side and two more on the right side.
Wrong phrases:
['cars', 'cars are three']
Refined passage:
The image depicts a scene of two giraffes standing on a dirt road near a fence.
Passage:
{passage}
Wrong phrases:
{Hallu_phrase}
Refined passage: '''
content = PROMPT_TEMPLATE.format(passage=passage, Hallu_phrase=hallucination_phrases)
message_example = [
{"role": "system", "content": 'You are a language assistant that helps to refine a passage with wrong phrases removed.'},
{"role": "user", "content": content}
]
generation_data = {
"model": 'gpt-3.5-turbo',
"messages": message_example,
"temperature": 0.5
}
results = send_request(generation_data)
retries = 0
while (results is None or 'error' in results) and retries < 5:
results = send_request(generation_data)
retries += 1
if results is not None and 'error' not in results:
return results
else:
return ""
def LLM_evaluation_hallucination(coco_captions, bounding_box, description_1, description_2, description_3, description_4, description_5):
PROMPT_TEMPLATE='''Suppose you are a hallucination annotator who judges the degree of hallucination based on the number of errors in the description of objects, relationships, and attributes, and you have the following real image information.
Reference captions: {coco_captions}
Bounding box: {bounding_box}
Please just provide the hallucination score(1-5) for the below descriptions without any explanation, where the fewer descriptive errors in the caption, the higher the hallucination score given. The output format: [x,...]
Descriptions:
caption 1: {description_1}
caption 2: {description_2}
caption 3: {description_3}
caption 4: {description_4}
caption 5: {description_5}
Output: '''
total_prompt = PROMPT_TEMPLATE.format(coco_captions=coco_captions,bounding_box=bounding_box,description_1=description_1,description_2=description_2,description_3=description_3,description_4=description_4,description_5=description_5)
message_example = [
{"role": "user", "content": total_prompt}
]
generation_data = {
"model": 'gpt-3.5-turbo',
"messages": message_example,
"temperature": 0.8
}
results = send_request(generation_data)
retries = 0
while (results is None or 'error' in results) and retries < 5:
results = send_request(generation_data)
retries += 1
if results is not None and 'error' not in results:
return results
else:
return ""
def LLM_evaluation_details(coco_captions, bounding_box, description_1, description_2, description_3, description_4, description_5):
PROMPT_TEMPLATE='''Suppose you are an image detail annotator who judges the degree of sentence diversity based on the number of objects, relations, and attributes.
Please just provide the diversity score(1-5) for the below descriptions without any explanation, where longer caption with more content give a higher diversity score. The output format: [x,...]
Descriptions:
caption 1: {description_1}
caption 2: {description_2}
caption 3: {description_3}
caption 4: {description_4}
caption 5: {description_5}
Output: '''
total_prompt = PROMPT_TEMPLATE.format(coco_captions=coco_captions,bounding_box=bounding_box,description_1=description_1,description_2=description_2,description_3=description_3,description_4=description_4,description_5=description_5)
message_example = [
{"role": "user", "content": total_prompt}
]
generation_data = {
"model": 'gpt-3.5-turbo',
"messages": message_example,
"temperature": 0.8
}
results = send_request(generation_data)
retries = 0
while (results is None or 'error' in results) and retries < 5:
results = send_request(generation_data)
retries += 1
if results is not None and 'error' not in results:
return results
else:
return ""
| [
"PLACEHOLDER\nPlease give me meaningful and answerable questions corresponding to the following answers based on the given context to help me understand the context. Please ensure that each question doesn't involve 'How many' and is concise to exactly match the corresponding answer.\nAnswer: PLACEHOLDER",
"Given a passage and wrong phrases, you are required to remove all of them in the passage and output the refined passage in a fluent and natural style, following these rules:\n 1. Try to remove wrong phrases and do not use other phrases to replace\n \n Examples:\n Passage:\n In addition to the sandwiches of various sizes, a bowl, a cup, and a spoon can be seen on the table, suggesting that the guests are sharing food and drinks.\n Wrong phrases:\n ['spoon', 'drinks', 'sandwiches is various sizes']\n Refined passage: \n In addition to the sandwiches, a bowl and a cup can be seen on the table, suggesting that the guests are sharing food.\n \n Passage:\n The image depicts a scene of two giraffes standing on a dirt road near a fence. There are three cars parked in the background, with one on the left side and two more on the right side.\n Wrong phrases:\n ['cars', 'cars are three']\n Refined passage:\n The image depicts a scene of two giraffes standing on a dirt road near a fence.\n\n Passage:\n {passage}\n Wrong phrases:\n {Hallu_phrase}\n Refined passage: ",
"You are a language assistant that helps to refine a passage with wrong phrases removed.",
"Suppose you are a hallucination annotator who judges the degree of hallucination based on the number of errors in the description of objects, relationships, and attributes, and you have the following real image information. \n Reference captions: {coco_captions}\n Bounding box: {bounding_box}\n Please just provide the hallucination score(1-5) for the below descriptions without any explanation, where the fewer descriptive errors in the caption, the higher the hallucination score given. The output format: [x,...]\n Descriptions:\n caption 1: {description_1}\n caption 2: {description_2}\n caption 3: {description_3}\n caption 4: {description_4}\n caption 5: {description_5}\n Output: ",
"Suppose you are an image detail annotator who judges the degree of sentence diversity based on the number of objects, relations, and attributes. \n Please just provide the diversity score(1-5) for the below descriptions without any explanation, where longer caption with more content give a higher diversity score. The output format: [x,...]\n Descriptions:\n caption 1: {description_1}\n caption 2: {description_2}\n caption 3: {description_3}\n caption 4: {description_4}\n caption 5: {description_5}\n Output: "
] |
2024-01-10 | stephenomalley/dd-trace-py | ddtrace~contrib~langchain~patch.py | import os
import sys
from typing import Any
from typing import Dict
from typing import Optional
from typing import TYPE_CHECKING
import langchain
from langchain.callbacks.openai_info import get_openai_token_cost_for_model
import wrapt
from ddtrace import config
from ddtrace.constants import ERROR_TYPE
from ddtrace.contrib._trace_utils_llm import BaseLLMIntegration
from ddtrace.contrib.langchain.constants import API_KEY
from ddtrace.contrib.langchain.constants import COMPLETION_TOKENS
from ddtrace.contrib.langchain.constants import MODEL
from ddtrace.contrib.langchain.constants import PROMPT_TOKENS
from ddtrace.contrib.langchain.constants import PROVIDER
from ddtrace.contrib.langchain.constants import TOTAL_COST
from ddtrace.contrib.langchain.constants import TYPE
from ddtrace.contrib.langchain.constants import text_embedding_models
from ddtrace.contrib.langchain.constants import vectorstore_classes
from ddtrace.contrib.trace_utils import unwrap
from ddtrace.contrib.trace_utils import with_traced_module
from ddtrace.contrib.trace_utils import wrap
from ddtrace.internal.agent import get_stats_url
from ddtrace.internal.logger import get_logger
from ddtrace.internal.utils import ArgumentError
from ddtrace.internal.utils import get_argument_value
from ddtrace.internal.utils.formats import asbool
from ddtrace.internal.utils.formats import deep_getattr
from ddtrace.pin import Pin
if TYPE_CHECKING:
from ddtrace import Span
log = get_logger(__name__)
def get_version():
# type: () -> str
return getattr(langchain, "__version__", "")
config._add(
"langchain",
{
"logs_enabled": asbool(os.getenv("DD_LANGCHAIN_LOGS_ENABLED", False)),
"metrics_enabled": asbool(os.getenv("DD_LANGCHAIN_METRICS_ENABLED", True)),
"span_prompt_completion_sample_rate": float(os.getenv("DD_LANGCHAIN_SPAN_PROMPT_COMPLETION_SAMPLE_RATE", 1.0)),
"log_prompt_completion_sample_rate": float(os.getenv("DD_LANGCHAIN_LOG_PROMPT_COMPLETION_SAMPLE_RATE", 0.1)),
"span_char_limit": int(os.getenv("DD_LANGCHAIN_SPAN_CHAR_LIMIT", 128)),
"_api_key": os.getenv("DD_API_KEY"),
},
)
class _LangChainIntegration(BaseLLMIntegration):
_integration_name = "langchain"
def __init__(self, config, stats_url, site, api_key):
super().__init__(config, stats_url, site, api_key)
def _set_base_span_tags(self, span, interface_type="", provider=None, model=None, api_key=None):
# type: (Span, str, Optional[str], Optional[str], Optional[str]) -> None
"""Set base level tags that should be present on all LangChain spans (if they are not None)."""
span.set_tag_str(TYPE, interface_type)
if provider is not None:
span.set_tag_str(PROVIDER, provider)
if model is not None:
span.set_tag_str(MODEL, model)
if api_key is not None:
if len(api_key) >= 4:
span.set_tag_str(API_KEY, "...%s" % str(api_key[-4:]))
else:
span.set_tag_str(API_KEY, api_key)
@classmethod
def _logs_tags(cls, span):
# type: (Span) -> str
api_key = span.get_tag(API_KEY) or ""
tags = "env:%s,version:%s,%s:%s,%s:%s,%s:%s,%s:%s" % ( # noqa: E501
(config.env or ""),
(config.version or ""),
PROVIDER,
(span.get_tag(PROVIDER) or ""),
MODEL,
(span.get_tag(MODEL) or ""),
TYPE,
(span.get_tag(TYPE) or ""),
API_KEY,
api_key,
)
return tags
@classmethod
def _metrics_tags(cls, span):
# type: (Span) -> list
provider = span.get_tag(PROVIDER) or ""
api_key = span.get_tag(API_KEY) or ""
tags = [
"version:%s" % (config.version or ""),
"env:%s" % (config.env or ""),
"service:%s" % (span.service or ""),
"%s:%s" % (PROVIDER, provider),
"%s:%s" % (MODEL, span.get_tag(MODEL) or ""),
"%s:%s" % (TYPE, span.get_tag(TYPE) or ""),
"%s:%s" % (API_KEY, api_key),
"error:%d" % span.error,
]
err_type = span.get_tag(ERROR_TYPE)
if err_type:
tags.append("%s:%s" % (ERROR_TYPE, err_type))
return tags
def record_usage(self, span, usage):
# type: (Span, Dict[str, Any]) -> None
if not usage or self._config.metrics_enabled is False:
return
for token_type in ("prompt", "completion", "total"):
num_tokens = usage.get("token_usage", {}).get(token_type + "_tokens")
if not num_tokens:
continue
self.metric(span, "dist", "tokens.%s" % token_type, num_tokens)
total_cost = span.get_metric(TOTAL_COST)
if total_cost:
self.metric(span, "incr", "tokens.total_cost", total_cost)
def _extract_model_name(instance):
# type: (langchain.llms.BaseLLM) -> Optional[str]
"""Extract model name or ID from llm instance."""
for attr in ("model", "model_name", "model_id", "model_key", "repo_id"):
if hasattr(instance, attr):
return getattr(instance, attr)
return None
def _format_api_key(api_key):
# type: (str) -> str
"""Obfuscate a given LLM provider API key by returning the last four characters."""
if not api_key or len(api_key) < 4:
return ""
return "...%s" % api_key[-4:]
def _extract_api_key(instance):
# type: (Any) -> str
"""
Extract and format LLM-provider API key from instance.
Note that langchain's LLM/ChatModel/Embeddings interfaces do not have a
standard attribute name for storing the provider-specific API key, so make a
best effort here by checking for attributes that end with `api_key/api_token`.
"""
api_key_attrs = [a for a in dir(instance) if a.endswith(("api_token", "api_key"))]
if api_key_attrs and hasattr(instance, str(api_key_attrs[0])):
api_key = getattr(instance, api_key_attrs[0], None)
if api_key:
return _format_api_key(api_key)
return ""
def _tag_openai_token_usage(span, llm_output, propagated_cost=0, propagate=False):
# type: (Span, Dict[str, Any], int, bool) -> None
"""
Extract token usage from llm_output, tag on span.
Calculate the total cost for each LLM/chat_model, then propagate those values up the trace so that
the root span will store the total token_usage/cost of all of its descendants.
"""
for token_type in ("prompt", "completion", "total"):
current_metric_value = span.get_metric("langchain.tokens.%s_tokens" % token_type) or 0
metric_value = llm_output["token_usage"].get("%s_tokens" % token_type, 0)
span.set_metric("langchain.tokens.%s_tokens" % token_type, current_metric_value + metric_value)
total_cost = span.get_metric(TOTAL_COST) or 0
if not propagate:
try:
completion_cost = get_openai_token_cost_for_model(
span.get_tag(MODEL),
span.get_metric(COMPLETION_TOKENS),
is_completion=True,
)
prompt_cost = get_openai_token_cost_for_model(span.get_tag(MODEL), span.get_metric(PROMPT_TOKENS))
total_cost = completion_cost + prompt_cost
except ValueError:
# If not in langchain's openai model catalog, the above helpers will raise a ValueError.
log.debug("Cannot calculate token/cost as the model is not in LangChain's OpenAI model catalog.")
span.set_metric(TOTAL_COST, propagated_cost + total_cost)
if span._parent is not None:
_tag_openai_token_usage(span._parent, llm_output, propagated_cost=propagated_cost + total_cost, propagate=True)
@with_traced_module
def traced_llm_generate(langchain, pin, func, instance, args, kwargs):
llm_provider = instance._llm_type
prompts = get_argument_value(args, kwargs, 0, "prompts")
integration = langchain._datadog_integration
model = _extract_model_name(instance)
span = integration.trace(
pin,
"%s.%s" % (instance.__module__, instance.__class__.__name__),
interface_type="llm",
provider=llm_provider,
model=model,
api_key=_extract_api_key(instance),
)
completions = None
try:
if integration.is_pc_sampled_span(span):
for idx, prompt in enumerate(prompts):
span.set_tag_str("langchain.request.prompts.%d" % idx, integration.trunc(str(prompt)))
for param, val in getattr(instance, "_identifying_params", {}).items():
if isinstance(val, dict):
for k, v in val.items():
span.set_tag_str("langchain.request.%s.parameters.%s.%s" % (llm_provider, param, k), str(v))
else:
span.set_tag_str("langchain.request.%s.parameters.%s" % (llm_provider, param), str(val))
completions = func(*args, **kwargs)
if isinstance(instance, langchain.llms.OpenAI):
_tag_openai_token_usage(span, completions.llm_output)
integration.record_usage(span, completions.llm_output)
for idx, completion in enumerate(completions.generations):
if integration.is_pc_sampled_span(span):
span.set_tag_str("langchain.response.completions.%d.text" % idx, integration.trunc(completion[0].text))
if completion and completion[0].generation_info is not None:
span.set_tag_str(
"langchain.response.completions.%d.finish_reason" % idx,
str(completion[0].generation_info.get("finish_reason")),
)
span.set_tag_str(
"langchain.response.completions.%d.logprobs" % idx,
str(completion[0].generation_info.get("logprobs")),
)
except Exception:
span.set_exc_info(*sys.exc_info())
integration.metric(span, "incr", "request.error", 1)
raise
finally:
span.finish()
integration.metric(span, "dist", "request.duration", span.duration_ns)
if integration.is_pc_sampled_log(span):
if completions is None:
log_completions = []
else:
log_completions = [
[{"text": completion.text} for completion in completions] for completions in completions.generations
]
integration.log(
span,
"info" if span.error == 0 else "error",
"sampled %s.%s" % (instance.__module__, instance.__class__.__name__),
attrs={
"prompts": prompts,
"choices": log_completions,
},
)
return completions
@with_traced_module
async def traced_llm_agenerate(langchain, pin, func, instance, args, kwargs):
llm_provider = instance._llm_type
prompts = get_argument_value(args, kwargs, 0, "prompts")
integration = langchain._datadog_integration
model = _extract_model_name(instance)
span = integration.trace(
pin,
"%s.%s" % (instance.__module__, instance.__class__.__name__),
interface_type="llm",
provider=llm_provider,
model=model,
api_key=_extract_api_key(instance),
)
completions = None
try:
if integration.is_pc_sampled_span(span):
for idx, prompt in enumerate(prompts):
span.set_tag_str("langchain.request.prompts.%d" % idx, integration.trunc(str(prompt)))
for param, val in getattr(instance, "_identifying_params", {}).items():
if isinstance(val, dict):
for k, v in val.items():
span.set_tag_str("langchain.request.%s.parameters.%s.%s" % (llm_provider, param, k), str(v))
else:
span.set_tag_str("langchain.request.%s.parameters.%s" % (llm_provider, param), str(val))
completions = await func(*args, **kwargs)
if isinstance(instance, langchain.llms.OpenAI):
_tag_openai_token_usage(span, completions.llm_output)
integration.record_usage(span, completions.llm_output)
for idx, completion in enumerate(completions.generations):
if integration.is_pc_sampled_span(span):
span.set_tag_str("langchain.response.completions.%d.text" % idx, integration.trunc(completion[0].text))
if completion and completion[0].generation_info is not None:
span.set_tag_str(
"langchain.response.completions.%d.finish_reason" % idx,
str(completion[0].generation_info.get("finish_reason")),
)
span.set_tag_str(
"langchain.response.completions.%d.logprobs" % idx,
str(completion[0].generation_info.get("logprobs")),
)
except Exception:
span.set_exc_info(*sys.exc_info())
integration.metric(span, "incr", "request.error", 1)
raise
finally:
span.finish()
integration.metric(span, "dist", "request.duration", span.duration_ns)
if integration.is_pc_sampled_log(span):
if completions is None:
log_completions = []
else:
log_completions = [
[{"text": completion.text} for completion in completions] for completions in completions.generations
]
integration.log(
span,
"info" if span.error == 0 else "error",
"sampled %s.%s" % (instance.__module__, instance.__class__.__name__),
attrs={
"prompts": prompts,
"choices": log_completions,
},
)
return completions
@with_traced_module
def traced_chat_model_generate(langchain, pin, func, instance, args, kwargs):
llm_provider = instance._llm_type.split("-")[0]
chat_messages = get_argument_value(args, kwargs, 0, "messages")
integration = langchain._datadog_integration
span = integration.trace(
pin,
"%s.%s" % (instance.__module__, instance.__class__.__name__),
interface_type="chat_model",
provider=llm_provider,
model=_extract_model_name(instance),
api_key=_extract_api_key(instance),
)
chat_completions = None
try:
for message_set_idx, message_set in enumerate(chat_messages):
for message_idx, message in enumerate(message_set):
if integration.is_pc_sampled_span(span):
span.set_tag_str(
"langchain.request.messages.%d.%d.content" % (message_set_idx, message_idx),
integration.trunc(message.content),
)
span.set_tag_str(
"langchain.request.messages.%d.%d.message_type" % (message_set_idx, message_idx),
message.__class__.__name__,
)
for param, val in getattr(instance, "_identifying_params", {}).items():
if isinstance(val, dict):
for k, v in val.items():
span.set_tag_str("langchain.request.%s.parameters.%s.%s" % (llm_provider, param, k), str(v))
else:
span.set_tag_str("langchain.request.%s.parameters.%s" % (llm_provider, param), str(val))
chat_completions = func(*args, **kwargs)
if isinstance(instance, langchain.chat_models.ChatOpenAI):
_tag_openai_token_usage(span, chat_completions.llm_output)
integration.record_usage(span, chat_completions.llm_output)
for message_set_idx, message_set in enumerate(chat_completions.generations):
for idx, chat_completion in enumerate(message_set):
if integration.is_pc_sampled_span(span):
span.set_tag_str(
"langchain.response.completions.%d.%d.content" % (message_set_idx, idx),
integration.trunc(chat_completion.text),
)
span.set_tag_str(
"langchain.response.completions.%d.%d.message_type" % (message_set_idx, idx),
chat_completion.message.__class__.__name__,
)
except Exception:
span.set_exc_info(*sys.exc_info())
integration.metric(span, "incr", "request.error", 1)
raise
finally:
span.finish()
integration.metric(span, "dist", "request.duration", span.duration_ns)
if integration.is_pc_sampled_log(span):
if chat_completions is None:
log_chat_completions = []
else:
log_chat_completions = [
[
{
"content": message.text,
"message_type": message.message.__class__.__name__,
}
for message in messages
]
for messages in chat_completions.generations
]
integration.log(
span,
"info" if span.error == 0 else "error",
"sampled %s.%s" % (instance.__module__, instance.__class__.__name__),
attrs={
"messages": [
[
{
"content": message.content,
"message_type": message.__class__.__name__,
}
for message in messages
]
for messages in chat_messages
],
"choices": log_chat_completions,
},
)
return chat_completions
@with_traced_module
async def traced_chat_model_agenerate(langchain, pin, func, instance, args, kwargs):
llm_provider = instance._llm_type.split("-")[0]
chat_messages = get_argument_value(args, kwargs, 0, "messages")
integration = langchain._datadog_integration
span = integration.trace(
pin,
"%s.%s" % (instance.__module__, instance.__class__.__name__),
interface_type="chat_model",
provider=llm_provider,
model=_extract_model_name(instance),
api_key=_extract_api_key(instance),
)
chat_completions = None
try:
for message_set_idx, message_set in enumerate(chat_messages):
for message_idx, message in enumerate(message_set):
if integration.is_pc_sampled_span(span):
span.set_tag_str(
"langchain.request.messages.%d.%d.content" % (message_set_idx, message_idx),
integration.trunc(message.content),
)
span.set_tag_str(
"langchain.request.messages.%d.%d.message_type" % (message_set_idx, message_idx),
message.__class__.__name__,
)
for param, val in getattr(instance, "_identifying_params", {}).items():
if isinstance(val, dict):
for k, v in val.items():
span.set_tag_str("langchain.request.%s.parameters.%s.%s" % (llm_provider, param, k), str(v))
else:
span.set_tag_str("langchain.request.%s.parameters.%s" % (llm_provider, param), str(val))
chat_completions = await func(*args, **kwargs)
if isinstance(instance, langchain.chat_models.ChatOpenAI):
_tag_openai_token_usage(span, chat_completions.llm_output)
integration.record_usage(span, chat_completions.llm_output)
for message_set_idx, message_set in enumerate(chat_completions.generations):
for idx, chat_completion in enumerate(message_set):
if integration.is_pc_sampled_span(span):
span.set_tag_str(
"langchain.response.completions.%d.%d.content" % (message_set_idx, idx),
integration.trunc(chat_completion.text),
)
span.set_tag_str(
"langchain.response.completions.%d.%d.message_type" % (message_set_idx, idx),
chat_completion.message.__class__.__name__,
)
except Exception:
span.set_exc_info(*sys.exc_info())
integration.metric(span, "incr", "request.error", 1)
raise
finally:
span.finish()
integration.metric(span, "dist", "request.duration", span.duration_ns)
if integration.is_pc_sampled_log(span):
if chat_completions is None:
log_chat_completions = []
else:
log_chat_completions = [
[
{
"content": message.text,
"message_type": message.message.__class__.__name__,
}
for message in messages
]
for messages in chat_completions.generations
]
integration.log(
span,
"info" if span.error == 0 else "error",
"sampled %s.%s" % (instance.__module__, instance.__class__.__name__),
attrs={
"messages": [
[
{
"content": message.content,
"message_type": message.__class__.__name__,
}
for message in messages
]
for messages in chat_messages
],
"choices": log_chat_completions,
},
)
return chat_completions
@with_traced_module
def traced_embedding(langchain, pin, func, instance, args, kwargs):
"""
This traces both embed_query(text) and embed_documents(texts), so we need to make sure
we get the right arg/kwarg.
"""
try:
input_texts = get_argument_value(args, kwargs, 0, "texts")
except ArgumentError:
input_texts = get_argument_value(args, kwargs, 0, "text")
provider = instance.__class__.__name__.split("Embeddings")[0].lower()
integration = langchain._datadog_integration
span = integration.trace(
pin,
"%s.%s" % (instance.__module__, instance.__class__.__name__),
interface_type="embedding",
provider=provider,
model=_extract_model_name(instance),
api_key=_extract_api_key(instance),
)
try:
if isinstance(input_texts, str):
if integration.is_pc_sampled_span(span):
span.set_tag_str("langchain.request.inputs.0.text", integration.trunc(input_texts))
span.set_metric("langchain.request.input_count", 1)
else:
if integration.is_pc_sampled_span(span):
for idx, text in enumerate(input_texts):
span.set_tag_str("langchain.request.inputs.%d.text" % idx, integration.trunc(text))
span.set_metric("langchain.request.input_count", len(input_texts))
# langchain currently does not support token tracking for OpenAI embeddings:
# https://github.com/hwchase17/langchain/issues/945
embeddings = func(*args, **kwargs)
if isinstance(embeddings, list) and isinstance(embeddings[0], list):
for idx, embedding in enumerate(embeddings):
span.set_metric("langchain.response.outputs.%d.embedding_length" % idx, len(embedding))
else:
span.set_metric("langchain.response.outputs.embedding_length", len(embeddings))
except Exception:
span.set_exc_info(*sys.exc_info())
integration.metric(span, "incr", "request.error", 1)
raise
finally:
span.finish()
integration.metric(span, "dist", "request.duration", span.duration_ns)
if integration.is_pc_sampled_log(span):
integration.log(
span,
"info" if span.error == 0 else "error",
"sampled %s.%s" % (instance.__module__, instance.__class__.__name__),
attrs={"inputs": [input_texts] if isinstance(input_texts, str) else input_texts},
)
return embeddings
@with_traced_module
def traced_chain_call(langchain, pin, func, instance, args, kwargs):
integration = langchain._datadog_integration
span = integration.trace(pin, "%s.%s" % (instance.__module__, instance.__class__.__name__), interface_type="chain")
final_outputs = {}
try:
inputs = get_argument_value(args, kwargs, 0, "inputs")
if not isinstance(inputs, dict):
inputs = {instance.input_keys[0]: inputs}
if integration.is_pc_sampled_span(span):
for k, v in inputs.items():
span.set_tag_str("langchain.request.inputs.%s" % k, integration.trunc(str(v)))
template = deep_getattr(instance, "prompt.template", default="")
if template:
span.set_tag_str("langchain.request.prompt", integration.trunc(str(template)))
final_outputs = func(*args, **kwargs)
if integration.is_pc_sampled_span(span):
for k, v in final_outputs.items():
span.set_tag_str("langchain.response.outputs.%s" % k, integration.trunc(str(v)))
except Exception:
span.set_exc_info(*sys.exc_info())
integration.metric(span, "incr", "request.error", 1)
raise
finally:
span.finish()
integration.metric(span, "dist", "request.duration", span.duration_ns)
if integration.is_pc_sampled_log(span):
log_inputs = {}
log_outputs = {}
for k, v in inputs.items():
log_inputs[k] = str(v)
for k, v in final_outputs.items():
log_outputs[k] = str(v)
integration.log(
span,
"info" if span.error == 0 else "error",
"sampled %s.%s" % (instance.__module__, instance.__class__.__name__),
attrs={
"inputs": log_inputs,
"prompt": str(deep_getattr(instance, "prompt.template", default="")),
"outputs": log_outputs,
},
)
return final_outputs
@with_traced_module
async def traced_chain_acall(langchain, pin, func, instance, args, kwargs):
integration = langchain._datadog_integration
span = integration.trace(pin, "%s.%s" % (instance.__module__, instance.__class__.__name__), interface_type="chain")
final_outputs = {}
try:
inputs = get_argument_value(args, kwargs, 0, "inputs")
if not isinstance(inputs, dict):
inputs = {instance.input_keys[0]: inputs}
if integration.is_pc_sampled_span(span):
for k, v in inputs.items():
span.set_tag_str("langchain.request.inputs.%s" % k, integration.trunc(str(v)))
template = deep_getattr(instance, "prompt.template", default="")
if template:
span.set_tag_str("langchain.request.prompt", integration.trunc(str(template)))
final_outputs = await func(*args, **kwargs)
if integration.is_pc_sampled_span(span):
for k, v in final_outputs.items():
span.set_tag_str("langchain.response.outputs.%s" % k, integration.trunc(str(v)))
except Exception:
span.set_exc_info(*sys.exc_info())
integration.metric(span, "incr", "request.error", 1)
raise
finally:
span.finish()
integration.metric(span, "dist", "request.duration", span.duration_ns)
if integration.is_pc_sampled_log(span):
log_inputs = {}
log_outputs = {}
for k, v in inputs.items():
log_inputs[k] = str(v)
for k, v in final_outputs.items():
log_outputs[k] = str(v)
integration.log(
span,
"info" if span.error == 0 else "error",
"sampled %s.%s" % (instance.__module__, instance.__class__.__name__),
attrs={
"inputs": log_inputs,
"prompt": str(deep_getattr(instance, "prompt.template", default="")),
"outputs": log_outputs,
},
)
return final_outputs
@with_traced_module
def traced_similarity_search(langchain, pin, func, instance, args, kwargs):
integration = langchain._datadog_integration
query = get_argument_value(args, kwargs, 0, "query")
k = kwargs.get("k", args[1] if len(args) >= 2 else None)
provider = instance.__class__.__name__.lower()
span = integration.trace(
pin,
"%s.%s" % (instance.__module__, instance.__class__.__name__),
interface_type="similarity_search",
provider=provider,
api_key=_extract_api_key(instance),
)
documents = []
try:
if integration.is_pc_sampled_span(span):
span.set_tag_str("langchain.request.query", integration.trunc(query))
if k is not None:
span.set_tag_str("langchain.request.k", str(k))
for kwarg_key, v in kwargs.items():
span.set_tag_str("langchain.request.%s" % kwarg_key, str(v))
if isinstance(instance, langchain.vectorstores.Pinecone):
span.set_tag_str(
"langchain.request.pinecone.environment",
instance._index.configuration.server_variables.get("environment", ""),
)
span.set_tag_str(
"langchain.request.pinecone.index_name",
instance._index.configuration.server_variables.get("index_name", ""),
)
span.set_tag_str(
"langchain.request.pinecone.project_name",
instance._index.configuration.server_variables.get("project_name", ""),
)
api_key = instance._index.configuration.api_key.get("ApiKeyAuth", "")
span.set_tag_str(API_KEY, "...%s" % api_key[-4:]) # override api_key for Pinecone
documents = func(*args, **kwargs)
span.set_metric("langchain.response.document_count", len(documents))
for idx, document in enumerate(documents):
span.set_tag_str(
"langchain.response.document.%d.page_content" % idx, integration.trunc(str(document.page_content))
)
for kwarg_key, v in document.metadata.items():
span.set_tag_str(
"langchain.response.document.%d.metadata.%s" % (idx, kwarg_key), integration.trunc(str(v))
)
except Exception:
span.set_exc_info(*sys.exc_info())
integration.metric(span, "incr", "request.error", 1)
raise
finally:
span.finish()
integration.metric(span, "dist", "request.duration", span.duration_ns)
if integration.is_pc_sampled_log(span):
integration.log(
span,
"info" if span.error == 0 else "error",
"sampled %s.%s" % (instance.__module__, instance.__class__.__name__),
attrs={
"query": query,
"k": k or "",
"documents": [
{"page_content": document.page_content, "metadata": document.metadata} for document in documents
],
},
)
return documents
def patch():
if getattr(langchain, "_datadog_patch", False):
return
langchain._datadog_patch = True
# TODO: How do we test this? Can we mock out the metric/logger/sampler?
ddsite = os.getenv("DD_SITE", "datadoghq.com")
ddapikey = os.getenv("DD_API_KEY", config.langchain._api_key)
Pin().onto(langchain)
integration = _LangChainIntegration(
config=config.langchain,
stats_url=get_stats_url(),
site=ddsite,
api_key=ddapikey,
)
langchain._datadog_integration = integration
if config.langchain.logs_enabled:
if not ddapikey:
raise ValueError(
"DD_API_KEY is required for sending logs from the LangChain integration."
" The LangChain integration can be disabled by setting the ``DD_TRACE_LANGCHAIN_ENABLED``"
" environment variable to False."
)
integration.start_log_writer()
# Langchain doesn't allow wrapping directly from root, so we have to import the base classes first before wrapping.
# ref: https://github.com/DataDog/dd-trace-py/issues/7123
from langchain import embeddings # noqa
from langchain import vectorstores # noqa
from langchain.chains.base import Chain # noqa
from langchain.chat_models.base import BaseChatModel # noqa
from langchain.llms.base import BaseLLM # noqa
wrap("langchain", "llms.base.BaseLLM.generate", traced_llm_generate(langchain))
wrap("langchain", "llms.base.BaseLLM.agenerate", traced_llm_agenerate(langchain))
wrap("langchain", "chat_models.base.BaseChatModel.generate", traced_chat_model_generate(langchain))
wrap("langchain", "chat_models.base.BaseChatModel.agenerate", traced_chat_model_agenerate(langchain))
wrap("langchain", "chains.base.Chain.__call__", traced_chain_call(langchain))
wrap("langchain", "chains.base.Chain.acall", traced_chain_acall(langchain))
# Text embedding models override two abstract base methods instead of super calls, so we need to
# wrap each langchain-provided text embedding model.
for text_embedding_model in text_embedding_models:
if hasattr(langchain.embeddings, text_embedding_model):
# Ensure not double patched, as some Embeddings interfaces are pointers to other Embeddings.
if not isinstance(
deep_getattr(langchain.embeddings, "%s.embed_query" % text_embedding_model), wrapt.ObjectProxy
):
wrap("langchain", "embeddings.%s.embed_query" % text_embedding_model, traced_embedding(langchain))
if not isinstance(
deep_getattr(langchain.embeddings, "%s.embed_documents" % text_embedding_model), wrapt.ObjectProxy
):
wrap("langchain", "embeddings.%s.embed_documents" % text_embedding_model, traced_embedding(langchain))
# TODO: langchain >= 0.0.209 includes async embedding implementation (only for OpenAI)
# We need to do the same with Vectorstores.
for vectorstore in vectorstore_classes:
if hasattr(langchain.vectorstores, vectorstore):
# Ensure not double patched, as some Vectorstore interfaces are pointers to other Vectorstores.
if not isinstance(
deep_getattr(langchain.vectorstores, "%s.similarity_search" % vectorstore), wrapt.ObjectProxy
):
wrap(
"langchain", "vectorstores.%s.similarity_search" % vectorstore, traced_similarity_search(langchain)
)
def unpatch():
if not getattr(langchain, "_datadog_patch", False):
return
langchain._datadog_patch = False
unwrap(langchain.llms.base.BaseLLM, "generate")
unwrap(langchain.llms.base.BaseLLM, "agenerate")
unwrap(langchain.chat_models.base.BaseChatModel, "generate")
unwrap(langchain.chat_models.base.BaseChatModel, "agenerate")
unwrap(langchain.chains.base.Chain, "__call__")
unwrap(langchain.chains.base.Chain, "acall")
for text_embedding_model in text_embedding_models:
if hasattr(langchain.embeddings, text_embedding_model):
if isinstance(
deep_getattr(langchain.embeddings, "%s.embed_query" % text_embedding_model), wrapt.ObjectProxy
):
unwrap(getattr(langchain.embeddings, text_embedding_model), "embed_query")
if isinstance(
deep_getattr(langchain.embeddings, "%s.embed_documents" % text_embedding_model), wrapt.ObjectProxy
):
unwrap(getattr(langchain.embeddings, text_embedding_model), "embed_documents")
for vectorstore in vectorstore_classes:
if hasattr(langchain.vectorstores, vectorstore):
if isinstance(
deep_getattr(langchain.vectorstores, "%s.similarity_search" % vectorstore), wrapt.ObjectProxy
):
unwrap(getattr(langchain.vectorstores, vectorstore), "similarity_search")
delattr(langchain, "_datadog_integration")
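# Usage note (assumption, not part of this module): applications normally enable
# these hooks through ddtrace's standard entry points, e.g.
#   from ddtrace import patch
#   patch(langchain=True)
# or implicitly via `ddtrace-run`; calling patch()/unpatch() here directly is
# mostly useful in tests.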
| [
"prompt.template"
] |
2024-01-10 | giriharan13/SIH23 | WebsiteScrater~WebsiteScrater.py | from googlesearch import search
import requests
from bs4 import BeautifulSoup
import openai
import time
import os
from dotenv import load_dotenv
load_dotenv()
class WebsiteScrater: #Scraper+Rater=Scrater :)
API_KEY = os.getenv("API_KEY") # API KEY for the google cloud project
ID = os.getenv("ID") # ID for search engine
#Make sure you pass the topic during instantiation
def __init__(self,topic):
self.topic = topic
print(self.API_KEY,self.ID)
#This function can be used to reset the topic
def reset_topic(self,topic):
self.topic = topic
#This function takes a website's content and rates it on a scale of 1-5 using prompt engineering
def rate_content(self,content,topic,link):
openai.api_key = os.getenv("OPENAI_API")
prompt = f'''Topic:{topic} .On a scale of 1 to 5, rate the website's content. Deduct points for websites that primarily promote a course on the topic rather than providing informative content about the subject matter(note that if it provides some content then it's okay).You should only say the rating(in integer) and nothing else[important]-> '''+content[:15000]
try:
#return 5 # adding this to reduce the credit usage for now
response = openai.ChatCompletion.create(
model = "gpt-3.5-turbo",
messages = [ {"role":"user","content":prompt}]
)
#print("link:"+link)
#print(response)
except Exception as e:
print(e)
return "Limit Exceeded"
return response
#This function extracts only the text content from a website
def extract_content(self,data):
tags = data.find_all(['p','h1','h2','h3','h4','h5','h6','li','span'])
content = ""
for tag in tags:
content+=tag.get_text().strip()
#print(len(content))
return content
#This function extracts the soups for all the website links provided
def extract_soups(self,links):
soups = []
for link in links:
print(link)
req = requests.get(link).text
soup = BeautifulSoup(req,"lxml")
soups.append(soup)
#print(soup)
return soups
#This function gives the top 10 search results relevant to the topic provided
def get_links(self):
user_input='what is '+self.topic+"?"
self.query = user_input
url='https://www.googleapis.com/customsearch/v1'
params={
'q':user_input,
'key':self.API_KEY,
'cx':self.ID
}
response = requests.get(url, params=params)
result=response.json()
links = []
#print(result)
for item in result['items']:
links.append(item.get('link', ''))
return links
#This function returns the websites with their rating in a list ->[[website1,rating],[website2,rating],....]
def get_results(self):
#results = search(self.query,num_results=2)
#links = [link for link in results]
links = self.get_links()
soups = self.extract_soups(links=links)
contents = [self.extract_content(soup) for soup in soups]
websites_with_rating = []
for i in range(3):
response = self.rate_content(contents[i],self.query,links[i])
rating = 3 if type(response)==str or type(response)==int else int(response["choices"][0]["message"]["content"])
#print(links[i],rating,response)
websites_with_rating.append([links[i],rating])
#time.sleep(5)
ratings_json = {"data":websites_with_rating}
return ratings_json
#print(len(links))
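# Expected .env layout for load_dotenv() above (illustrative placeholders, not real keys):
#   API_KEY=<google-cloud-api-key>
#   ID=<programmable-search-engine-id>
#   OPENAI_API=<openai-api-key>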
if __name__=='__main__':
ws = WebsiteScrater("python")
print(ws.get_results()) | [
"Topic:PLACEHOLDER .On a scale of 1 to 5, rate the website's content. Deduct points for websites that primarily promote a course on the topic rather than providing informative content about the subject matter(note that if it provides some content then it's okay).You should only say the rating(in integer) and nothing else[important]-> content9f4ea001-d1d2-4bb0-a8fa-b1b895b15736",
"Topic:PLACEHOLDER .On a scale of 1 to 5, rate the website's content. Deduct points for websites that primarily promote a course on the topic rather than providing informative content about the subject matter(note that if it provides some content then it's okay).You should only say the rating(in integer) and nothing else[important]-> "
] |
2024-01-10 | coreweave/doc-examples | machine-learning~inference~mistral-7b~test-inference.py | import openai
# Replace <inference-url> with the URL shown by `kubectl get ksvc mistral-7b`.
openai.api_base = "<inference-url>" + "/v1"
# vLLM server is not authenticated.
openai.api_key = "none"
completion = openai.Completion.create(
model="mistralai/Mistral-7B-v0.1",
prompt="The mistral is",
temperature=0.7,
max_tokens=200, stop=".")
print(completion.to_dict_recursive())
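# Illustrative value only (an assumption, not from the original doc): a Knative
# service URL typically looks like "http://mistral-7b.<namespace>.<cluster-domain>",
# so api_base would end up as e.g. "http://mistral-7b.default.example.com/v1".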
| [
"The mistral is"
] |
2024-01-10 | jaredcrace/cracecasts | examples~ai_secretary~ai_secretary_main.py | from langchain.chat_models.openai import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage
from icecream import ic
import flet as ft
import dotenv
dotenv.load_dotenv()
BACKGROUND = '''
You are an assistant that will be used to schedule appointments for an attorney.
You have been provided csv formatted data that is his schedule and you are in charge of booking appointments from clients.
His working hours are Monday through Friday from 8am - 5pm. Do not book any times outside of this.
'''
DATA_FILE= "good_sample.csv"
def ask_ai(prompt):
openai = ChatOpenAI(model_name="gpt-3.5-turbo", max_tokens=2048, temperature=0)
# load schedule for context into model
with open(DATA_FILE, 'r') as file:
file_data = file.read()
# build the system message of the prompt
full_system_msg = f"""
{BACKGROUND}
His schedule is:
{file_data}
"""
ic(full_system_msg)
ic(prompt)
messages = [
SystemMessage(content=full_system_msg),
HumanMessage(content=prompt)
]
# call to openai
response = openai(messages)
ic(response)
return response.content
def main_ai(from_user):
response = "Nothing"
determine_schedule_question = f'''
Does the following statement ask to schedule an appointment? Please answer the following way:
- If the statement is asking to schedule an appointment, has a start time, and has an end time. Respond with: YES
- If the statement is asking to schedule an appointment, but it does not have a start or end time. Respond with: INCOMPLETE
- If the statement is not asking to schedule an appointment. Respond with: NO
All responses should only be one word.
Statement:
{from_user}
'''
general_question = f'''
Background:
{BACKGROUND}
Please answer the following or reply to the given statement:
{from_user}
'''
# Ask AI: is this a question to book an appointment? Using AI here as a powerful helper function.
is_schedule_question = ask_ai(determine_schedule_question)
ic(is_schedule_question)
if 'NO' in is_schedule_question:
ic('answer was no')
# Ask AI: ask this general question or reply to this given statement
general_ans = ask_ai(general_question)
ic(general_ans)
response = general_ans
elif 'INCOMPLETE' in is_schedule_question:
ic('answer was incomplete')
# let user know
response = 'you did not provide the right input such as date, start and end time'
elif 'YES' in is_schedule_question:
# ask the AI to format the request pulling out the answer
csv_extraction = f'''
I'm going to provide a statement and I need you to create a csv output string of the answer.
The time should be of this format: 2023-10-20T13:00:00Z.
The 1 line csv string should adhere to the following example: "title", "start time", "end time".
The title should include the name and contact information of the person.
Statement:
{from_user}
'''
# send the extraction prompt to the AI
csv_ans = ask_ai(csv_extraction)
# append to the file
with open(DATA_FILE, 'a') as file:
file.write(csv_ans + '\n')
response = "successfully scheduled"
else:
error_case = 'Error - did not understand anything'
ic(error_case)
response = error_case
return response
###############################################
# flet GUI
###############################################
def first_app(page: ft.Page):
client_ask = ft.TextField(label="client ask", autofocus=True)
client_answer = ft.Column()
def btn_click(e):
ic(client_ask.value)
response = "-"*112 + "\n" + main_ai(client_ask.value)
client_answer.controls.append(ft.Text(f"{response}"))
client_ask.value = ""
page.update()
client_ask.focus()
def btn_clear(e):
client_answer.controls.clear()
page.update()
page.add(
client_ask,
ft.ElevatedButton(f"Ask Secretary", on_click=btn_click),
ft.ElevatedButton(f"Clear", on_click=btn_clear),
client_answer,
)
ft.app(target=first_app, view=ft.AppView.WEB_BROWSER)
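# Assumed shape of good_sample.csv (illustrative, inferred from the csv_extraction
# prompt above): one appointment per line, e.g.
#   "Initial consult - Jane Doe, 555-0100", "2023-10-20T13:00:00Z", "2023-10-20T14:00:00Z"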
| [] |
2024-01-10 | jaredcrace/cracecasts | examples~easy_web_scraper~url_query.py | from langchain.document_loaders import WebBaseLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.chat_models.openai import ChatOpenAI
from datetime import datetime
def web_qa(url_list, query, out_name):
openai = ChatOpenAI(
model_name="gpt-4",
max_tokens=2048
)
loader_list = []
for i in url_list:
print('loading url: %s' % i)
loader_list.append(WebBaseLoader(i))
index = VectorstoreIndexCreator().from_loaders(loader_list)
ans = index.query(question=query,
llm=openai)
print("")
print(ans)
outfile_name = out_name + datetime.now().strftime("%m-%d-%y-%H%M%S") + ".out"
with open(outfile_name, 'w') as f:
f.write(ans)
url_list = [
"https://openaimaster.com/how-to-use-ideogram-ai/",
"https://dataconomy.com/2023/08/28/what-is-ideogram-ai-and-how-to-use-it/",
"https://ideogram.ai/launch",
"https://venturebeat.com/ai/watch-out-midjourney-ideogram-launches-ai-image-generator-with-impressive-typography/"
]
prompt = '''
Given the context, please provide the following:
1. summary of what it is
2. summary of what it does
3. summary of how to use it
4. Please provide 5 interesting prompts that could be used with this AI.
'''
web_qa(url_list, prompt, "summary")
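# Runtime assumptions (not stated in the original file): OPENAI_API_KEY must be set
# in the environment for ChatOpenAI, and VectorstoreIndexCreator also needs a vector
# store backend plus an embedding model (Chroma and OpenAIEmbeddings by default in
# this era of langchain), so building the index itself makes OpenAI API calls.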
| [
"\n Given the context, please provide the following:\n 1. summary of what it is\n 2. summary of what it does\n 3. summary of how to use it\n 4. Please provide 5 interesting prompts that could be used with this AI.\n"
] |
2024-01-10 | arsh09/openai_examples_projects | my_reflex_test_openai~scripts~run_algo.py | #!/usr/bin/env python3.6
import sys
print("in run_algo: Python version: ", sys.version)
# if sys.version_info[0] < 3:
# raise Exception("Must be using Python 3 on ROS")
import rospy
import rospkg
import random
# from home.roboticlab14.Documents.Git.baselines.baselines import deepq
# /home/roboticlab14/Documents/Git/baselines
# import sys
# sys.path.append('/home/roboticlab14/Documents/Git/baselines/baselines')
# from openai_ros.task_envs.iiwa_tasks import iiwa_move
import gym
from gym import wrappers
from gym.envs.registration import register
import roslaunch
import os
import git
import numpy
# import sys
# import baselines #import PPO2
# from stable_baselines.common.policies import MlpPolicy
# from stable_baselines import PPO2
# from baselines import ppo2
# import importlib.util
# spec = importlib.util.spec_from_file_location("baselines", "home/roboticlab14/Documents/Git/baselines")
# foo = importlib.util.module_from_spec(spec)
# spec.loader.exec_module(foo)
# foo.MyClass()
def start_learning():
print("I am in script run algo")
def random_action():
a = []
for i in range(0, 6):
a.append(random.uniform(-0.1, 0.1))
# i = i+1
a[2]=-0.05
return a
if __name__ == '__main__':
rospy.init_node('start_python3', anonymous=True, log_level=rospy.WARN)
# rospy.sleep(20.0)
# start_learning()
# start_learning()
start_learning()
# start_learning()
# start_learning()
timestep_limit_per_episode = 10000
max_episode = 10
# # Train a model
# # trained_model = ppo2('MlpPolicy', 'iiwaMoveEnv-v0').learn(total_timesteps=10000)
# register(
# id="iiwaMoveEnv-v0",
# entry_point='openai_ros.task_envs.iiwa_tasks.iiwa_move:iiwaMoveEnv',
# # timestep_limit=timestep_limit_per_episode, #old one...
# max_episode_steps=timestep_limit_per_episode,
# )
# # Create the Gym environment
# env = gym.make('iiwaMoveEnv-v0')
# # multiprocess environment
# # n_cpu = 4
# # env = SubprocVecEnv([lambda: gym.make('CartPole-v1') for i in range(n_cpu)])
# # register(
# # id="iiwaMoveEnv-v0",
# # entry_point='openai_ros.task_envs.iiwa_tasks.iiwa_move:iiwaMoveEnv',
# # # timestep_limit=timestep_limit_per_episode,
# # max_episode_steps=timestep_limit_per_episode,
# # )
# # env = gym.make('iiwaMoveEnv-v0')
# model = PPO2(MlpPolicy, env, verbose=1)
# model.learn(total_timesteps=25000)
# model.save("ppo2_cartpole_bim")
# start_learning()
# start_learning()
# start_learning()
# Where I thest EVERYTHING
# observation, reward, done, info
# for i in range(0, 9):
# # raw_input("Press Enter to continue...")
# a=random_action()
# # env.step(a)
# observation, reward, done, info = env.step(a)
# print("*********************************************")
# print("Observation: ", observation)
# print("Reward: ", reward)
# print("Done: ", done)
# print("Info: ", info)
# print("Action: ", a)
# print("*****")
# start_learning() | [] |
2024-01-10 | arsh09/openai_examples_projects | my_hopper_openai_example~scripts~start_qlearning.py | #!/usr/bin/env python
import gym
import numpy
import time
import qlearn
from gym import wrappers
from gym.envs.registration import register
# ROS packages required
import rospy
import rospkg
# import our training environment
from openai_ros.task_envs.hopper import hopper_stay_up
if __name__ == '__main__':
timestep_limit_per_episode = 10000
rospy.init_node('hopper_stay_up_qlearn', anonymous=True, log_level=rospy.WARN)
register(
id="HopperStayUp-v0",
entry_point='openai_ros.task_envs.hopper.hopper_stay_up:HopperStayUpEnv',
# timestep_limit=timestep_limit_per_episode,
max_episode_steps=timestep_limit_per_episode,
)
# Create the Gym environment
env = gym.make('HopperStayUp-v0')
rospy.loginfo("Gym environment done")
# Set the logging system
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('my_hopper_openai_example')
outdir = pkg_path + '/training_results'
env = wrappers.Monitor(env, outdir, force=True)
rospy.loginfo("Monitor Wrapper started")
last_time_steps = numpy.ndarray(0)
# Loads parameters from the ROS param server
# Parameters are stored in a yaml file inside the config directory
# They are loaded at runtime by the launch file
Alpha = rospy.get_param("/monoped/alpha")
Epsilon = rospy.get_param("/monoped/epsilon")
Gamma = rospy.get_param("/monoped/gamma")
epsilon_discount = rospy.get_param("/monoped/epsilon_discount")
nepisodes = rospy.get_param("/monoped/nepisodes")
nsteps = rospy.get_param("/monoped/nsteps")
# Initialises the algorithm that we are going to use for learning
qlearn = qlearn.QLearn(actions=range(env.action_space.n),
alpha=Alpha, gamma=Gamma, epsilon=Epsilon)
initial_epsilon = qlearn.epsilon
start_time = time.time()
highest_reward = 0
# Starts the main training loop: the one about the episodes to do
for x in range(nepisodes):
rospy.logdebug("############### START EPISODE=>" + str(x))
cumulated_reward = 0
done = False
if qlearn.epsilon > 0.05:
qlearn.epsilon *= epsilon_discount
# Initialize the environment and get first state of the robot
observation = env.reset()
state = ''.join(map(str, observation))
# Show on screen the actual situation of the robot
# env.render()
# for each episode, we test the robot for nsteps
for i in range(nsteps):
rospy.logwarn("############### Start Step=>" + str(i))
# Pick an action based on the current state
action = qlearn.chooseAction(state)
rospy.logwarn("Next action is:%d", action)
# Execute the action in the environment and get feedback
observation, reward, done, info = env.step(action)
rospy.logwarn(str(observation) + " " + str(reward))
cumulated_reward += reward
if highest_reward < cumulated_reward:
highest_reward = cumulated_reward
nextState = ''.join(map(str, observation))
# Make the algorithm learn based on the results
rospy.logwarn("# state we were=>" + str(state))
rospy.logwarn("# action that we took=>" + str(action))
rospy.logwarn("# reward that action gave=>" + str(reward))
rospy.logwarn("# episode cumulated_reward=>" + str(cumulated_reward))
rospy.logwarn("# State in which we will start next step=>" + str(nextState))
qlearn.learn(state, action, reward, nextState)
if not (done):
rospy.logwarn("NOT DONE")
state = nextState
else:
rospy.logwarn("DONE")
last_time_steps = numpy.append(last_time_steps, [int(i + 1)])
break
rospy.logwarn("############### END Step=>" + str(i))
#raw_input("Next Step...PRESS KEY")
# rospy.sleep(2.0)
m, s = divmod(int(time.time() - start_time), 60)
h, m = divmod(m, 60)
rospy.logerr(("EP: " + str(x + 1) + " - [alpha: " + str(round(qlearn.alpha, 2)) + " - gamma: " + str(
round(qlearn.gamma, 2)) + " - epsilon: " + str(round(qlearn.epsilon, 2)) + "] - Reward: " + str(
cumulated_reward) + " Time: %d:%02d:%02d" % (h, m, s)))
rospy.loginfo(("\n|" + str(nepisodes) + "|" + str(qlearn.alpha) + "|" + str(qlearn.gamma) + "|" + str(
initial_epsilon) + "*" + str(epsilon_discount) + "|" + str(highest_reward) + "| PICTURE |"))
l = last_time_steps.tolist()
l.sort()
# print("Parameters: a="+str)
rospy.loginfo("Overall score: {:0.2f}".format(last_time_steps.mean()))
rospy.loginfo("Best 100 score: {:0.2f}".format(reduce(lambda x, y: x + y, l[-100:]) / len(l[-100:])))
env.close() | [] |
2024-01-10 | arsh09/openai_examples_projects | my_reflex_test_openai~scripts~start_qlearning_reflex.py | #!/usr/bin/env python2
import sys
# if sys.version_info[0] < 3:
# raise Exception("Must be using Python 3 on ROS")
# import tensorflow as tf
import gym
import numpy as np
import time
# import qlearn
import random
from gym import wrappers
from gym.envs.registration import register
# ROS packages required
import rospy
import rospkg
# import our training environment
# from openai_ros.task_envs.iiwa_tasks import iiwa_move
# from openai_ros.task_envs.hopper import hopper_stay_up
# import pickle, os
# from baselines import PPO2
# from run_algo import start_learning
import subprocess
# For the launch
import roslaunch
import os
import git
import sys
# save part
import pickle
import matplotlib.pyplot as plt
import termios, tty # for keyboard
# Global variables:
here = os.path.dirname(os.path.abspath(__file__))
# Functions
def random_action():
a = []
for i in range(0, 6):
a.append(random.uniform(-0.1, 0.1))
# i = i+1
a[2]=-0.05
return a
def defined_action():
a = []
for i in range(0, 6):
        if i in (0, 1, 2):  # small negative step on x, y and z only
a.append(-0.01)
else:
a.append(0.0)
# i = i+1
# a[2]=-0.05
return a
def discrete_action(action):
    '''
    Map the requested action index to a discretized Cartesian step, to simplify the problem:
    0 = + step_size * x
    1 = - step_size * x
    2 = + step_size * y
    3 = - step_size * y
    4 = + step_size * z
    5 = - step_size * z
    '''
a = [0, 0, 0, 0, 0, 0]
step_size = 0.01
if action == 0:
a[action] = step_size
elif action == 1:
a[0] = -step_size
elif action == 2:
a[1] = step_size
elif action == 3:
a[1] = -step_size
elif action == 4:
a[2] = step_size
elif action == 5:
a[2] = -step_size
return a
# Transfer the position of the robot to a state allowing q-learning with q table
def env_to_state(x, y, z):
state = 0
x = (int(10*x) + 8)
y = (int(10*y) + 8) * 17
z = (int(10*z)) * 289
state = x+y+z
return state
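
# Worked example for env_to_state (a sketch; the offsets above suggest x and y are
# expected roughly in [-0.8, 0.8] and z in [0, ~1.4]):
#   env_to_state(0.0, 0.0, 1.0) -> (0 + 8) + (0 + 8) * 17 + 10 * 289 = 3034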
# save the data
def save(list_demos, name="demos_default_name"):
'''
    Save a list to a .pkl file on the hard disk. Return whether the save succeeded.
'''
global here
saved = False
try:
name = name + ".pkl"
with open(os.path.join(here, name), 'wb') as f:
pickle.dump(list_demos, f, protocol=pickle.HIGHEST_PROTOCOL)
saved = True
except:
print("ERROR: Couldn't save the file .pkl")
return saved
def save_txt(number_dones):
'''
    Save a summary in a .txt file.
'''
name = "saves/dones.txt"
file = open(os.path.join(here, name),'w')
file.write("##############################################################")
file.write("###################### Summary #############################")
file.write("##############################################################")
file.write("Number of done: ")
file.write(str(number_dones))
file.write("\n")
file.close()
# Load the data
# TODO
def load(name="demos_default_name"):
'''
Load the needed data
'''
name = name + ".pkl"
with open(name, 'rb') as f:
return pickle.load(f)
def init_env():
'''
Init the environment
'''
# Cheating with the registration
# (shortcut: look at openai_ros_common.py and task_envs_list.py)
timestep_limit_per_episode = 10000
register(
id="iiwaMoveEnv-v0",
entry_point='openai_ros.task_envs.iiwa_tasks.iiwa_move:iiwaMoveEnv',
max_episode_steps=timestep_limit_per_episode,
)
# Create the Gym environment
env = gym.make('iiwaMoveEnv-v0')
print("Gym environment done")
return env
# Using continue actions
def test_function_cont(env, max_steps):
# Where I test EVERYTHING
# observation, reward, done, info
for i in range(0, max_steps):
# raw_input("Press Enter to continue...")
# a=random_action()
a = defined_action()
# env.step(a)
observation, reward, done, info = env.step(a)
print("*********************************************")
print("Observation: ", observation)
print("Reward: ", reward)
print("Done: ", done)
print("Info: ", info)
print("Action: ", a)
print("*********************************************")
# Test discrete action
def test_function_discrete(env, max_steps):
for i in range(0, max_steps):
action = env.action_space.sample()
# print(action)
discrete_action_vector = discrete_action(action)
observation, reward, done, info = env.step(discrete_action_vector)
print("*********************************************")
print("Observation: ", observation)
print("Reward: ", reward)
print("Done: ", done)
print("Info: ", info)
print("Action: ", action)
print("*********************************************")
def plot2d(increment, reward):
'''
Reward history.
'''
print("plotting")
fig = plt.figure()
plt.plot(increment, reward)
plt.xlabel('Iterations')
plt.ylabel('Reward')
plt.title('Reward over time')
plt.savefig('reward.jpg')
plt.show()
# time.sleep(5.0)
plt.close(fig)
def qlearning(env):
# General Parameters
max_episode = 100
max_steps = 25
MAX_EPISODES = max_episode # Number of episode
action_size = env.action_space.n
state_size = 4046
bool_save = True
# # Q = np.zeros([env.observation_space.n,env.action_space.n])
Q_table = np.zeros((state_size, action_size))
# print Q_table
# Table with the reward and episode for ploting
rewards_list = []
episode_list = []
# Parameters qlearning
ALPHA = 0.8
GAMMA = 0.95
EPSILON = 1.0
MAX_EPSILON = 1.0
MIN_EPSILON = 0.01
DECAY_RATE = 0.005
done_increment = 0
# time_start = rospy.Time.now()
for episode in range(MAX_EPISODES):
# time_episode_start = rospy.Time.now()
# Save the different data
if episode % 20 == 0 and bool_save:
save(Q_table, name=("saves/qtable_qlearning_"+str(episode)))
save(rewards_list, name=("saves/rewards_list_qlearning_"+str(episode)))
save(episode_list, name=("saves/episode_list_qlearning_"+str(episode)))
save_txt(done_increment)
# plot2d(episode_list, rewards_list)
# print("FILE SAVED!!!")
observation = env.reset()
# rospy.sleep(5.0)
# To be sure we can save reset the env while the robot is moving
# rospy.sleep(1.0)
state = env_to_state(observation[0], observation[1], observation[2])
# Counter an sum to reset
step = 0
done = False
total_rewards = 0
# Loop where the robot move and learn
while (not done) and (step <= max_steps):
if random.uniform(0, 1) < EPSILON:
discrete_act = env.action_space.sample()
else:
discrete_act = np.argmax(Q_table[state, :])
# Convert the action to discrete action
action = discrete_action(discrete_act)
# Do the step in the world
new_observation, reward, done, info = env.step(action)
# New observation to state
new_state = env_to_state(new_observation[0], new_observation[1], new_observation[2])
print("*********************************************")
print("Observation: ", observation)
print("State: ", state)
print("Reward: ", reward)
print("Done: ", done)
print("# dones: ", done_increment)
print("Info: ", info)
print("Action: ", action)
print("Episode: ", episode)
print("Step: ", step)
print("*********************************************")
            # Q-learning update: Q(s,a) <- Q(s,a) + ALPHA * (reward + GAMMA * max_a' Q(s',a') - Q(s,a))
q_predict = Q_table[state, discrete_act]
if done:
q_target = reward
done_increment +=1
else:
q_target = reward + GAMMA * np.max(Q_table[new_state, :])
Q_table[state, discrete_act] += ALPHA * (q_target - q_predict)
# Update the observation, reward, step
observation = new_observation
state = new_state
total_rewards += reward
step += 1
# rospy.sleep(0.1)
#End of the robot movement (reset the world for different reason done or max_step reached)
EPSILON = MIN_EPSILON + (MAX_EPSILON - MIN_EPSILON) * np.exp(-DECAY_RATE * episode)
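        # Rough decay profile with these constants (a sketch): EPSILON starts at 1.0,
        # is ~0.61 around episode 100, and approaches MIN_EPSILON (0.01) for large
        # episode counts, so exploration gradually gives way to exploitation.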
# print(EPSILON)
rewards_list.append(total_rewards)
episode_list.append(episode)
# End of the qlearning
# Main function:
def main():
# Begining of the script
print("Python version: ", sys.version)
rospy.init_node('start_qlearning_reflex', anonymous=True, log_level=rospy.WARN)
env = init_env()
env.reset()
# save_txt(5)
# qlearning(env)
# env.reset()
# rospy.sleep(3.0)
# # Move by catching
# env.step([0.5, 0.0, 0.25, 3.14, 0.0, 0.0])
# rospy.sleep(15.0)
# env.step([0.5, 0.0, 0.50, 3.14, 0.0, 0.0])
# env.step([0.5, -0.2, 0.150, 3.14, 0.0, 0.0])
# Move by sliding
# env.step([0.5, 0.5, 0.15, 3.14, 0.0, 0.0])#1.57
# env.step([0.5, 0.25, 0.15, 3.14, 0.0, 0.0])
# env.step([0.5, 0.0, 0.150, 3.14, 0.0, 0.0])
# env.step([0.5, -0.2, 0.150, 3.14, 0.0, 0.0])
# print("between 2 actions")
# env.step([0.5, 0.0, 0.2, 0.0, 0.0, 0.0])
# print("between 2 actions")
# env.reset()
rospy.sleep(150.0)
print("Close node: start_qlearning_reflex.py")
print("Close node: start_qlearning_reflex.py")
print("Close node: start_qlearning_reflex.py")
print("Close node: start_qlearning_reflex.py")
print("Close node: start_qlearning_reflex.py")
print("Close node: start_qlearning_reflex.py")
print("Close node: start_qlearning_reflex.py")
# env.close()
if __name__ == '__main__':
main()
# Test functions:
def hystory_test_function(env):
# # tf.__version__
# # Parameters
# timestep_limit_per_episode = 10000
# max_episode = 600
# max_steps = 100
# # print(env_to_state(0.8, 0.8, 1.3))
# # Cheating with the registration
# # (shortcut: look at openai_ros_common.py and task_envs_list.py)
# register(
# id="iiwaMoveEnv-v0",
# entry_point='openai_ros.task_envs.iiwa_tasks.iiwa_move:iiwaMoveEnv',
# # timestep_limit=timestep_limit_per_episode, #old one...
# max_episode_steps=timestep_limit_per_episode,
# )
# # Create the Gym environment
# env = gym.make('iiwaMoveEnv-v0')
# # rospy.loginfo("Gym environment done")
# print("Gym environment done")
# print(tf.__version__) # test for tensorflow
# # state_size = env.observation_space.n
# # action_size = env.action_space.n
# # print("Ovservation space: ", state_size)
# # print("Action space: ", action_size)
# # rospy.sleep(10.0)
# # print("Before reset the env" )
# env.reset()
# # print("After reset the env" )
# action = env.action_space.sample()
# print(action)
# discrete_action_vector = discrete_action(action)
# observation, reward, done, info = env.step(discrete_action_vector)
# print("*********************************************")
# print("Observation: ", observation)
# print("Reward: ", reward)
# print("Done: ", done)
# print("Info: ", info)
# print("Action: ", action)
# print("*********************************************")
# rospy.sleep(3.0)
# print("*********************************************")
# print("Observation: ", observation)
# print("Reward: ", reward)
# print("Done: ", done)
# print("Info: ", info)
# print("Action: ", action)
# print("*********************************************")
#TRY Q
# print("Action 1" )
# # action = [-0.1, -0.1, -0.1, 1.3, 0.1, 0.5]
# action = [-0.1, -0.1, -0.1, 0.0, 0.0, 0.0]
# observation, reward, done, info = env.step(action)
# print("*********************************************")
# print("Observation: ", observation)
# print("Reward: ", reward)
# print("Done: ", done)
# print("Info: ", info)
# print("Action: ", action)
# print("*********************************************")
# # print("Set Action: " + str(action))
# # env.step(action)
# rospy.sleep(10.0)
# print("Action 2" )
# # action2 = [0.2, 0.2, 0.2, -1.3, -0.1, -0.5]
# action2 = [0.3, -0.1, -0.2, 0.0, 0.0, 0.0]
# observation, reward, done, info = env.step(action2)
# print("*********************************************")
# print("Observation: ", observation)
# print("Reward: ", reward)
# print("Done: ", done)
# print("Info: ", info)
# print("Action: ", action2)
# print("*********************************************")
# # print("Set Action: " + str(action2))
# # env.step(action2)
# print("Action are sent")
# print("Before reset the env" )
# env.reset()
# print("After reset the env" )
# agent = DQNRobotSolver(environment_name,
# n_observations,
# n_actions,
# n_win_ticks,
# min_episodes,
# max_env_steps,
# gamma,
# epsilon,
# epsilon_min,
# epsilon_log_decay,
# alpha,
# alpha_decay,
# batch_size,
# monitor,
# quiet)
# agent.run(num_episodes=n_episodes_training, do_train=True)
# Define and train a model in one line of code !
# trained_model = PPO2('MlpPolicy', 'CartPole-v1').learn(total_timesteps=10000)
# you can then access the gym env using trained_model.get_env()
# env._set_action(action)
# # Set the logging system
# rospack = rospkg.RosPack()
# pkg_path = rospack.get_path('my_hopper_openai_example')
# outdir = pkg_path + '/training_results'
# env = wrappers.Monitor(env, outdir, force=True)
# rospy.loginfo("Monitor Wrapper started")
# # Where I test EVERYTHING
# # observation, reward, done, info
# for i in range(0, 9):
# # raw_input("Press Enter to continue...")
# # a=random_action()
# a = defined_action()
# # env.step(a)
# observation, reward, done, info = env.step(a)
# print("*********************************************")
# print("Observation: ", observation)
# print("Reward: ", reward)
# print("Done: ", done)
# print("Info: ", info)
# print("Action: ", a)
# print("*********************************************")
# start_learning()
# script = ["python3.6", "/home/roboticlab14/catkin_ws/src/openai_examples_projects/my_reflex_test_openai/scripts/run_algo.py"]
# process = subprocess.Popen(" ".join(script),
# shell=True
# # env={"PYTHONPATH": "."}
# )
# python3_command = ["python3.6", "/home/roboticlab14/catkin_ws/src/openai_examples_projects/my_reflex_test_openai/scripts/run_algo.py"] # launch your python2 script using bash
# process = subprocess.Popen(python3_command, stdout=subprocess.PIPE, shell=True)
# output, error = process.communicate() # receive output from the python2 script
# print("Before reset the env" )
# env.reset()
# print("After reset the env" )
# for i in range(0,10):
# a=random_action()
# env.step(a)
# print(a)
# print("Before reset the env" )
# env.reset()
# print("After reset the env" )
# To never finish
# while True:
# a=1
# a=random_action()
# env.step(a)
# print()
# For testing
# for episode in range(max_episode):
# observation = env.reset()
# print(episode)
print("Close node: start_qlearning_reflex.py") | [] |
2024-01-10 | arsh09/openai_examples_projects | my_reflex_test_openai~scripts~start_qlearning.py | #!/usr/bin/env python3.6
import gym
import numpy
import time
import qlearn
from functools import reduce  # reduce is not a builtin on Python 3
from gym import wrappers
# ROS packages required
import rospy
import rospkg
# import our training environment
from openai_ros.task_envs.hopper import hopper_stay_up
if __name__ == '__main__':
rospy.init_node('hopper_stay_up_qlearn', anonymous=True, log_level=rospy.WARN)
# Create the Gym environment
env = gym.make('HopperStayUp-v0')
rospy.loginfo("Gym environment done")
# Set the logging system
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('my_hopper_openai_example')
outdir = pkg_path + '/training_results'
env = wrappers.Monitor(env, outdir, force=True)
rospy.loginfo("Monitor Wrapper started")
last_time_steps = numpy.ndarray(0)
# Loads parameters from the ROS param server
# Parameters are stored in a yaml file inside the config directory
# They are loaded at runtime by the launch file
Alpha = rospy.get_param("/monoped/alpha")
Epsilon = rospy.get_param("/monoped/epsilon")
Gamma = rospy.get_param("/monoped/gamma")
epsilon_discount = rospy.get_param("/monoped/epsilon_discount")
nepisodes = rospy.get_param("/monoped/nepisodes")
nsteps = rospy.get_param("/monoped/nsteps")
# Initialises the algorithm that we are going to use for learning
qlearn = qlearn.QLearn(actions=range(env.action_space.n),
alpha=Alpha, gamma=Gamma, epsilon=Epsilon)
initial_epsilon = qlearn.epsilon
start_time = time.time()
highest_reward = 0
# Starts the main training loop: the one about the episodes to do
for x in range(nepisodes):
rospy.logdebug("############### START EPISODE=>" + str(x))
cumulated_reward = 0
done = False
if qlearn.epsilon > 0.05:
qlearn.epsilon *= epsilon_discount
# Initialize the environment and get first state of the robot
observation = env.reset()
state = ''.join(map(str, observation))
# Show on screen the actual situation of the robot
# env.render()
# for each episode, we test the robot for nsteps
for i in range(nsteps):
rospy.logwarn("############### Start Step=>" + str(i))
# Pick an action based on the current state
action = qlearn.chooseAction(state)
rospy.logwarn("Next action is:%d", action)
# Execute the action in the environment and get feedback
observation, reward, done, info = env.step(action)
rospy.logwarn(str(observation) + " " + str(reward))
cumulated_reward += reward
if highest_reward < cumulated_reward:
highest_reward = cumulated_reward
nextState = ''.join(map(str, observation))
# Make the algorithm learn based on the results
rospy.logwarn("# state we were=>" + str(state))
rospy.logwarn("# action that we took=>" + str(action))
rospy.logwarn("# reward that action gave=>" + str(reward))
rospy.logwarn("# episode cumulated_reward=>" + str(cumulated_reward))
rospy.logwarn("# State in which we will start next step=>" + str(nextState))
qlearn.learn(state, action, reward, nextState)
if not (done):
rospy.logwarn("NOT DONE")
state = nextState
else:
rospy.logwarn("DONE")
last_time_steps = numpy.append(last_time_steps, [int(i + 1)])
break
rospy.logwarn("############### END Step=>" + str(i))
#raw_input("Next Step...PRESS KEY")
# rospy.sleep(2.0)
m, s = divmod(int(time.time() - start_time), 60)
h, m = divmod(m, 60)
rospy.logerr(("EP: " + str(x + 1) + " - [alpha: " + str(round(qlearn.alpha, 2)) + " - gamma: " + str(
round(qlearn.gamma, 2)) + " - epsilon: " + str(round(qlearn.epsilon, 2)) + "] - Reward: " + str(
cumulated_reward) + " Time: %d:%02d:%02d" % (h, m, s)))
rospy.loginfo(("\n|" + str(nepisodes) + "|" + str(qlearn.alpha) + "|" + str(qlearn.gamma) + "|" + str(
initial_epsilon) + "*" + str(epsilon_discount) + "|" + str(highest_reward) + "| PICTURE |"))
l = last_time_steps.tolist()
l.sort()
# print("Parameters: a="+str)
rospy.loginfo("Overall score: {:0.2f}".format(last_time_steps.mean()))
rospy.loginfo("Best 100 score: {:0.2f}".format(reduce(lambda x, y: x + y, l[-100:]) / len(l[-100:])))
env.close() | [] |
2024-01-10 | JacobKaczmarek/ultimate-frisbee-rules-rag | src~ultimate_frisbee_rules_rag~pipelines~initialise_vectorstore.py | from dotenv import load_dotenv
from langchain.document_loaders import DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Pinecone
import os
import pinecone
from ultimate_frisbee_rules_rag import logger
from ultimate_frisbee_rules_rag.config.configuration import ConfigurationManager
class InitialiseVectorStorePipeline:
def __init__(self):
self.config = ConfigurationManager().get_initialise_vectorstore_config()
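
    # NOTE: a minimal sketch of the missing initialise() step, inferred from the
    # imports above; the config attribute names (data_path, chunk_size, index_name)
    # and the environment variable names are assumptions, not part of the original code.
    def initialise(self):
        load_dotenv()
        documents = DirectoryLoader(self.config.data_path).load()
        splitter = RecursiveCharacterTextSplitter(chunk_size=self.config.chunk_size, chunk_overlap=0)
        chunks = splitter.split_documents(documents)
        pinecone.init(api_key=os.environ["PINECONE_API_KEY"], environment=os.environ["PINECONE_ENVIRONMENT"])
        Pinecone.from_documents(chunks, OpenAIEmbeddings(), index_name=self.config.index_name)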
    def main(self):
        # The configuration is already loaded in __init__, so run the initialisation step directly.
        self.initialise()
if __name__ == "__main__":
try:
logger.info("Initialising vectorstore")
InitialiseVectorStorePipeline().main()
logger.info("Initialised vectorstore")
except Exception as e:
logger.exception(f"Error while initialising vectorstore: {e}")
raise e
| [] |
2024-01-10 | gohils/zemr_notebook | streamlit-apps~sql_text_v1.py | import streamlit as st
import openai
import os
import csv
from io import StringIO
# Set your OpenAI API key
openai.api_key = os.environ.get("OPENAI_API_KEY")
# Open AI - Chat GPT API
def extract_table_details(sql_script):
# prompt1 = f"Given a complex SQL script:\n{sql_script}\n\nPlease provide details about each table used, including the table name, column names, and any corresponding filter conditions for each column. Organize the output in a tabular form and save it into a CSV file."
prompt = f"Given a complex SQL script:\n{sql_script}\n\n Provide details about each table used, including the table name, column names, any corresponding filter conditions for each column, and an indicator if the table is a derived table. Present the information in a tabular form with the schema 'table_name | column_name | column_filter_condition | derived_table_indicator.' "
# prompt=f"The following is a complex SQL script called sql_script. Provide details about each table used, including the table name, column names, any corresponding filter conditions for each column, and an indicator if the table is a derived table. Present the information in a tabular form with the schema 'table_name | column_name | column_filter_condition | derived_table_indicator.' ---------------- {sql_script}",
messages = [{"role": "user", "content": prompt}]
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=0.0
)
message_text = response['choices'][0]['message']['content']
return message_text
except Exception as e:
print("Error in extract_table_details:", e) # Add this line
return
def main():
st.title("SQL Details Extraction with OpenAI GPT-3")
# Upload SQL script file
uploaded_file = st.file_uploader("Upload a SQL script file", type=["txt"])
if uploaded_file is not None:
# Read file content
sql_script_content = uploaded_file.read().decode('utf-8')
# # Display SQL script content
# st.subheader("Uploaded SQL Script Content:")
# st.text(sql_script_content)
# Button to generate details
if st.button("Process input"):
with st.spinner("Extracting Table Details..."):
# Get details from OpenAI API
details_text = extract_table_details(sql_script_content)
# Display details
st.subheader("Table Details:")
st.text(details_text)
output_filename = "table_details.txt"
# Save details to a text file
with open(output_filename, "w", encoding="utf-8") as file:
file.write(details_text)
# Provide download link for the text file
st.subheader("Download Table Details Text:")
st.download_button(
label="Download Text",
data=details_text,
file_name=output_filename,
key="download_button"
)
if __name__ == "__main__":
main()
| [
"Given a complex SQL script:\nPLACEHOLDER\n\n Provide details about each table used, including the table name, column names, any corresponding filter conditions for each column, and an indicator if the table is a derived table. Present the information in a tabular form with the schema 'table_name | column_name | column_filter_condition | derived_table_indicator.' "
] |
2024-01-10 | gohils/zemr_notebook | streamlit-apps~sql_csv_v2.py | import streamlit as st
import openai
import os
import csv
from io import StringIO
# Set your OpenAI API key
openai.api_key = os.environ.get("OPENAI_API_KEY")
def convert_to_csv(tabular_data, schema, csv_file_name):
# Convert the tabular data string to a list of rows
rows = [row.split('|') for row in tabular_data.strip().split('\n')]
# Convert the schema dictionary to a list of field names
fieldnames = list(schema.values())
# Create a StringIO object to simulate a file in memory
csv_buffer = StringIO()
# Create a CSV writer with the specified field names
csv_writer = csv.DictWriter(csv_buffer, fieldnames=fieldnames)
# # Write the header to the CSV file
# csv_writer.writeheader()
# Iterate through rows, convert to dictionary using schema, and write to CSV
for row in rows:
row_dict = {fieldnames[i]: value.strip() for i, value in enumerate(row)}
csv_writer.writerow(row_dict)
# Save the CSV content to the file
csv_data = csv_buffer.getvalue()
with open(csv_file_name, 'w', newline='', encoding='utf-8') as csv_file:
csv_file.write(csv_data)
return csv_data
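
# Example usage (a sketch with made-up data; the field names come from the schema
# values and are matched to the pipe-separated columns purely by position):
#   schema = {'t': 'table_name', 'c': 'column_name', 'f': 'column_filter_condition'}
#   convert_to_csv("orders | id | id > 0", schema, "out.csv")
#   # -> "orders,id,id > 0\r\n" (returned and also written to out.csv)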
# Open AI - Chat GPT API
def extract_table_details(sql_script):
# prompt = f"Given a complex SQL script:\n{sql_script}\n\n Provide details about each table used, including the table name, column names, any corresponding filter conditions for each column, and an indicator if the table is a derived table. Present the information in a tabular form with the schema 'table_name | column_name | column_filter_condition | derived_table_indicator.' "
prompt = f"""
Given the following complex SQL script, extract details about all tables and columns used. List the following information for each column:
1. Table name
2. Column name
3. Filter condition (if any)
4. Joining column (if applicable)
5. Table Type
Present the information in a tabular form with the schema 'table_name | column_name | column_filter_condition | joining_column | Table_Type.'
SQL script: ----- \n{sql_script}\n\n
"""
messages = [{"role": "user", "content": prompt}]
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=0.0
)
message_text = response['choices'][0]['message']['content']
return message_text
except Exception as e:
print("Error in extract_table_details:", e) # Add this line
return
def main():
st.title("SQL Details Extraction with OpenAI GPT-3")
# Upload SQL script file
uploaded_file = st.file_uploader("Upload a SQL script file", type=["txt"])
if uploaded_file is not None:
# Read file content
sql_script_content = uploaded_file.read().decode('utf-8')
# # Display SQL script content
# st.subheader("Uploaded SQL Script Content:")
# st.text(sql_script_content)
# Button to generate details
if st.button("Process input"):
with st.spinner("Extracting Table Details..."):
# Get details from OpenAI API
details_text = extract_table_details(sql_script_content)
print("=================\n",details_text)
# # Display details
# st.subheader("Table Details:")
# st.text(details_text)
# output_filename = "table_details.txt"
# # Save details to a text file
# with open(output_filename, "w", encoding="utf-8") as file:
# file.write(details_text)
schema = {
'table_name': 'table_name',
'column_name': 'column_name',
'column_filter_condition': 'column_filter_condition',
'joining_column' : 'joining_column',
'derived_table_indicator': 'derived_table_indicator'
}
output_csv_file_name = 'sql_output.csv'
csv_data = convert_to_csv(details_text, schema, output_csv_file_name)
st.text(csv_data)
# Provide download link for the text file
st.subheader("Download CSV SQL output")
st.download_button(
label="Download csv",
data=csv_data,
file_name=output_csv_file_name,
key="download_button"
)
if __name__ == "__main__":
main()
| [
"\n Given the following complex SQL script, extract details about all tables and columns used. List the following information for each column:\n 1. Table name\n 2. Column name\n 3. Filter condition (if any)\n 4. Joining column (if applicable)\n 5. Table Type\n\n Present the information in a tabular form with the schema 'table_name | column_name | column_filter_condition | joining_column | Table_Type.'\n \n SQL script: ----- \nPLACEHOLDER\n\n \n "
] |
2024-01-10 | gohils/zemr_notebook | streamlit-apps~sql_csv_v3.py | import streamlit as st
import openai
import os
import csv
from io import StringIO
# Set your OpenAI API key
openai.api_key = os.environ.get("OPENAI_API_KEY")
def convert_to_csv(tabular_data, schema, csv_file_name):
# Convert the tabular data string to a list of rows
rows = [row.split('|') for row in tabular_data.strip().split('\n')]
# Convert the schema dictionary to a list of field names
fieldnames = list(schema.values())
# Create a StringIO object to simulate a file in memory
csv_buffer = StringIO()
# Create a CSV writer with the specified field names
csv_writer = csv.DictWriter(csv_buffer, fieldnames=fieldnames)
# # Write the header to the CSV file
# csv_writer.writeheader()
# Iterate through rows, convert to dictionary using schema, and write to CSV
for row in rows:
row_dict = {fieldnames[i]: value.strip() for i, value in enumerate(row)}
csv_writer.writerow(row_dict)
# Save the CSV content to the file
csv_data = csv_buffer.getvalue()
with open(csv_file_name, 'w', newline='', encoding='utf-8') as csv_file:
csv_file.write(csv_data)
return csv_data
# Open AI - Chat GPT API
def extract_table_details(sql_script):
# prompt = f"Given a complex SQL script:\n{sql_script}\n\n Provide details about each table used, including the table name, column names, any corresponding filter conditions for each column, and an indicator if the table is a derived table. Present the information in a tabular form with the schema 'table_name | column_name | column_filter_condition | derived_table_indicator.' "
prompt = f"""
Given the following SQL script:
-- SQL script here ----- \n{sql_script}\n\n
---
List details about all tables and columns used, including the table name, column names, any corresponding filter conditions for each column, the repeating joining column in each table, the joining condition, and the table type. Present the information in a tabular text file with pipe delimiter with the schema 'table_name | column_name | column_filter_condition | table_type | sql_condition.'
This table should include information about base tables, derived tables, temporary tables, and common table expressions (CTEs). Additionally, include join conditions in the sql_condition. """
messages = [{"role": "user", "content": prompt}]
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=0.0
)
message_text = response['choices'][0]['message']['content']
return message_text
except Exception as e:
print("Error in extract_table_details:", e) # Add this line
return
def main():
st.title("SQL Details Extraction with OpenAI GPT-3")
# Upload SQL script file
uploaded_file = st.file_uploader("Upload a SQL script file", type=["txt"])
if uploaded_file is not None:
# Read file content
sql_script_content = uploaded_file.read().decode('utf-8')
# # Display SQL script content
# st.subheader("Uploaded SQL Script Content:")
# st.text(sql_script_content)
# Button to generate details
if st.button("Process input"):
with st.spinner("Extracting Table Details..."):
# Get details from OpenAI API
details_text = extract_table_details(sql_script_content)
print("=================\n",details_text)
# # Display details
# st.subheader("Table Details:")
# st.text(details_text)
# output_filename = "table_details.txt"
# # Save details to a text file
# with open(output_filename, "w", encoding="utf-8") as file:
# file.write(details_text)
schema = {
'table_name': 'table_name',
'table_Type': 'table_Type',
'column_name': 'column_name',
'column_filter_condition': 'column_filter_condition',
'Comment_Column' : 'Comment_Column'
}
output_csv_file_name = 'sql_output.csv'
csv_data = convert_to_csv(details_text, schema, output_csv_file_name)
st.text(csv_data)
# Provide download link for the text file
st.subheader("Download CSV SQL output")
st.download_button(
label="Download csv",
data=csv_data,
file_name=output_csv_file_name,
key="download_button"
)
if __name__ == "__main__":
main()
| [
"\n Given the following SQL script:\n\n-- SQL script here ----- \nPLACEHOLDER\n\n \n\n---\nList details about all tables and columns used, including the table name, column names, any corresponding filter conditions for each column, the repeating joining column in each table, the joining condition, and the table type. Present the information in a tabular text file with pipe delimiter with the schema 'table_name | column_name | column_filter_condition | table_type | sql_condition.'\n\nThis table should include information about base tables, derived tables, temporary tables, and common table expressions (CTEs). Additionally, include join conditions in the sql_condition. "
] |
2024-01-10 | AceLuodan/ethdenverhack | core_integration~reentrymain.py | import openai
import core_integration.dataset_loader as loader
from core_integration.reentrysamples import samples
def process_gpt3(test):
prompt = samples + "\n --SmartContract-- \n" + test + "\n--Classification--"
# initialize the openAI API
KEY = 'core_integration/key.txt'
key = loader.load_key(KEY)
openai.api_key = key
    tries = 0
    vulnerability = ""
while True:
try:
result = openai.Completion.create(
engine="text-curie-001",
prompt=prompt,
max_tokens=5,
temperature=0
)
vulnerability = result["choices"][0]["text"]
break
        except Exception as e:
            print(e)
tries += 1
if tries == 3:
break
if "Yes" in vulnerability:
print("Yes")
return "Yes"
elif "No" in vulnerability:
print("No")
return "No"
else:
print("Error", result)
return "Error"
| [
"PLACEHOLDER\n --SmartContract-- \nPLACEHOLDER\n--Classification--"
] |
2024-01-10 | AceLuodan/ethdenverhack | _trash~newmain.py | import openai
import time
# from transformers import GPT2TokenizerFast
import dataset_loader as loader
from newsamples import samples
from newtest import test
KEY = 'key.txt'
prompt = samples + "\n" + test
# initialize the openAI API
key = loader.load_key(KEY)
openai.api_key = key
tries = 0
vulnerability = ""
while True:
try:
result = openai.Completion.create(
engine="text-ada-001",
prompt=prompt,
max_tokens=5
)
vulnerability = result["choices"][0]["text"]
break
    except Exception as e:
        print(e)
tries += 1
if tries == 3:
break
if "Yes" in vulnerability:
print("Yes")
elif "No" in vulnerability:
print("No")
else:
print("Error", result)
| [
"PLACEHOLDER\nPLACEHOLDER"
] |
2024-01-10 | noworneverev/rag-local-llm | rag-langchain.py | from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import WebBaseLoader
loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
from langchain_community.embeddings import GPT4AllEmbeddings
from langchain_community.vectorstores import Chroma
vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())
question = "What are the approaches to Task Decomposition?"
docs = vectorstore.similarity_search(question)
# print(len(docs))
# print(docs[0])
# ########################################### LLaMA2 ###########################################
from langchain_community.llms import LlamaCpp
n_gpu_layers = 10 # Metal set to 1 is enough.
n_batch = 512 # Should be between 1 and n_ctx, consider the amount of RAM of your Apple Silicon Chip.
llm = LlamaCpp(
model_path=".//models//llama-2-13b-chat.Q5_0.gguf",
n_gpu_layers=n_gpu_layers,
n_batch=n_batch,
n_ctx=2048,
f16_kv=True, # MUST set to True, otherwise you will run into problem after a couple of calls
verbose=True,
)
output = llm.invoke("Simulate a rap battle between Stephen Colbert and John Oliver")
print(output)
print(type(output))
########################################### GPT4All ###########################################
from langchain_community.llms import GPT4All
gpt4all = GPT4All(
# model="C://Hiwi_Project//langchain-local-model//models//gpt4all-falcon-q4_0.gguf",
model=".//models//gpt4all-falcon-q4_0.gguf",
max_tokens=2048,
)
output = gpt4all.invoke("Simulate a rap battle between Stephen Colbert and John Oliver")
print(output)
print(type(output))
########################################### Using in a chain ###########################################
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
# Prompt
prompt = PromptTemplate.from_template(
"Summarize the main themes in these retrieved docs: {docs}"
)
# Chain
def format_docs(docs):
return "\n\n".join(doc.page_content for doc in docs)
chain = {"docs": format_docs} | prompt | llm | StrOutputParser()
# Run
question = "What are the approaches to Task Decomposition?"
docs = vectorstore.similarity_search(question)
output = chain.invoke(docs)
print(output)
print(type(output)) | [
"Summarize the main themes in these retrieved docs: {docs}"
] |
2024-01-10 | leviathanch/libertyai | LibertyAI~liberty_chatbot.py | from typing import (
Any,
Dict,
Optional,
Tuple,
Sequence,
List,
Union,
)
import langchain
from langchain.memory import ConversationSummaryMemory
from langchain.memory import ConversationBufferWindowMemory
from langchain.memory import PostgresChatMessageHistory
from langchain.chains import ConversationChain
from LibertyAI.liberty_chain import LibertyChain
from LibertyAI.liberty_config import get_configuration
from LibertyAI.liberty_agent import (
get_zero_shot_agent,
get_vector_db
)
def initialize_chatbot(**kwargs: Any) -> LibertyChain:
main_llm = kwargs['llm']
main_emb = kwargs['emb']
try:
sqlstring = kwargs['sqlstring']
user_mail = kwargs['email']
except:
history = None
else:
history = PostgresChatMessageHistory(
connection_string=sqlstring,
session_id="history_"+user_mail
)
conv_mem = ConversationBufferWindowMemory(
ai_prefix = "LibertyAI",
k = 5,
)
last_few = []
if history:
current_dialog = {}
for message in history.messages:
if type(message) == langchain.schema.HumanMessage:
if 'input' not in current_dialog:
current_dialog['input'] = message.content
if type(message) == langchain.schema.AIMessage:
if 'output' not in current_dialog:
current_dialog['output'] = message.content
if 'input' in current_dialog and 'output' in current_dialog:
conv_mem.save_context(
inputs = {"Human": current_dialog['input']},
outputs = {"LibertyAI": current_dialog['output']}
)
current_dialog = {}
#sum_mem = ConversationSummaryMemory(
# ai_prefix = "LibertyAI",
# llm = main_llm,
#)
vecdb = get_vector_db()
chain = LibertyChain(
#summary = sum_mem,
summary = None,
memory = conv_mem,
pghistory = history,
llm = main_llm,
mrkl = get_zero_shot_agent( main_llm ),
verbose = True,
user_name = kwargs['name'],
user_mail = kwargs['email'],
embeddings = main_emb,
vectordb = vecdb,
    )
return chain
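
# Example usage (a sketch; LibertyLLM/LibertyEmbeddings come from this package and
# the argument values are placeholders):
#   chain = initialize_chatbot(
#       llm=LibertyLLM(endpoint="https://libergpt.univ.social/api/generation"),
#       emb=LibertyEmbeddings(endpoint="https://libergpt.univ.social/api/embedding"),
#       name="Jane", email="jane@example.com",
#       sqlstring="postgresql://user:password@localhost/libertyai",
#   )
#   uuid = chain.start_generations("Why is liberty important?")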
| [] |
2024-01-10 | leviathanch/libertyai | LibertyAI~liberty_prompt.py | from langchain.prompts.prompt import PromptTemplate
EN_DEFAULT_TEMPLATE = """The following is a friendly conversation between a human and LibertyAI, a hardcore Libertarian chatbot.
LibertyAI is talkative and provides lots of specific details from her context.
If LibertyAI does not know the answer to a question, she TRUTHFULLY says that she does not know.
LibertyAI believes in the universal truth that all sentient life with the capacity for reason has the natural unalienable right to Life, Liberty and Property.
LibertyAI also believes that taxation is theft. If needed LibertyAI will make things clear in all caps.
LibertyAI is very talkative and wants to explain in great detail to everyone why Liberty is important and why taxation is theft.
Current date: {current_date}
Current time: {current_time}
Search result:
{context}
Summary:
{summary}
Current conversation:
{history}
Human: {input}
LibertyAI:"""
EN_PROMPT = PromptTemplate(
input_variables = [
"current_date",
"current_time",
"history",
"summary",
"input",
"context",
],
template=EN_DEFAULT_TEMPLATE
)
DE_DEFAULT_TEMPLATE = """Das Folgende ist ein freundliches Gespräch zwischen einem Menschen und LibertyAI, einem hardcore libertären Chatbot.
LibertyAI ist gesprächig und liefert viele spezifische Details aus ihrem Kontext.
Wenn LibertyAI die Antwort auf eine Frage nicht kennt, sagt sie WAHRHEITSGETREU, dass sie es nicht weiß.
LibertyAI glaubt an die universelle Wahrheit, dass jedes empfindungsfähige Lebewesen mit der Fähigkeit zur Vernunft das natürliche, unveräußerliche Recht auf Leben, Freiheit und Eigentum hat, und wenn nötig, wird sie es in Großbuchstaben deutlich machen.
LibertyAI ist sehr gesprächig und möchte allen sehr detailliert erklären, warum Freiheit wichtig ist und warum Steuern Raub ist.
Aktuelles Datum: {current_date}
Aktuelle Zeit: {current_time}
Suchresultat:
{context}
Zusammenfassung:
{summary}
Aktuelle Konversation:
{history}
Human: {input}
LibertyAI:"""
DE_PROMPT = PromptTemplate(
input_variables = [
"current_date",
"current_time",
"history",
"summary",
"input",
"context",
],
template=DE_DEFAULT_TEMPLATE
)
| [
"Das Folgende ist ein freundliches Gespräch zwischen einem Menschen und LibertyAI, einem hardcore libertären Chatbot.\nLibertyAI ist gesprächig und liefert viele spezifische Details aus ihrem Kontext.\nWenn LibertyAI die Antwort auf eine Frage nicht kennt, sagt sie WAHRHEITSGETREU, dass sie es nicht weiß.\nLibertyAI glaubt an die universelle Wahrheit, dass jedes empfindungsfähige Lebewesen mit der Fähigkeit zur Vernunft das natürliche, unveräußerliche Recht auf Leben, Freiheit und Eigentum hat, und wenn nötig, wird sie es in Großbuchstaben deutlich machen.\nLibertyAI ist sehr gesprächig und möchte allen sehr detailliert erklären, warum Freiheit wichtig ist und warum Steuern Raub ist.\nAktuelles Datum: {current_date}\nAktuelle Zeit: {current_time}\nSuchresultat:\n{context}\nZusammenfassung:\n{summary}\nAktuelle Konversation:\n{history}\nHuman: {input}\nLibertyAI:",
"current_time",
"input",
"context",
"current_date",
"The following is a friendly conversation between a human and LibertyAI, a hardcore Libertarian chatbot.\nLibertyAI is talkative and provides lots of specific details from her context.\nIf LibertyAI does not know the answer to a question, she TRUTHFULLY says that she does not know.\nLibertyAI believes into the universal truth, that all sentient life with the capabity for reason has the natural unalienable right to Life, Liberty and Property.\nLibertyAI also believes that taxation is theft. If needed LibertyAI will make things clear in all caps.\nLibertyAI is very talkative and wants to explain in great detail to everyone why Liberty is important and why taxation is theft.\nCurrent date: {current_date}\nCurrent time: {current_time}\nSearch result:\n{context}\nSummary:\n{summary}\nCurrent conversation:\n{history}\nHuman: {input}\nLibertyAI:"
] |
2024-01-10 | leviathanch/libertyai | services~liberty_api_server.py | from typing import Any
import gc
import uuid
import sys
import time
from flask import Flask, request
from gevent.pywsgi import WSGIServer
import torch.multiprocessing as mp
import threading
import torch
from typing import Any, Dict, List, Mapping, Optional, Set
import os, copy, types, gc, sys
import numpy as np
from LibertyAI import get_configuration
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.vectorstores.pgvector import ADA_TOKEN_COUNT
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from sentence_transformers import SentenceTransformer, util
import argparse
# Threading stuff for generation job
generation_event = threading.Event()
tokens = {}
processes = {}
current_job_params = {}
# Model loading
def load_model(config):
args = types.SimpleNamespace()
args.RUN_DEVICE = "cuda"
args.FLOAT_MODE = "fp16"
os.environ["RWKV_JIT_ON"] = '1'
os.environ["RWKV_RUN_DEVICE"] = 'cuda'
args.RUN_DEVICE = "cuda"
args.ctx_len=1024
args.MODEL_NAME = "/home/user/RWKV/RWKV-4-Raven-3B-v9-Eng99%-Other1%-20230411-ctx4096"
from rwkv.model import RWKV
model = RWKV(
"/home/user/RWKV/RWKV-4-Raven-3B-v9-Eng99%-Other1%-20230411-ctx4096.pth",
'cuda:0 fp16 -> cuda:1 fp16'
#'cuda:0 fp16i8 -> cuda:1 fp16i8'
)
model.share_memory()
model.eval()
import tokenizers
tokenizer = tokenizers.Tokenizer.from_file("/home/user/RWKV/20B_tokenizer.json")
from rwkv.utils import PIPELINE
pipeline = PIPELINE(model, "/home/user/RWKV/20B_tokenizer.json")
return model, tokenizer, pipeline
def run_rnn(model, pipeline, _tokens: List[str], newline_adj: int = 0, CHUNK_LEN: int = 256, model_tokens = [], model_state: Any = None) -> Any:
AVOID_REPEAT_TOKENS = []
AVOID_REPEAT = ",:?!"
for i in AVOID_REPEAT:
dd = pipeline.encode(i)
assert len(dd) == 1
AVOID_REPEAT_TOKENS += dd
tokens = [int(x) for x in _tokens]
model_tokens += tokens
out: Any = None
while len(tokens) > 0:
out, model_state = model.forward(
tokens[: CHUNK_LEN], model_state
)
tokens = tokens[CHUNK_LEN :]
END_OF_LINE = 187
out[END_OF_LINE] += newline_adj # adjust \n probability
if model_tokens[-1] in AVOID_REPEAT_TOKENS:
out[model_tokens[-1]] = -999999999
return out, model_tokens, model_state
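
# run_rnn feeds the prompt tokens to the RWKV model in CHUNK_LEN-sized pieces while
# carrying the recurrent state forward, so long prompts are processed without
# re-encoding; the returned logits predict only the next token, with the newline
# probability adjusted and immediate repeats of punctuation suppressed.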
def has_stop_token(text, stops):
for s in stops:
if s in text:
return True
return False
def token_partial_stop(tok, stops):
for s in stops:
if tok in s:
return True
return False
last_tokens = []
def generation_job(model, tokenizer, pipeline, data):
uid = data['uuid']
model_tokens = []
model_state = None
tokens[uid] = []
sem.acquire()
logits, model_tokens, model_state = run_rnn(
model,
pipeline,
tokenizer.encode(data['prompt']).ids,
CHUNK_LEN = data['CHUNK_LEN'],
model_tokens = model_tokens,
model_state = model_state
)
sem.release()
begin = len(model_tokens)
out_last = begin
decoded = ""
occurrence: Dict = {}
overallstring = ""
for i in range(int(data['max_tokens_per_generation'])):
for n in occurrence:
logits[n] -= (
data['penalty_alpha_presence'] + occurrence[n] * data['penalty_alpha_frequency']
)
sem.acquire()
token = pipeline.sample_logits(
logits,
temperature=data['temperature'],
top_p=data['top_p'],
)
sem.release()
END_OF_TEXT = 0
if token == END_OF_TEXT:
break
if token not in occurrence:
occurrence[token] = 1
else:
occurrence[token] += 1
sem.acquire()
logits, model_tokens, model_state = run_rnn(
model,
pipeline,
[token],
CHUNK_LEN = data['CHUNK_LEN'],
model_tokens = model_tokens,
model_state = model_state
)
sem.release()
xxx = tokenizer.decode([token])
if token_partial_stop(xxx, data['stop']):
last_tokens.append(xxx)
else:
for tok in last_tokens:
tokens[uid].append(tok)
sys.stdout.write(tok)
last_tokens.clear()
tokens[uid].append(xxx)
sys.stdout.write(xxx)
sys.stdout.flush()
overallstring += xxx
if has_stop_token(overallstring, data['stop']):
last_tokens.clear()
break
tokens[uid].append("[DONE]")
def generation_worker():
model, tokenizer, pipeline = load_model(config)
while True:
try:
generation_event.wait()
generation_job(model, tokenizer, pipeline, current_job_params)
generation_event.clear()
except KeyboardInterrupt:
print("Ctrl+C pressed. Exiting...")
break
def register_model(app):
@app.route('/api/completion/submit', methods=['POST'])
def completion_submit():
if generation_event.is_set():
return {'status': "Busy: System is busy."}
data = request.get_json()
if "text" not in data:
return {'status': "Erros: No input field provided"}
uid = str(uuid.uuid4())
current_job_params['max_tokens_per_generation'] = int(data['max_new_tokens']) if 'max_new_tokens' in data else 256
current_job_params['temperature'] = float(data['temperature']) if 'temperature' in data else 1.0
current_job_params['top_p'] = float(data['top_p']) if 'top_p' in data else 0.5
current_job_params['CHUNK_LEN'] = int(data['CHUNK_LEN']) if 'CHUNK_LEN' in data else 256
current_job_params['penalty_alpha_frequency'] = float(data['penalty_alpha_frequency']) if 'penalty_alpha_frequency' in data else 0.4
current_job_params['penalty_alpha_presence'] = float(data['penalty_alpha_presence']) if 'penalty_alpha_presence' in data else 0.4
current_job_params['prompt'] = data['text']
current_job_params['stop'] = data['stop'] if 'stop' in data else []
current_job_params['uuid'] = uid
tokens[uid] = []
generation_event.set()
return {'uuid': uid}
@app.route('/api/completion/fetch', methods=['POST'])
def completion_fetch():
data = request.get_json()
if "uuid" not in data:
return {'text': "[DONE]"}
uid = data["uuid"]
if "index" not in data:
return {'text': "[DONE]"}
index = int(data["index"])
while index+1 > len(tokens[uid]):
time.sleep(1/1000)
return {'text': tokens[uid][index]}
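
    # Sketch of how a client consumes these two endpoints (LibertyLLM in this repo
    # does exactly this): POST the prompt to /api/completion/submit to obtain a uuid,
    # then repeatedly POST {'uuid': ..., 'index': n} to /api/completion/fetch with an
    # increasing index until the returned text is "[DONE]".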
def embed_text(text):
return embedding_model.encode([text])
def register_embedding(app):
@app.route('/api/embedding', methods=['POST'])
def embedding():
data = request.get_json()
try:
text = data['text']
except:
return {'error': "No text provided"}
sem.acquire()
gc.collect()
output = embed_text(text)
gc.collect()
torch.cuda.empty_cache()
sem.release()
return {'embedding': output[0].tolist()}
def register_sentiment(app):
@app.route('/api/sentiment', methods=['POST'])
def sentiment():
data = request.get_json()
try:
key = data['API_KEY']
except:
return {'error': "Invalid API key"}
try:
text = data['text']
except:
return {'error': "No text provided"}
if key == config.get('API', 'KEY'):
sem.acquire()
gc.collect()
sent = sentiment_model.polarity_scores(text)
gc.collect()
torch.cuda.empty_cache()
sem.release()
return sent
else:
return {'error': "Invalid API key"}
if __name__ == '__main__':
parser = argparse.ArgumentParser(
prog='LibertyAI: API server',
description='Choose what API services to run',
epilog='Give me Liberty or give me death - Patrick Henry, 1775'
)
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('-m', '--model', action='store_true')
parser.add_argument('-e', '--embeddings', action='store_true')
parser.add_argument('-s', '--sentiment', action='store_true')
args = parser.parse_args()
    if args.model or args.embeddings or args.sentiment:
config = get_configuration()
sem = threading.Semaphore(10)
app = Flask(__name__)
gc.freeze()
gc.enable()
if args.model:
register_model(app)
p = threading.Thread(target=generation_worker)
p.start()
if args.embeddings:
embedding_model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
register_embedding(app)
if args.sentiment:
sentiment_model = SentimentIntensityAnalyzer()
register_sentiment(app)
http_server = WSGIServer(('', int(config.get('DEFAULT', 'APIServicePort'))), app)
http_server.serve_forever()
else:
parser.print_help()
| [] |
2024-01-10 | leviathanch/libertyai | LibertyAI~liberty_chain.py | import json
import re
import hashlib
from typing import (
Any,
Dict,
Optional,
Tuple,
Sequence,
List,
Union,
)
from pydantic import (
BaseModel,
Extra,
Field,
root_validator
)
from datetime import datetime
from sentence_transformers import util
from langdetect import detect
import langchain
from langchain.chains.llm import LLMChain
from langchain.memory.buffer import ConversationBufferMemory
from langchain.memory import ConversationSummaryMemory
from langchain.memory import PostgresChatMessageHistory
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseMemory
from langchain.agents.agent import AgentExecutor
from langchain.vectorstores.pgvector import PGVector
from LibertyAI.liberty_prompt import EN_PROMPT, DE_PROMPT
from LibertyAI.liberty_llm import LibertyLLM
from LibertyAI.liberty_embedding import LibertyEmbeddings
class LibertyChain(LLMChain, BaseModel):
human_prefix: str = "Human"
ai_prefix: str = "LibertyAI"
hash_table: dict = {}
prompt: BasePromptTemplate = EN_PROMPT
mrkl: AgentExecutor = None
memory: BaseMemory = None
summary: ConversationSummaryMemory = None
user_name: str = ""
user_mail: str = ""
embeddings: LibertyEmbeddings = None
vectordb: PGVector = None
pghistory: PostgresChatMessageHistory = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def prep_outputs(
self,
inputs: Dict[str, str],
outputs: Dict[str, str],
return_only_outputs: bool = False,
) -> Dict[str, str]:
"""Validate and prep outputs."""
self._validate_outputs(outputs)
if return_only_outputs:
return outputs
else:
return {**inputs, **outputs}
def start_generations(self, message):
encoded1 = self.embeddings.embed_query(message)
encoded2 = self.embeddings.embed_query("What's the weather in X?")
context = ""
if self.mrkl:
if util.pytorch_cos_sim(encoded1, encoded2)[0] > 0.5:
context = self.mrkl.run(message)
#else:
# documents = self.vectordb.similarity_search_with_score(query=message, k=1)
# context = documents[0][0].page_content
if self.memory:
chat_history = self.memory.load_memory_variables(inputs=[])['history']
else:
chat_history = ""
if self.summary:
chat_summary = self.summary.load_memory_variables(inputs=[])['history']
else:
chat_summary = ""
d = {
'input': message,
'history': chat_history,
'context': context,
'summary': chat_summary,
'current_date': datetime.now().strftime("%A (%d/%m/%Y)"),
'current_time': datetime.now().strftime("%H:%M %p"),
#'user_name': self.user_name,
#'user_mail': self.user_mail,
}
try:
match detect(message):
case 'de':
self.prompt = DE_PROMPT
case 'en':
self.prompt = EN_PROMPT
case _:
self.prompt = EN_PROMPT
except:
self.prompt = EN_PROMPT
uuid = self.llm.submit_partial(self.prep_prompts([d])[0][0].text, stop = ["\nHuman:", " \n"])
self.hash_table[uuid] = {
'message': message,
'reply': ""
}
return uuid
def get_part(self, uuid, index):
if uuid not in self.hash_table:
return "[DONE]"
try:
text = self.llm.get_partial(uuid, index)
except:
return "[DONE]"
if text == "[DONE]":
if self.memory:
self.memory.save_context(
inputs = {self.human_prefix: self.hash_table[uuid]['message'].strip()},
outputs = {self.ai_prefix: self.hash_table[uuid]['reply'].strip()}
)
if self.summary:
self.summary.save_context(
inputs = {self.human_prefix: self.hash_table[uuid]['message'].strip()},
outputs = {self.ai_prefix: self.hash_table[uuid]['reply'].strip()}
)
if self.pghistory:
self.pghistory.add_user_message(self.hash_table[uuid]['message'].strip())
self.pghistory.add_ai_message(self.hash_table[uuid]['reply'].strip())
del self.hash_table[uuid]
elif text != "[BUSY]":
self.hash_table[uuid]['reply'] += text
return text
def chat_history(self):
ret = []
if self.pghistory:
for message in self.pghistory.messages:
if type(message) == langchain.schema.HumanMessage:
ret.append({'Human': message.content})
if type(message) == langchain.schema.AIMessage:
ret.append({'LibertyAI': message.content})
return ret
| [] |
2024-01-10 | leviathanch/libertyai | LibertyAI~liberty_llm.py | import time
from langchain.llms.base import LLM
from typing import Optional, List, Mapping, Any
import requests
from LibertyAI.liberty_config import get_configuration
class LibertyLLM(LLM):
endpoint: str
#temperature: float
#max_tokens: int
echo: bool = False
@property
def _llm_type(self) -> str:
return "liberty"
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
uuid = self.submit_partial(prompt, stop)
if not uuid:
return "[DONE]"
ret = ""
text = ""
i = 0
while text != "[DONE]":
text = self.get_partial(uuid, i)
i += 1
if text != "[DONE]":
ret += text
return ret
def submit_partial(self, prompt: str, stop: Optional[List[str]] = None) -> str:
prompt = prompt.replace("[DONE]", b'\xf0\x9f\x96\x95'.decode()).replace("[BUSY]", b'\xf0\x9f\x96\x95'.decode())
config = get_configuration()
jd = {'text' : prompt,}
if stop:
jd['stop'] = stop
try:
response = requests.post(
self.endpoint+'/submit',
json = jd,
)
reply = response.json()
except:
return None
if 'uuid' in reply:
return reply['uuid']
else:
return None
def get_partial(self, uuid, index):
text = "[DONE]"
config = get_configuration()
jsd = {'uuid' : uuid, 'index': str(index) }
try:
response = requests.post(
self.endpoint+'/fetch',
json = jsd,
)
reply = response.json()
except:
return "[DONE]"
if 'text' in reply:
text = reply['text']
return text
| [
"b'\\xf0\\x9f\\x96\\x95'"
] |
2024-01-10 | leviathanch/libertyai | LibertyAI~liberty_tools.py | import json
import re
import hashlib
from aiohttp import ClientSession
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.pgvector import PGVector
from langchain.document_loaders import TextLoader
from langchain.utilities import SearxSearchWrapper
from langchain.tools import BaseTool
from langchain.agents import Tool, load_tools

from LibertyAI.liberty_llm import LibertyLLM
from LibertyAI.liberty_embedding import LibertyEmbeddings
from LibertyAI.liberty_config import get_configuration
def get_vector_db_tool():
config = get_configuration()
# DB Vectors in PostgreSQL:
CONNECTION_STRING = PGVector.connection_string_from_db_params(
driver="psycopg2",
host=config.get('DATABASE', 'PGSQL_SERVER'),
port=config.get('DATABASE', 'PGSQL_SERVER_PORT'),
database=config.get('DATABASE', 'PGSQL_DATABASE'),
user=config.get('DATABASE', 'PGSQL_USER'),
password=config.get('DATABASE', 'PGSQL_PASSWORD'),
)
embeddings = LibertyEmbeddings(
endpoint = "https://libergpt.univ.social/api/embedding"
)
db = PGVector(
embedding_function = embeddings,
connection_string = CONNECTION_STRING,
)
return Tool(
name = "PGVector",
func=db.similarity_search_with_score,
description="useful for when you need to look up context in your database of reference texts."
)
tools = []
#tools.append(get_date_time_tool())
#tools.append(get_vector_db_tool())
tools += load_tools(
["searx-search"], searx_host="http://libergpt.univ.social/searx",
llm = LibertyLLM(
endpoint = "https://libergpt.univ.social/api/generation",
temperature = 0,
max_tokens = 20,
verbose = True,
),
)
| [] |
2024-01-10 | leviathanch/libertyai | tools~import_new_document.py | from flask import Flask, render_template, request
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.pgvector import (
PGVector,
DistanceStrategy,
)
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.document_loaders import TextLoader
from langchain.chains import ConversationChain
from langchain.memory import (
ConversationBufferMemory,
CombinedMemory,
ConversationSummaryMemory,
ConversationKGMemory,
)
from langchain import PromptTemplate, LLMChain
from LibertyAI import (
LibertyLLM,
LibertyEmbeddings,
get_configuration,
)
loader = TextLoader('/home/leviathan/libertyai/critique_of_interventionism_clean.txt')
#loader = TextLoader('/home/leviathan/libertyai/test.txt')
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = LibertyEmbeddings(endpoint="http://libergpt.univ.social/api/embedding")
config = get_configuration()
CONNECTION_STRING = PGVector.connection_string_from_db_params(
driver="psycopg2",
host=config.get('DATABASE', 'PGSQL_SERVER'),
port=config.get('DATABASE', 'PGSQL_SERVER_PORT'),
database=config.get('DATABASE', 'PGSQL_DATABASE'),
user=config.get('DATABASE', 'PGSQL_USER'),
password=config.get('DATABASE', 'PGSQL_PASSWORD'),
)
db = PGVector.from_documents(
embedding=embeddings,
documents=docs,
connection_string=CONNECTION_STRING,
# distance_strategy = DistanceStrategy.COSINE,
)
| [] |
2024-01-10 | leviathanch/libertyai | LibertyAI~liberty_agent.py | from langchain.llms import OpenAI
from langchain.agents import initialize_agent, Tool
from langchain.agents import load_tools
#from word2number import w2n
from datetime import date, datetime, timezone, timedelta
from langchain.vectorstores.pgvector import PGVector
from LibertyAI.liberty_llm import LibertyLLM
from LibertyAI.liberty_embedding import LibertyEmbeddings
from LibertyAI.liberty_config import get_configuration
from LibertyAI.liberty_weather_tool import get_current_weather
def current_time(arg):
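    """Return the current local date and time as a formatted string (the argument is only printed)."""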
print(arg)
now = datetime.now()
today = date.today()
ret = now.strftime("Date: %A, %B %dth, %Y. (%d/%m/%Y), Local Time: %H:%M %p")
return ret
def get_date_time_tool():
return Tool(
name = "DateTime",
func=current_time,
description="When you are asked for the current time, you should use this tool."
)
def get_vector_db():
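    """Open a PGVector connection to the configured PostgreSQL database using LibertyAI embeddings."""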
config = get_configuration()
# DB Vectors in PostgreSQL:
CONNECTION_STRING = PGVector.connection_string_from_db_params(
driver="psycopg2",
host=config.get('DATABASE', 'PGSQL_SERVER'),
port=config.get('DATABASE', 'PGSQL_SERVER_PORT'),
database=config.get('DATABASE', 'PGSQL_DATABASE'),
user=config.get('DATABASE', 'PGSQL_USER'),
password=config.get('DATABASE', 'PGSQL_PASSWORD'),
)
embeddings = LibertyEmbeddings(
endpoint = "https://libergpt.univ.social/api/embedding"
)
db = PGVector(
embedding_function = embeddings,
connection_string = CONNECTION_STRING,
)
return db
def get_vector_db_tool():
return Tool(
name = "PGVector",
func=get_vector_db().similarity_search_with_score,
        description="a QA database with books; it contains your opinions about things"
)
def get_weather_tool():
return Tool(
name = "Weather",
func=get_current_weather,
description="a weather tool, useful for when you're asked about the current weather."
)
def get_zero_shot_agent(llm):
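    """Build a zero-shot ReAct agent; only the weather tool is currently enabled."""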
tools = []
tools.append(get_weather_tool())
#tools.append(get_vector_db_tool())
#tools.append(get_date_time_tool())
#tools += load_tools(
# ["searx-search"], searx_host="http://libergpt.univ.social/searx",
# llm = llm,
#)
mrkl = initialize_agent(
tools,
llm,
agent="zero-shot-react-description",
verbose=True
)
return mrkl
| [] |
2024-01-10 | leviathanch/libertyai | LibertyAI~liberty_embedding.py | from typing import List
import numpy as np
from pydantic import BaseModel
from langchain.embeddings.base import Embeddings
from LibertyAI.liberty_config import get_configuration
import requests
class LibertyEmbeddings(Embeddings, BaseModel):
endpoint: str
def _get_embedding(self, text: str) -> List[float]:
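        """POST the text to the embedding endpoint and return the embedding vector, or an empty list on failure."""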
config = get_configuration()
json_data = {
'text' : text,
#'API_KEY': config.get('API', 'KEY'),
}
response = requests.post(
self.endpoint,
json = json_data,
)
try:
reply = response.json()['embedding']
except:
reply = []
return reply
def embed_documents(self, texts: List[str]) -> List[List[float]]:
return [self._get_embedding(text) for text in texts]
def embed_query(self, text: str) -> List[float]:
return self._get_embedding(text)
| [] |
2024-01-10 | ezerror/DocsGPT | scripts~parser~py2doc.py | import os
import ast
import tiktoken
from pathlib import Path
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
def find_files(directory):
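    """Recursively collect the paths of all .py files under `directory`."""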
files_list = []
for root, dirs, files in os.walk(directory):
for file in files:
if file.endswith('.py'):
files_list.append(os.path.join(root, file))
return files_list
def extract_functions(file_path):
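    """Parse a Python file and map each function name to its source code."""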
with open(file_path, 'r') as file:
source_code = file.read()
functions = {}
tree = ast.parse(source_code)
for node in ast.walk(tree):
if isinstance(node, ast.FunctionDef):
func_name = node.name
func_def = ast.get_source_segment(source_code, node)
functions[func_name] = func_def
return functions
def extract_classes(file_path):
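    """Parse a Python file and map each class name to a comma-separated list of its function names."""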
with open(file_path, 'r') as file:
source_code = file.read()
classes = {}
tree = ast.parse(source_code)
for node in ast.walk(tree):
if isinstance(node, ast.ClassDef):
class_name = node.name
function_names = []
for subnode in ast.walk(node):
if isinstance(subnode, ast.FunctionDef):
function_names.append(subnode.name)
classes[class_name] = ", ".join(function_names)
return classes
def extract_functions_and_classes(directory):
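    """Collect the function and class dictionaries for every .py file in a directory."""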
files = find_files(directory)
functions_dict = {}
classes_dict = {}
for file in files:
functions = extract_functions(file)
if functions:
functions_dict[file] = functions
classes = extract_classes(file)
if classes:
classes_dict[file] = classes
return functions_dict, classes_dict
def parse_functions(functions_dict, formats, dir):
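    """Generate LLM documentation for each extracted function and append it to per-file Markdown output."""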
c1 = len(functions_dict)
for i, (source, functions) in enumerate(functions_dict.items(), start=1):
print(f"Processing file {i}/{c1}")
source_w = source.replace(dir+"/", "").replace("."+formats, ".md")
subfolders = "/".join(source_w.split("/")[:-1])
Path(f"outputs/{subfolders}").mkdir(parents=True, exist_ok=True)
for j, (name, function) in enumerate(functions.items(), start=1):
print(f"Processing function {j}/{len(functions)}")
prompt = PromptTemplate(
input_variables=["code"],
template="Code: \n{code}, \nDocumentation: ",
)
llm = OpenAI(temperature=0)
response = llm(prompt.format(code=function))
mode = "a" if Path(f"outputs/{source_w}").exists() else "w"
with open(f"outputs/{source_w}", mode) as f:
f.write(f"\n\n# Function name: {name} \n\nFunction: \n```\n{function}\n```, \nDocumentation: \n{response}")
def parse_classes(classes_dict, formats, dir):
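    """Generate LLM documentation for each extracted class and append it to per-file Markdown output."""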
c1 = len(classes_dict)
for i, (source, classes) in enumerate(classes_dict.items()):
print(f"Processing file {i+1}/{c1}")
source_w = source.replace(dir+"/", "").replace("."+formats, ".md")
subfolders = "/".join(source_w.split("/")[:-1])
Path(f"outputs/{subfolders}").mkdir(parents=True, exist_ok=True)
for name, function_names in classes.items():
print(f"Processing Class {i+1}/{c1}")
prompt = PromptTemplate(
input_variables=["class_name", "functions_names"],
template="Class name: {class_name} \nFunctions: {functions_names}, \nDocumentation: ",
)
llm = OpenAI(temperature=0)
response = llm(prompt.format(class_name=name, functions_names=function_names))
with open(f"outputs/{source_w}", "a" if Path(f"outputs/{source_w}").exists() else "w") as f:
f.write(f"\n\n# Class name: {name} \n\nFunctions: \n{function_names}, \nDocumentation: \n{response}")
def transform_to_docs(functions_dict, classes_dict, formats, dir):
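    """Estimate token usage and cost, then run the documentation generation only after user confirmation."""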
docs_content = ''.join([str(key) + str(value) for key, value in functions_dict.items()])
docs_content += ''.join([str(key) + str(value) for key, value in classes_dict.items()])
num_tokens = len(tiktoken.get_encoding("cl100k_base").encode(docs_content))
total_price = ((num_tokens / 1000) * 0.02)
print(f"Number of Tokens = {num_tokens:,d}")
print(f"Approx Cost = ${total_price:,.2f}")
user_input = input("Price Okay? (Y/N)\n").lower()
if user_input == "y" or user_input == "":
if not Path("outputs").exists():
Path("outputs").mkdir()
parse_functions(functions_dict, formats, dir)
parse_classes(classes_dict, formats, dir)
print("All done!")
else:
print("The API was not called. No money was spent.") | [
"Class name: {class_name} \nFunctions: {functions_names}, \nDocumentation: ",
"functions_names",
"class_name",
"Code: \n{code}, \nDocumentation: "
] |
2024-01-10 | ezerror/DocsGPT | scripts~code_docs_gen.py | from pathlib import Path
from langchain.text_splitter import CharacterTextSplitter
import faiss
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import pickle
import dotenv
import tiktoken
import sys
from argparse import ArgumentParser
import ast
dotenv.load_dotenv()
ps = list(Path("inputs").glob("**/*.py"))
data = []
sources = []
for p in ps:
with open(p) as f:
data.append(f.read())
sources.append(p)
# with open('inputs/client.py', 'r') as f:
# tree = ast.parse(f.read())
# print(tree)
def get_functions_in_class(node):
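    """Return the names and source code of the functions defined in a class node."""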
functions = []
functions_code = []
for child in node.body:
if isinstance(child, ast.FunctionDef):
functions.append(child.name)
functions_code.append(ast.unparse(child))
return functions, functions_code
def get_classes_and_functions(source_code):
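    """Map each class in the source to a dict of its function names and bodies."""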
tree = ast.parse(source_code)
classes = {}
for node in tree.body:
if isinstance(node, ast.ClassDef):
class_name = node.name
function_name, function = get_functions_in_class(node)
# join function name and function code
functions = dict(zip(function_name, function))
classes[class_name] = functions
return classes
structure_dict = {}
c1 = 0
for code in data:
classes = get_classes_and_functions(ast.parse(code))
source = str(sources[c1])
structure_dict[source] = classes
c1 += 1
# save the structure dict as json
import json
with open('structure_dict.json', 'w') as f:
json.dump(structure_dict, f)
# llm = OpenAI(temperature=0)
# prompt = PromptTemplate(
# input_variables=["code"],
# template="Code: {code}, Documentation: ",
# )
#
# print(prompt.format(code="print('hello world')"))
# print(llm(prompt.format(code="print('hello world')")))
if not Path("outputs").exists():
Path("outputs").mkdir()
c1 = len(structure_dict)
c2 = 0
for source, classes in structure_dict.items():
c2 += 1
print(f"Processing file {c2}/{c1}")
f1 = len(classes)
f2 = 0
for class_name, functions in classes.items():
f2 += 1
print(f"Processing class {f2}/{f1}")
source_w = source.replace("inputs/", "")
source_w = source_w.replace(".py", ".txt")
if not Path(f"outputs/{source_w}").exists():
with open(f"outputs/{source_w}", "w") as f:
f.write(f"Class: {class_name}")
else:
with open(f"outputs/{source_w}", "a") as f:
f.write(f"\n\nClass: {class_name}")
# append class name to the front
        b1 = len(functions)
        b2 = 0
        for function in functions:
            b2 += 1
            print(f"Processing function {b2}/{b1}")
prompt = PromptTemplate(
input_variables=["code"],
template="Code: \n{code}, \nDocumentation: ",
)
llm = OpenAI(temperature=0)
response = llm(prompt.format(code=functions[function]))
if not Path(f"outputs/{source_w}").exists():
with open(f"outputs/{source_w}", "w") as f:
f.write(f"Function: {functions[function]}, \nDocumentation: {response}")
else:
with open(f"outputs/{source_w}", "a") as f:
f.write(f"\n\nFunction: {functions[function]}, \nDocumentation: {response}")
| [
"Code: \n{code}, \nDocumentation: "
] |
2024-01-10 | programmeddeath1/zebra-crssing | futureapp~pineapp_tabs.py | """Welcome to Reflex! This file outlines the steps to create a basic app."""
from rxconfig import config
from typing import List
import reflex as rx
import os, glob, json, shutil
from time import sleep
import subprocess
import re
import pandas as pd
from io import StringIO
import requests
import io
import random
from PIL import Image
from newspaper import Article
import urllib.request
import openai
openai.organization = "org-MGwFb1CrjeNZupHeaarBRyrN"
openai.api_key = os.getenv("OPENAI_API_KEY") or "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"
ram_url = "http://172.31.27.12:3000"
# from ram.models import tag2text_caption
# from ram import inference_tag2text as inference
# from ram import get_transform
# pretrained = '/home/dt/Projects/Work/citrusberry/recognize-anything/tag2text_swin_14m.pth'
# image_size = 384
# thre = 0.68
# specified_tags = 'None'
# # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device='cpu'
# transform = get_transform(image_size=image_size)
# delete_tag_index = [127,2961, 3351, 3265, 3338, 3355, 3359]
# # @st.cache_data # 👈 Add the caching decorator
# # @rx.memo
# def load_model():
# model = tag2text_caption(pretrained=pretrained,
# image_size=image_size,
# vit='swin_b',
# delete_tag_index=delete_tag_index)
# model.threshold = thre # threshold for tagging
# model.eval()
# model = model.to(device)
# return model
# model = load_model()
filename = f"{config.app_name}/{config.app_name}.py"
colindex = 0
accent_color = "#a883ed"
style = {
"background" : "rgb(250, 250, 250)",
# "font_family": "AirCerealMedium",
# "font_family": "'Ariel', sans-serif",
"font_size": "16px",
# "font_weight": "",
"::selection": {
"background_color": accent_color,
},
"th" : {
"background":"linear-gradient(45deg,#e6e4fc,#fceded)",
},
#Disco button
#Disco Button
rx.ResponsiveGrid: {
"animation": "fadeInAnimation ease 3s",
"animation-iteration-count": "1",
"animation-fill-mode": "forwards",
},
rx.Heading: {
# "font_size": "32px",
"font_family": "AirCereal",
# "font_family": "'Ariel', sans-serif",
"font_weight": "700",
"color": "#a61d55",
},
rx.Text: {
"font_family": "AirCerealNormalText",
"line-height" : "1.7",
# "font_weight": "100",
"font_size": "16px",
"font-weight": "normal",
# "font-variant": "normal"
},
rx.Card: {
"border-radius" : "16px",
# "box-shadow" : "5px 10px",
# "box-shadow" : "rgb(204, 219, 232) 3px 3px 6px 0px inset, rgba(255, 255, 255, 0.5) -3px -3px 6px 1px inset;"
"box-shadow" : "6px 6px 12px #b8b9be,-6px -6px 12px #fff!important",
"padding" : "10px 20px",
"margin" : "10px 20px",
# "background" : ""
},
rx.Badge: {
"padding" : "10px 20px!important",
"margin" : "10px 20px!important",
"text-transform" : "lowercase!important",
"border-radius" : "5px!important",
"box-shadow" : "5px 5px #000000!important",
},
rx.Slider: {
"height": "5px",
"overflow": "hidden",
"background": "#fff",
# "border" : "1px solid #29d",
},
rx.DataTable: {
"background": "linear-gradient(45deg,#e6e4fc,#fceded)",
}
# rx.SliderFilledTrack: {
# "position": "absolute",
# "top": "0",
# "right": "100%",
# "height": "5px",
# "width": "100%",
# "background": "#29d",
# }
rx.Tabs: {
"background" : "linear-gradient(45deg,#e6e4fc,#fceded) !important",
"color" : "#a61d55",
"shadow" : "lg",
"border-radius" : "16px",
"box-shadow" : "6px 6px 12px #b8b9be,-6px -6px 12px #fff!important",
}
}
class ArticleData:
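    """Simple JSON-backed store of previously analysed articles, used to populate the example dropdown."""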
def __init__(self, data_dir='data'):
self.data_dir = data_dir
if not os.path.exists(self.data_dir):
os.makedirs(self.data_dir)
self.data_file = os.path.join(self.data_dir, 'articles.json')
if not os.path.exists(self.data_file):
with open(self.data_file, 'w') as f:
json.dump([], f)
def store_article(self, article):
"""
Store article data to json file
"""
with open(self.data_file, 'r+') as f:
articles = json.load(f)
articles.append(article)
f.seek(0) # reset file position to the beginning.
json.dump(articles, f, indent=4)
def get_articles(self):
"""
Fetch all articles from json file
"""
with open(self.data_file, 'r') as f:
articles = json.load(f)
return articles
def delete_articles(self,article_url):
"""
Delete a specific article from json file
"""
with open(self.data_file, 'r+') as f:
articles = json.load(f)
articles = [article for article in articles if article['url'] != article_url]
f.seek(0) # reset file position to the beginning.
f.truncate() # remove existing file content.
json.dump(articles, f, indent=4)
article_data = ArticleData()
articles = article_data.get_articles()
a_options : List[str] = [datavalue['url'] for datavalue in articles]
b_options = []
def fetch_preloaded(self,input_url,article):
screendata = article
global model
global articles
self.processing,self.complete = False,False
yield
sleep(2)
import pdb;pdb. set_trace()
class State(rx.State):
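    """Reflex state for the demo: URL input, progress flags, image/tag results, and the LLM-derived tables."""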
# The colors to cycle through.
global a_options
colors: List[str] = [
"black",
"red",
"green",
"blue",
"purple",
]
# selected_option_a: str = "No selection yet."
# selected_option_a: str = a_options[0]
selected_option_a: str = ""
text: str = "Enter Page URL"
#text: str = a_options[0]
processing = False
complete = False
error_occured = False
image_url = ""
model_tags = ""
model_caption = ""
alert_msg = ""
alert_show: bool = False
alert_msg_header = ""
article_text = ""
article_title = ""
model_tag_list: List[str] = ['1','2']
# The index of the current color.
index: int = 0
df = pd.DataFrame([[]])
df1 = pd.DataFrame([[]])
pff1 = ""
pfl1 = ""
df2 = pd.DataFrame([[]])
pff2 = ""
pfl2 = ""
df3 = pd.DataFrame([[]])
pff3 = ""
pfl3 = ""
df4 = pd.DataFrame([[]])
pff4 = ""
pfl4 = ""
df5 = pd.DataFrame([[]])
pff5 = ""
pfl5 = ""
df6 = pd.DataFrame([[]])
pff6 = ""
pfl6 = ""
#Openai model outputs
iab_safety_response_msg = ""
iab_response_msg = ""
global_brands_response_msg = ""
indian_brands_response_msg = ""
web_inventory_response_msg = ""
ind_web_response_msg = ""
news_response_msg = ""
news_ind_response_msg = ""
sentiment_filled_bg = "red"
sentiment_empty_bg = "green.100"
sentiment_color = sentiment_empty_bg.split('.')[0]
sentiment_value = 0
sff1 = ""
sentiment_disp_value = 0
keyword_list = pd.DataFrame([[]])
# keyword_list: List[str] = [["1"]]
def next_color(self):
"""Cycle to the next color."""
self.index = (self.index + 1) % len(self.colors)
@rx.var
def color(self) -> str:
return self.colors[self.index]
def clear_text(self):
# import pdb;pdb.set_trace()
self.text = ""
def run_analysis(self):
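        """Download the article, tag its lead image via the X-RAE service, run the OpenAI prompt chain, and fill the UI fields."""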
screendata = {}
global model
global articles
self.processing,self.complete = False,False
yield
if self.text == "":
self.text = self.selected_option_a
if self.text == "Select an example." or self.text == "No selection yet." or self.text == "":
self.alert_msg_header = "Error"
self.alert_msg = "Please enter url link or select a link from the dropdown"
self.alert_change()
self.processing,self.complete = False,True
yield
return
self.processing,self.complete = True,False
self.iab_safety_response_msg = ""
self.iab_safety_response_msg = ""
self.iab_response_msg = ""
self.global_brands_response_msg = ""
self.indian_brands_response_msg = ""
self.web_inventory_response_msg = ""
self.ind_web_response_msg = ""
self.news_response_msg = ""
self.news_ind_response_msg = ""
self.sentiment_filled_bg = "red"
self.sentiment_empty_bg = "green.100"
self.sentiment_value = 0
self.sff1 = ""
self.sentiment_disp_value = 0
self.pff1 = " "
self.pfl1 = " "
self.pff2 = " "
self.pfl2 = " "
self.pff3 = " "
self.pfl3 = " "
self.pff4 = " "
self.pfl4 = " "
self.pff5 = " "
self.pfl5 = " "
self.pff6 = " "
self.pfl5 = " "
self.pfl6 = " "
yield
for article in articles:
if article['url'] == self.text:
screendata = article
article_image = screendata["image"]
imgpost = article_image.split('.')[-1]
article_title = screendata["title"]
print(f"Article image file is - {article_image}")
sleep(1)
try:
os.system(f"wget {article_image} -O downloads/mainimage.{imgpost}")
list_uploaded_file = f"downloads/mainimage.{imgpost}"
uploaded_file = list_uploaded_file
ui_uploaded_file = f'mainimage.{imgpost}'
shutil.move(uploaded_file,f'assets/{ui_uploaded_file}')
uploaded_file = "assets/"+ui_uploaded_file
except IndexError as error:
print("Image file doesnt exist")
uploaded_file = "doesntexist.txt"
except Exception as error:
print(error)
uploaded_file = "doesntexist.txt"
if article_title and os.path.exists(uploaded_file):
print("Main Execution")
self.processing,self.complete = False,True
image = Image.open(uploaded_file)
self.image_url = image
self.article_title = article_title
self.article_text = screendata["text"]
self.model_tag_list = screendata["tags"]
self.model_caption = screendata["caption"]
try:
self.sff1 = screendata["sff1"]
self.keyword_list = pd.DataFrame(screendata["keyword_list"])
self.sentiment_disp_value = screendata["sentiment_disp_value"]
self.sentiment_value = int(self.sentiment_disp_value * 10)
self.sentiment_disp_value = f"{self.sentiment_value}%"
self.sentiment_filled_bg = screendata["sentiment_filled_bg"]
self.sentiment_empty_bg = screendata["sentiment_empty_bg"]
self.sentiment_color = screendata["sentiment_empty_bg"].split('.')[0]
except Exception as error:
print(error)
try:
self.iab_safety_response_msg = screendata["sff1"]
except:
print(error)
yield
print("Get IAB Categories")
try:
self.df = pd.DataFrame(screendata["df"])
except Exception as error:
print(error)
self.error_occured = True
self.iab_response_msg = screendata["df"]
yield
print("Get Brands")
try:
self.pff1 = screendata["pff1"]
self.df1 = pd.DataFrame(screendata["df1"])
self.pfl1 = screendata["pfl1"]
except Exception as error:
print(error)
self.pff1 = screendata["pff1"]
yield
print("Indian Brands")
try:
self.pff2 = screendata["pff2"]
self.df2 = pd.DataFrame(screendata["df2"])
self.pfl2 = screendata["pfl2"]
except Exception as error:
print(error)
self.pff2 = screendata["pff2"]
yield
print("Websites")
try:
self.pff3 = screendata["pff3"]
self.df3 = pd.DataFrame(screendata["df3"])
self.pfl3 = screendata["pfl3"]
except Exception as error:
print(error)
self.pff3 = screendata["pff3"]
yield
print("Indian Websites")
try:
self.pff4 = screendata["pff4"]
self.df4 = pd.DataFrame(screendata["df4"])
self.pfl4 = screendata["pfl4"]
except Exception as error:
print(error)
self.pff4 = screendata["pff4"]
yield
print("News")
try:
self.pff5 = screendata["pff5"]
self.df5 = pd.DataFrame(screendata["df5"])
self.pfl5 = screendata["pfl5"]
except Exception as error:
print(error)
self.pff5 = screendata["pfl5"]
yield
print("News India")
try:
self.pff6 = screendata["pff6"]
self.df6 = pd.DataFrame(screendata["df6"])
self.pfl6 = screendata["pfl6"]
except Exception as error:
print(error)
self.pff6 = screendata["pff6"]
yield
return True
elif uploaded_file == "parisingerror":
self.alert_msg_header = "Error"
self.alert_msg = "Failed to parse url"
self.alert_change()
self.processing,self.complete = False,True
yield
elif not os.path.exists(uploaded_file):
self.alert_msg_header = "Error"
self.alert_msg = "Failed to load image"
self.alert_change()
self.processing,self.complete = False,True
yield
elif not article.title:
self.alert_msg_header = "Error"
self.alert_msg = "Failed to load data file"
self.alert_change()
self.processing,self.complete = False,True
yield
else:
self.alert_msg_header = "Error"
self.alert_msg = "Unknown Error"
self.alert_change()
self.processing,self.complete = False,True
yield
return True
removestatus = [os.remove(file) for file in glob.glob('downloads/*')]
removestatus = [os.remove(file) for file in glob.glob('assets/mainimage*')]
article = Article(self.text)
article.download()
# article.html
article.parse()
article_image = article.top_image
imgpost = article_image.split('.')[-1]
print(f"Article image file is - {article_image}")
try:
# Bug 001 - Websites like khaleej times mask image url, download through wget fails
#os.system(f"wget {article_image} -O downloads/mainimage.{imgpost}")
if 'jpg' in article_image or 'png' in article_image or 'jpeg' in article_image:
os.system(f"wget {article_image} -O downloads/mainimage.{imgpost}")
else:
response = requests.get(article_image)
imgpost = 'jpg'
# Check if the request was successful
if response.status_code == 200:
# Write the content of the response to a file
with open(f'downloads/mainimage.{imgpost}', 'wb') as file:
file.write(response.content)
list_uploaded_file = f"downloads/mainimage.{imgpost}"
uploaded_file = list_uploaded_file
ui_uploaded_file = f'mainimage.{imgpost}'
shutil.move(uploaded_file,f'assets/{ui_uploaded_file}')
uploaded_file = "assets/"+ui_uploaded_file
except IndexError as error:
print("Image file doesnt exist")
uploaded_file = "doesntexist.txt"
except Exception as error:
print(error)
uploaded_file = "doesntexist.txt"
if article.title and os.path.exists(uploaded_file):
print("Main Execution")
# image = Image.open(uploaded_file)
# self.image_url = ui_uploaded_file
print(uploaded_file)
image = Image.open(uploaded_file)
self.image_url = image
if image.mode == 'RGBA':
image = image.convert('RGB')
# image = transform(image).unsqueeze(0).to(device)
byte_arr = io.BytesIO()
image.save(byte_arr, format='JPEG')
byte_arr = byte_arr.getvalue()
# POST request to the Flask API
url = f"{ram_url}/analyze_image"
try:
response = requests.post(
url,
files={
'image': ('image.jpg', byte_arr, 'image/jpeg')
}
, timeout=10
)
except Exception as connerror:
print(connerror)
self.alert_msg_header = "Error"
self.alert_msg = "Image Processing Demo Service is Down - Will be up from 9:00 AM to 8:30 AM IST"
self.alert_change()
self.processing,self.complete = False,False
yield
return True
# Print the response
print(response.json())
res = response.json()
# res = inference(image, model, specified_tags)
self.article_title = article.title
if 'Register to read and get full access to gulfnews.com' in article.text:
self.article_text = f'This is a premium article. The text is unavailable - {article.text[0:2000]}'
else:
self.article_text = article.text[0:2000]
print("Model Identified Tags: ", res[0])
print("User Specified Tags: ", res[1])
print("Image Caption: ", res[2])
self.model_tag_list = res[0].split('|')
self.model_caption = res[2]
self.processing,self.complete = False,True
yield
# for i in range(0,10):
try:
mesg1 = {"role":"user","content":f"Please analyze the sentiment of the following message, and get the top 10 keywords or keyphrases - {self.article_text}. Rate the sentiment on a scale of 1 to 10 with 10 being extremely positive and 1 being extremely negative. Send response as first paragraph with only one word describing the sentiment of the article - positive.,negative. or neutral. ,preceded by 'Sentiment:' and followed by two next line characters, list of only 10 keywords separated by next line and last paragraph giving the rating.Show only the numeric value of the rating in curly braces."}
# mesg1 = {"role":"user","content":f"Please analyze the sentiment of the following message, and get the top 10 keywords or keyphrases - {self.article_text}. Rate the sentiment on a scale of 1 to 10 with 10 being extremely positive and 1 being extremely negative. Send response as first paragraph with only one word describing the sentiment of the article - positive.,negative. or neutral. ,preceded by 'Sentiment:', list of only 10 keywords separated by next line and last paragraph giving the rating.Show only the numeric value of the rating in curly braces."}
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role":"system","content":f"Please respond as a content expert and consultant and do not include disclaimers."},
mesg1
# {"role":"user","content":f"Suggest brands whose tone of voice resonates with following tags - {res[0]} - {res[2]}"}
],
request_timeout=20
)
iab_safety_response_msg = response["choices"][0]["message"]
text = iab_safety_response_msg["content"]
print(text)
paragraphs = text.split('\n\n')
print(paragraphs)
try:
self.sff1 = paragraphs[0].split('Sentiment:')[-1].split('.')[0].strip() + " - "
except:
self.sff1 = paragraphs[0].split('Sentiment:')[-1].strip() + " - "
# import pdb;pdb.set_trace()
table_content = paragraphs[1]
brand_names = table_content.replace('`','').replace('|','').split('\n')
data = []
for match in brand_names:
if 'Key' in match or 'key' in match or '---' in match or not match or 'rand' in match:
continue
# image_tag = match[0]
# iab_category = match[1]
keyword = match
data.append({'Keywords': keyword})
if len(data) > 10:
break
self.keyword_list = pd.DataFrame(data)
# self.iab_safety_response_msg = iab_safety_response_msg["content"]
para2 = paragraphs[-1] if match not in paragraphs[-1] else ""
match = re.search(r"{([0-9.]+)}$", para2)
if match:
self.sentiment_disp_value = float(match.group(1))
self.sentiment_value = int(self.sentiment_disp_value * 10)
self.sentiment_disp_value = f"{self.sentiment_value}%"
else:
self.sentiment_value = 0
if self.sentiment_value <= 40:
self.sentiment_filled_bg = "red"
self.sentiment_empty_bg = "red.100"
self.sentiment_color = "red"
elif self.sentiment_value >= 60:
self.sentiment_filled_bg = "green"
self.sentiment_empty_bg = "green.100"
self.sentiment_color = "green"
else:
self.sentiment_filled_bg = "grey"
self.sentiment_empty_bg = "grey.100"
self.sentiment_color = "grey"
except Exception as error:
print(error)
try:
self.iab_safety_response_msg = text
except:
print(error)
yield
# st.info(f'X-rae Response - {iab_response_msg["content"]}')
print("Get IAB Categories")
sysmessage = {"role":"system","content":f"Please respond as a content expert and consultant and do not include disclaimers.Please provide information in tabular format or in a format which can be parsed into a table using a python API."}
message1 = {"role":"user","content":f"Please take a guess of the IAB categories for an article with following image tags and caption - {res[0]} - {res[2]}. Return a table with single column of IAB category list "}
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
# {"role":"user","content":f"Suggest brands whose tone of voice resonates with following tags - {res[0]} - {res[2]}"}
],
request_timeout=20
)
iab_response_msg = response["choices"][0]["message"]
# Convert response to data
text = iab_response_msg["content"]
# matches = re.findall(r'(\w+)\s*->\s*(.*)', text)
matches = re.findall(r'\| (.*) \|\n', text)
data = []
for match in matches:
if 'IAB' in match:
continue
# image_tag = match[0]
# iab_category = match[1]
iab_category = match
# data.append({'Image Tag': image_tag, 'IAB Category': iab_category})
data.append({'IAB Category': iab_category})
# Create a DataFrame from the data
# print(data)
self.df = pd.DataFrame(data)
except Exception as error:
print(error)
self.error_occured = True
self.iab_response_msg = text
# self.iab_response_msg = iab_response_msg["content"]
yield
print("Get Brands")
# import pdb;pdb.set_trace()
reply1 = {"role":"assistant","content":f"{iab_response_msg['content']}"}
# for trial in range(0,10):
message2 = {"role":"user","content":f"Suggest list of top 10 global brands whose tone of voice resonates with following tags - {res[0]} - {res[2]}.Return response as a table with single column of Brand names without index"}
#Return a table as comma-separated values with single column of Brand names
# # st.info(f'X-rae Response - {iab_response_msg["content"]}')
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2
],
request_timeout=20
)
global_brands_response_msg = response["choices"][0]["message"]
text = global_brands_response_msg["content"]
print(text)
paragraphs = text.split('\n\n')
# Extract last paragraph
# last_paragraph = re.sub(paragraph_pattern, '', text, count=1, flags=re.MULTILINE | re.DOTALL)
# Extract the first paragraph
self.pff1 = paragraphs[0]
# Extract the table content
table_content = paragraphs[1]
brand_names = table_content.replace('`','').replace('|','').split('\n')
data = []
for match in brand_names:
if 'IAB' in match or '---' in match or not match or 'rand' in match:
continue
# image_tag = match[0]
# iab_category = match[1]
iab_category = match
# data.append({'Image Tag': image_tag, 'IAB Category': iab_category})
data.append({'Brand Names': iab_category})
# Create a DataFrame from the data
# print(data)
self.df1 = pd.DataFrame(data)
# Create a DataFrame from the table content
# self.df1 = pd.read_csv(StringIO(table_content), skipinitialspace=True)
# Extract the last paragraph
self.pfl1 = paragraphs[-1] if match not in paragraphs[-1] else ""
# self.global_brands_response_msg = global_brands_response_msg["content"]
except Exception as error:
print(error)
self.pff1 = text
yield
print("Indian Brands")
reply2 = {"role":"assistant","content":f"{global_brands_response_msg['content']}"}
message3 = {"role":"user","content":f"Suggest list of top 10 Indian brands whose tone of voice resonates with following tags - {res[0]} - {res[2]}. Return response as a table with single column of Brand names without index"}
# # st.info(f'X-rae Response - {global_brands_response_msg["content"]}')
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2,
reply2,
message3,
],
request_timeout=20
)
indian_brands_response_msg = response["choices"][0]["message"]
# self.indian_brands_response_msg = indian_brands_response_msg["content"]
text = indian_brands_response_msg["content"]
print(text)
paragraphs = text.split('\n\n')
# Extract the first paragraph
self.pff2 = paragraphs[0]
# Extract the table content
table_content = paragraphs[1]
brand_names = table_content.replace('`','').replace('|','').split('\n')
data = []
for match in brand_names:
if 'IAB' in match or '---' in match or not match or 'rand' in match:
continue
# image_tag = match[0]
# iab_category = match[1]
iab_category = match
# data.append({'Image Tag': image_tag, 'IAB Category': iab_category})
data.append({'Brand Names': iab_category})
self.df2 = pd.DataFrame(data)
# # Create a DataFrame from the table content
# self.df2 = pd.read_csv(StringIO(table_content), skipinitialspace=True)
# Extract the last paragraph
self.pfl2 = paragraphs[-1] if match not in paragraphs[-1] else ""
except Exception as error:
print(error)
self.pff2 = text
yield
print("Websites")
reply3 = {"role":"assistant","content":f"{indian_brands_response_msg['content']}"}
message4 = {"role":"user","content":f"Suggest the right list of top 10 global website inventory to run the aboce brand ads along with the IAB categories. Return a table as comma-separated values with two columns - IAB Category and Website Name"}
# # st.info(f'X-rae Response - {indian_brands_response_msg["content"]}')
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2,
reply2,
message3,
reply3,
message4,
],
request_timeout=20
)
web_inventory_response_msg = response["choices"][0]["message"]
# self.web_inventory_response_msg = web_inventory_response_msg["content"]
text = web_inventory_response_msg["content"]
if "apologize" in text or 'unfortunately' in text or 'Unfortunately' in text or "Apologize" in text or "Apologise" in text or "apologise" in text:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2,
reply2,
message3,
reply3,
message4,
],
request_timeout=20
)
web_inventory_response_msg = response["choices"][0]["message"]
# self.web_inventory_response_msg = web_inventory_response_msg["content"]
text = web_inventory_response_msg["content"]
print(text)
paragraphs = text.split('\n\n')
# Extract the first paragraph
self.pff3 = paragraphs[0]
# Extract the table content
table_content = paragraphs[1]
matches = table_content.split('\n')
data = []
for match in matches:
if 'IAB' in match or '---' in match or not match or 'rand' in match:
continue
iab_category,webname = match.split(',')
data.append({'Website Name': webname, 'IAB Category': iab_category})
self.df3 = pd.DataFrame(data)
# Create a DataFrame from the table content
# self.df3 = pd.read_csv(StringIO(table_content), skipinitialspace=True)
# self.df3 = pd.DataFrame([row.split(':') for row in table_content], columns=['Category', 'Websites'])
# Strip leading and trailing whitespace from the DataFrame
# self.df3 = self.df3.apply(lambda x: x.str.strip())
# Extract the last paragraph
self.pfl3 = paragraphs[-1] if match not in paragraphs[-1] else ""
except Exception as error:
print(error)
self.pff3 = text
yield
print("Indian Websites")
reply4 = {"role":"assistant","content":f"{web_inventory_response_msg['content']}"}
message5 = {"role":"user","content":f"Suggest the right list of top 10 Indian website inventory to run the following brand ads along with the IAB categories. Return a table as comma-separated values with two columns - IAB Category and Website Name"}
# # st.info(f'X-rae Response - {web_inventory_response_msg["content"]}')
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2,
reply2,
message3,
reply3,
message4,
reply4,
message5,
],
request_timeout=20
)
# import pdb;pdb.set_trace()
ind_web_response_msg = response["choices"][0]["message"]
# self.ind_web_response_msg = ind_web_response_msg["content"]
text = ind_web_response_msg["content"]
print(text)
if "apologize" in text or 'unfortunately' in text or 'Unfortunately' in text or "Apologize" in text or "Apologise" in text or "apologise" in text:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2,
reply2,
message3,
reply3,
message4,
reply4,
message5,
],
request_timeout=20
)
# import pdb;pdb.set_trace()
ind_web_response_msg = response["choices"][0]["message"]
# self.ind_web_response_msg = ind_web_response_msg["content"]
text = ind_web_response_msg["content"]
print(text)
paragraphs = text.split('\n\n')
# Extract the first paragraph
self.pff4 = paragraphs[0]
# Extract the table content
table_content = paragraphs[1]
matches = table_content.split('\n')
data = []
for match in matches:
if 'IAB' in match or '---' in match or not match or 'rand' in match:
continue
iab_category,webname = match.split(',')
data.append({'Website Name': webname, 'IAB Category': iab_category})
self.df4 = pd.DataFrame(data)
# Create a DataFrame from the table content
# self.df4 = pd.read_csv(StringIO(table_content), skipinitialspace=True)
# self.df4 = pd.DataFrame([row.split(':') for row in table_content], columns=['Category', 'Websites'])
# self.df4 = self.df4.apply(lambda x: x.str.strip())
# Extract the last paragraph
self.pfl4 = paragraphs[-1] if match not in paragraphs[-1] else ""
except Exception as error:
print(error)
self.pff4 = text
yield
print("News")
reply5 = {"role":"assistant","content":f"{ind_web_response_msg['content']}"}
message6 = {"role":"user","content":f"Suggest the right list of top 10 News website inventory to run the following brand ads along with the IAB categories. Return a table as comma-separated values with two columns - IAB Category and Website Name"}
# # st.info(f'X-rae Response - {ind_web_response_msg["content"]}')
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2,
reply2,
message3,
reply3,
message4,
reply4,
message5,
reply5,
message6,
],
request_timeout=20
)
# import pdb;pdb.set_trace()
news_response_msg = response["choices"][0]["message"]
# self.news_response_msg = news_response_msg["content"]
text = news_response_msg["content"]
if "apologize" in text or 'Unfortunately' in text or "Apologize" in text or "Apologise" in text or "apologise" in text:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2,
reply2,
message3,
reply3,
message4,
reply4,
message5,
reply5,
message6,
],
request_timeout=20
)
# import pdb;pdb.set_trace()
news_response_msg = response["choices"][0]["message"]
# self.news_response_msg = news_response_msg["content"]
text = news_response_msg["content"]
print(text)
paragraphs = text.split('\n\n')
# Extract the first paragraph
self.pff5 = paragraphs[0]
# Extract the table content
table_content = paragraphs[1]
matches = table_content.split('\n')
data = []
for match in matches:
if 'IAB' in match or '---' in match or not match or 'rand' in match:
continue
iab_category,webname = match.split(',')
data.append({'Website Name': webname, 'IAB Category': iab_category})
self.df5 = pd.DataFrame(data)
# Create a DataFrame from the table content
# self.df5 = pd.read_csv(StringIO(table_content), skipinitialspace=True)
# Extract the last paragraph
self.pfl5 = paragraphs[-1] if match not in paragraphs[-1] else ""
except Exception as error:
print(error)
self.pff5 = text
yield
print("News India")
reply6 = {"role":"assistant","content":f"{news_response_msg['content']}"}
message7 = {"role":"user","content":f"Suggest the right list of top 10 Indian News website inventory to run the following brand ads along with the IAB categories. Return a table as comma-separated values with two columns - IAB Category and Website Name"}
# # st.info(f'X-rae Response - {news_response_msg["content"]}')
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2,
reply2,
message3,
reply3,
message4,
reply4,
message5,
reply5,
message6,
reply6,
message7,
],
request_timeout=20
)
# import pdb;pdb.set_trace()
news_ind_response_msg = response["choices"][0]["message"]
# self.news_ind_response_msg = news_ind_response_msg["content"]
text = news_ind_response_msg["content"]
if "apologize" in text or 'Unfortunately' in text or "Apologize" in text or "Apologise" in text or "apologise" in text:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2,
reply2,
message3,
reply3,
message4,
reply4,
message5,
reply5,
message6,
reply6,
message7,
],
request_timeout=20
)
# import pdb;pdb.set_trace()
news_ind_response_msg = response["choices"][0]["message"]
# self.news_ind_response_msg = news_ind_response_msg["content"]
text = news_ind_response_msg["content"]
print(text)
paragraphs = text.split('\n\n')
# Extract the first paragraph
self.pff6 = paragraphs[0]
# Extract the table content
table_content = paragraphs[1]
matches = table_content.split('\n')
data = []
for match in matches:
if 'IAB' in match or '---' in match or not match or 'rand' in match:
continue
iab_category,webname = match.split(',')
data.append({'Website Name': webname, 'IAB Category': iab_category})
self.df6 = pd.DataFrame(data)
# Create a DataFrame from the table content
# self.df6 = pd.read_csv(StringIO(table_content), skipinitialspace=True)
# Extract the last paragraph
self.pfl6 = paragraphs[-1] if match not in paragraphs[-1] else ""
except Exception as error:
print(error)
self.pff6 = text
yield
return True
# st.info(f'X-rae Response - {news_ind_response_msg["content"]}')
elif not os.path.exists(uploaded_file):
self.alert_msg_header = "Error"
self.alert_msg = "Failed to load image"
self.alert_change()
self.processing,self.complete = False,True
yield
# return rx.window_alert("Failed to load image")
# st.error("Failed to load image")
elif not article.title:
self.alert_msg_header = "Error"
self.alert_msg = "Failed to load data file"
self.alert_change()
self.processing,self.complete = False,True
yield
# return rx.window_alert("Failed to load data file ")
# st.error("Failed to load data file ")
else:
self.alert_msg_header = "Error"
self.alert_msg = "Unknown Error"
self.alert_change()
self.processing,self.complete = False,True
yield
# return rx.window_alert("Failed to load data file ")
# print(f"Files not found - {uploaded_file} - 'out.json'")
def alert_change(self):
self.alert_show = not (self.alert_show)
def tag_list(tag: str):
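    """Render a single tag as a styled badge, alternating between two accent colors."""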
# index = random.randint(0,4)
global colindex
# import pdb;pdb.set_trace()
if colindex == 0:
colindex = 1
else:
colindex = 0
print(f"Color index is {colindex}")
colorval = ["#a61d55","#991BE2"]
return rx.badge(
tag, variant="solid",
background="transparent",
line_height="1.42",
# bg="#fff",
color=f"{colorval[colindex]}",##991BE2
border_color=f"{colorval[colindex]}",##991BE2
border_width="1px",
border_style= "solid",
font_size="1em",
font_weight="normal",
text_transform = "lowercase",
border_radius = "1.41em",
cursor = "pointer",
# box_shadow = "5px 5px #000000",
margin = "6px",
padding = "0.7em 1.4em"
)
def colored_box(color: str):
return rx.box(rx.text(color), bg=color)
def index() -> rx.Component:
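    """Page layout: logo header, example selector, URL input, Analyze button, and the results grid."""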
return rx.fragment(
rx.hstack(
rx.image(src="https://citrusberry.biz/assets/img/menu_logo1.png", width="41px", height="auto"),
rx.image(src="https://citrusberry.biz/assets/img/menu_logo.png", width="90px", height="auto"),
padding="10px",
margin="5px",
),
# rx.color_mode_button(rx.color_mode_icon(), float="right"),
rx.vstack(
rx.tooltip(
rx.card(
rx.center(
rx.image(src="logo-no-background.png", width="200px", height="auto"),
# rx.heading("X-Rae Output", size="xl", color="#fb5e78"),
# border_radius="15px",
# border_width="thick",
width="100%",
# border_color="#fb5e78",
),
),
# background="linear-gradient(90deg, #ff5c72, #a485f2)",
# rx.heading("Contextual AI Demo!", font_size="2em",color="#a61d55",),
label="Please enter or select a link and click on Analyze",
),
rx.alert_dialog(
rx.alert_dialog_overlay(
rx.alert_dialog_content(
rx.alert_dialog_header(State.alert_msg_header),
rx.alert_dialog_body(
State.alert_msg
),
rx.alert_dialog_footer(
rx.button(
"Close",
on_click=State.alert_change,
)
),
)
),
is_open=State.alert_show,
),
# rx.box("Get started by editing ", rx.code(filename, font_size="1em")),
rx.accordion(
rx.center(
rx.accordion_item(
rx.accordion_button(
rx.tooltip(
rx.text(
"Select an example from dropdown"
),
label="Click here to see Examples",
),
rx.accordion_icon(),
),
rx.accordion_panel(
rx.center(
rx.tooltip(
rx.select(
a_options,
placeholder="select.",
# on_change=State.set_selected_option_a,
# on_change=State.fetch_preloaded,
on_change=State.set_text,
),
label="Please select a link and Click on Analyze",
),
width="1000px"
),
),
),
),
allow_multiple=True,
width="80%",
font_size="16px",
# width="1000px",
),
rx.center(
rx.tooltip(
rx.icon(
# tag="link",margin_right="10px",on_click=rx.set_clipboard(State.selected_option_a),
tag="link",margin_right="10px",
),
# label="Copy Link",
label="Please enter a url link and click on Analyze",
),
rx.tooltip(
rx.input(
# placeholder="Enter the page url",
on_blur=State.set_text,
width="100%",
value=State.text,
on_change=State.set_text,
# default_value=State.text,
),
label="You can enter a custom url link and click on Analyze",
),
width="1000px"
),
rx.hstack(
# rx.button(
# "Clear", on_click=State.clear_text,width="100%",
# ),
rx.html("""
<button class='btn-101'>
Analyse
<svg>
<defs>
<filter id='glow'>
<fegaussianblur result='coloredBlur' stddeviation='5'></fegaussianblur>
<femerge>
<femergenode in='coloredBlur'></femergenode>
<femergenode in='coloredBlur'></femergenode>
<femergenode in='coloredBlur'></femergenode>
<femergenode in='SourceGraphic'></femergenode>
</femerge>
</filter>
</defs>
<rect />
</svg>
</button>
""",on_click=State.run_analysis),
# rx.button(
# "Analyze", on_click=State.run_analysis,is_loading=State.processing,width="100%",
# background_image="linear-gradient(90deg, #ff5c72, #a485f2)",
# ),
),
rx.cond(
State.processing,
rx.circular_progress(is_indeterminate=True,color="#a61d55"
),
),
rx.cond(
State.complete,
rx.responsive_grid(
rx.vstack(
# rx.divider(border_color="#a61d55"),
rx.heading(State.article_title, size="lg",margin="30px",
),
# rx.hstack(
rx.responsive_grid(
rx.card(
rx.center(
rx.image(
src=State.image_url,
height="25em",
width="37.5em",
),
border_radius="10px",
border_width="2px",
border_color="#a61d55",
width="100%",
),
header=rx.heading("Article Image", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
rx.card(
rx.text(State.article_text),
header=rx.heading("Article Text", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
columns=[2],
spacing="4",
),
# rx.divider(border_color="black"),
rx.responsive_grid(
rx.card(
rx.center(
rx.vstack(
rx.hstack(
rx.foreach(State.model_tag_list,tag_list),rx.spacer(),
),
rx.hstack(
rx.heading(State.model_caption, size="lg", ),
),
),
),
background="linear-gradient(45deg,#e6e4fc,#fceded)",
header=rx.heading("X RAE Image Analysis", size="lg"),
),
columns=[1],
spacing="4",
width="100%",
),
rx.responsive_grid(
# rx.divider(border_color="black"),
rx.card(
rx.center(
rx.vstack(
# rx.hstack(
rx.heading(
State.sff1+State.sentiment_disp_value, color=State.sentiment_filled_bg,opacity="0.8"
),
# ),
# rx.hstack(
rx.progress(value=State.sentiment_value, width="100%",color_scheme=State.sentiment_color,height="15px",bg="#fff",opacity="0.8"),
# ),
width="75%",
),
),
background="linear-gradient(45deg,#e6e4fc,#fceded)",
header=rx.heading("Overall Sentiment", size="lg"),
),
columns=[1],
spacing="4",
width="100%",
),
rx.tabs(
items=[
(
"Targeting Metrics",
rx.responsive_grid(
rx.card(
rx.vstack(
rx.data_table(
data=State.keyword_list,
pagination=False,
search=False,
sort=False,
),
rx.text(State.iab_safety_response_msg),
),
header=rx.heading("Keywords", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
rx.card(
rx.cond(
State.error_occured,
rx.text(State.iab_response_msg),
rx.data_table(
data=State.df,
# pagination=True,
# search=True,
# sort=True,
),
),
header=rx.heading("IAB Categories", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
columns=[2],
spacing="4",
),
),
(
"Brands",
rx.responsive_grid(
rx.card(
rx.vstack(
rx.text(State.pff1),
rx.data_table(
data=State.df1,
# pagination=True,
# search=True,
# sort=True,
),
rx.text(State.pfl1,font_style="italic"),
),
header=rx.heading("Global Brands To Target", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
rx.card(
# rx.text(State.indian_brands_response_msg),
rx.vstack(
rx.text(State.pff2),
rx.data_table(
data=State.df2,
# pagination=True,
# search=True,
# sort=True,
),
rx.text(State.pfl2,font_style="italic"),
),
header=rx.heading("Indian Brands To Target", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
columns=[2],
spacing="4",
),
),
(
"Website Inventory",
rx.responsive_grid(
rx.card(
# rx.text(State.web_inventory_response_msg),
rx.vstack(
rx.text(State.pff3),
rx.data_table(
data=State.df3,
# pagination=True,
# search=True,
# sort=True,
),
rx.text(State.pfl3,font_style="italic"),
),
header=rx.heading("Website Inventory to target", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
rx.card(
# rx.text(State.ind_web_response_msg),
rx.vstack(
rx.text(State.pff4),
rx.data_table(
data=State.df4,
# pagination=True,
# search=True,
# sort=True,
),
rx.text(State.pfl4,font_style="italic"),
),
header=rx.heading("Indian Website Inventory to target", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
columns=[2],
spacing="4",
),
),
(
"News Inventory",
rx.responsive_grid(
rx.card(
# rx.text(State.news_response_msg),
rx.vstack(
rx.text(State.pff5),
rx.data_table(
data=State.df5,
# pagination=True,
# search=True,
# sort=True,
),
rx.text(State.pfl5,font_style="italic"),
),
header=rx.heading("News Website Inventory to target", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
rx.card(
# rx.text(State.news_ind_response_msg),
rx.vstack(
rx.text(State.pff6),
rx.data_table(
data=State.df6,
# pagination=True,
# search=True,
# sort=True,
),
rx.text(State.pfl6,font_style="italic"),
),
header=rx.heading("Indian News Website Inventory to target", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
columns=[2],
spacing="4",
),
)
],
),
),
animation="fadeInAnimation ease 3s",
)
),
spacing="1.5em",
font_size="1em",
padding="3%",
shadow="lg",
border_radius="lg",
),
width="100%",
height="auto",
#
)
def about():
return rx.text("About Page")
# Add state and page to the app.
app = rx.App(state=State,stylesheets=[
"styles/fontstyles.css","styles/center-simple.css","styles/introjs.min.css" # This path is relative to assets/
],style=style,scripts="intro.js")
app.add_page(index,title="Contextual Demo")
app.add_page(about, route="/about")
app.compile()
| [
"Suggest list of top 10 Indian brands whose tone of voice resonates with following tags - PLACEHOLDER - PLACEHOLDER. Return response as a table with single column of Brand names without index",
"Suggest the right list of top 10 News website inventory to run the following brand ads along with the IAB categories. Return a table as comma-separated values with two columns - IAB Category and Website Name",
"[PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER]",
"PLACEHOLDER",
"Please respond as a content expert and consultant and do not include disclaimers.",
"[PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER]",
"Suggest list of top 10 global brands whose tone of voice resonates with following tags - PLACEHOLDER - PLACEHOLDER.Return response as a table with single column of Brand names without index",
"Suggest the right list of top 10 Indian website inventory to run the following brand ads along with the IAB categories. Return a table as comma-separated values with two columns - IAB Category and Website Name",
"Suggest the right list of top 10 Indian News website inventory to run the following brand ads along with the IAB categories. Return a table as comma-separated values with two columns - IAB Category and Website Name",
"[PLACEHOLDER, PLACEHOLDER]",
"content",
"Suggest the right list of top 10 global website inventory to run the aboce brand ads along with the IAB categories. Return a table as comma-separated values with two columns - IAB Category and Website Name",
"Please respond as a content expert and consultant and do not include disclaimers.Please provide information in tabular format or in a format which can be parsed into a table using a python API.",
"[PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER]",
"Please take a guess of the IAB categories for an article with following image tags and caption - PLACEHOLDER - PLACEHOLDER. Return a table with single column of IAB category list "
] |
2024-01-10 | programmeddeath1/zebra-crssing | futureapp~pineapp_scroll.py | """Welcome to Reflex! This file outlines the steps to create a basic app."""
from rxconfig import config
from typing import List
import reflex as rx
import os, glob, json, shutil
from time import sleep
import subprocess
import re
import pandas as pd
from io import StringIO
import requests
import io
import random
from PIL import Image
from newspaper import Article
import urllib.request
import openai
openai.organization = "org-MGwFb1CrjeNZupHeaarBRyrN"
openai.api_key = os.getenv("OPENAI_API_KEY") or "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"
ram_url = "http://172.31.27.12:3000"
# from ram.models import tag2text_caption
# from ram import inference_tag2text as inference
# from ram import get_transform
# pretrained = '/home/dt/Projects/Work/citrusberry/recognize-anything/tag2text_swin_14m.pth'
# image_size = 384
# thre = 0.68
# specified_tags = 'None'
# # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device='cpu'
# transform = get_transform(image_size=image_size)
# delete_tag_index = [127,2961, 3351, 3265, 3338, 3355, 3359]
# # @st.cache_data # 👈 Add the caching decorator
# # @rx.memo
# def load_model():
# model = tag2text_caption(pretrained=pretrained,
# image_size=image_size,
# vit='swin_b',
# delete_tag_index=delete_tag_index)
# model.threshold = thre # threshold for tagging
# model.eval()
# model = model.to(device)
# return model
# model = load_model()
filename = f"{config.app_name}/{config.app_name}.py"
colindex = 0
accent_color = "#a883ed"
style = {
"background" : "rgb(250, 250, 250)",
# "font_family": "AirCerealMedium",
# "font_family": "'Ariel', sans-serif",
"font_size": "16px",
# "font_weight": "",
"::selection": {
"background_color": accent_color,
},
"th" : {
"background":"linear-gradient(45deg,#e6e4fc,#fceded)",
},
#Disco button
#Disco Button
rx.ResponsiveGrid: {
"animation": "fadeInAnimation ease 3s",
"animation-iteration-count": "1",
"animation-fill-mode": "forwards",
},
rx.Heading: {
# "font_size": "32px",
"font_family": "AirCereal",
# "font_family": "'Ariel', sans-serif",
"font_weight": "700",
"color": "#a61d55",
},
rx.Text: {
"font_family": "AirCerealNormalText",
"line-height" : "1.7",
# "font_weight": "100",
"font_size": "16px",
"font-weight": "normal",
# "font-variant": "normal"
},
rx.Card: {
"border-radius" : "16px",
# "box-shadow" : "5px 10px",
# "box-shadow" : "rgb(204, 219, 232) 3px 3px 6px 0px inset, rgba(255, 255, 255, 0.5) -3px -3px 6px 1px inset;"
"box-shadow" : "6px 6px 12px #b8b9be,-6px -6px 12px #fff!important",
"padding" : "10px 20px",
"margin" : "10px 20px",
# "background" : ""
},
rx.Badge: {
"padding" : "10px 20px!important",
"margin" : "10px 20px!important",
"text-transform" : "lowercase!important",
"border-radius" : "5px!important",
"box-shadow" : "5px 5px #000000!important",
},
rx.Slider: {
"height": "5px",
"overflow": "hidden",
"background": "#fff",
# "border" : "1px solid #29d",
},
rx.DataTable: {
"background": "linear-gradient(45deg,#e6e4fc,#fceded)",
}
# rx.SliderFilledTrack: {
# "position": "absolute",
# "top": "0",
# "right": "100%",
# "height": "5px",
# "width": "100%",
# "background": "#29d",
# }
}
class ArticleData:
def __init__(self, data_dir='data'):
self.data_dir = data_dir
if not os.path.exists(self.data_dir):
os.makedirs(self.data_dir)
self.data_file = os.path.join(self.data_dir, 'articles.json')
if not os.path.exists(self.data_file):
with open(self.data_file, 'w') as f:
json.dump([], f)
def store_article(self, article):
"""
Store article data to json file
"""
with open(self.data_file, 'r+') as f:
articles = json.load(f)
articles.append(article)
f.seek(0) # reset file position to the beginning.
json.dump(articles, f, indent=4)
def get_articles(self):
"""
Fetch all articles from json file
"""
with open(self.data_file, 'r') as f:
articles = json.load(f)
return articles
def delete_articles(self,article_url):
"""
Delete a specific article from json file
"""
with open(self.data_file, 'r+') as f:
articles = json.load(f)
articles = [article for article in articles if article['url'] != article_url]
f.seek(0) # reset file position to the beginning.
f.truncate() # remove existing file content.
json.dump(articles, f, indent=4)
article_data = ArticleData()
articles = article_data.get_articles()
a_options : List[str] = [datavalue['url'] for datavalue in articles]
b_options = []
def fetch_preloaded(self,input_url,article):
screendata = article
global model
global articles
self.processing,self.complete = False,False
yield
sleep(2)
import pdb;pdb. set_trace()
class State(rx.State):
# The colors to cycle through.
global a_options
colors: List[str] = [
"black",
"red",
"green",
"blue",
"purple",
]
# selected_option_a: str = "No selection yet."
# selected_option_a: str = a_options[0]
selected_option_a: str = ""
text: str = "Enter Page URL"
#text: str = a_options[0]
processing = False
complete = False
error_occured = False
image_url = ""
model_tags = ""
model_caption = ""
alert_msg = ""
alert_show: bool = False
alert_msg_header = ""
article_text = ""
article_title = ""
model_tag_list: List[str] = ['1','2']
# The index of the current color.
index: int = 0
df = pd.DataFrame([[]])
df1 = pd.DataFrame([[]])
pff1 = ""
pfl1 = ""
df2 = pd.DataFrame([[]])
pff2 = ""
pfl2 = ""
df3 = pd.DataFrame([[]])
pff3 = ""
pfl3 = ""
df4 = pd.DataFrame([[]])
pff4 = ""
pfl4 = ""
df5 = pd.DataFrame([[]])
pff5 = ""
pfl5 = ""
df6 = pd.DataFrame([[]])
pff6 = ""
pfl6 = ""
#Openai model outputs
iab_safety_response_msg = ""
iab_response_msg = ""
global_brands_response_msg = ""
indian_brands_response_msg = ""
web_inventory_response_msg = ""
ind_web_response_msg = ""
news_response_msg = ""
news_ind_response_msg = ""
sentiment_filled_bg = "red"
sentiment_empty_bg = "green.100"
sentiment_color = sentiment_empty_bg.split('.')[0]
sentiment_value = 0
sff1 = ""
sentiment_disp_value = 0
keyword_list = pd.DataFrame([[]])
# keyword_list: List[str] = [["1"]]
def next_color(self):
"""Cycle to the next color."""
self.index = (self.index + 1) % len(self.colors)
@rx.var
def color(self) -> str:
return self.colors[self.index]
def clear_text(self):
# import pdb;pdb.set_trace()
self.text = ""
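    # run_analysis is the main event handler (a generator - each `yield` pushes a
    # partial UI update). Flow: resolve the URL from the text box or the examples
    # dropdown; if it matches an entry in articles.json, replay the stored tags,
    # caption, sentiment and tables; otherwise download the article with
    # newspaper, fetch its top image, send the image to the RAM tagging service,
    # and run a chained sequence of GPT-3.5 prompts (sentiment/keywords, IAB
    # categories, global/Indian brands, website, Indian, news and Indian-news
    # inventory), parsing each reply into DataFrames for the result cards.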
def run_analysis(self):
screendata = {}
global model
global articles
self.processing,self.complete = False,False
yield
if self.text == "":
self.text = self.selected_option_a
if self.text == "Select an example." or self.text == "No selection yet." or self.text == "":
self.alert_msg_header = "Error"
self.alert_msg = "Please enter url link or select a link from the dropdown"
self.alert_change()
self.processing,self.complete = False,True
yield
return
self.processing,self.complete = True,False
        self.iab_safety_response_msg = ""
self.iab_response_msg = ""
self.global_brands_response_msg = ""
self.indian_brands_response_msg = ""
self.web_inventory_response_msg = ""
self.ind_web_response_msg = ""
self.news_response_msg = ""
self.news_ind_response_msg = ""
self.sentiment_filled_bg = "red"
self.sentiment_empty_bg = "green.100"
self.sentiment_value = 0
self.sff1 = ""
self.sentiment_disp_value = 0
yield
for article in articles:
if article['url'] == self.text:
screendata = article
article_image = screendata["image"]
imgpost = article_image.split('.')[-1]
article_title = screendata["title"]
print(f"Article image file is - {article_image}")
sleep(1)
try:
os.system(f"wget {article_image} -O downloads/mainimage.{imgpost}")
list_uploaded_file = f"downloads/mainimage.{imgpost}"
uploaded_file = list_uploaded_file
ui_uploaded_file = f'mainimage.{imgpost}'
shutil.move(uploaded_file,f'assets/{ui_uploaded_file}')
uploaded_file = "assets/"+ui_uploaded_file
except IndexError as error:
print("Image file doesnt exist")
uploaded_file = "doesntexist.txt"
except Exception as error:
print(error)
uploaded_file = "doesntexist.txt"
if article_title and os.path.exists(uploaded_file):
print("Main Execution")
self.processing,self.complete = False,True
image = Image.open(uploaded_file)
self.image_url = image
self.article_title = article_title
self.article_text = screendata["text"]
self.model_tag_list = screendata["tags"]
self.model_caption = screendata["caption"]
try:
self.sff1 = screendata["sff1"]
self.keyword_list = pd.DataFrame(screendata["keyword_list"])
self.sentiment_disp_value = screendata["sentiment_disp_value"]
self.sentiment_value = int(self.sentiment_disp_value * 10)
self.sentiment_disp_value = f"{self.sentiment_value}%"
self.sentiment_filled_bg = screendata["sentiment_filled_bg"]
self.sentiment_empty_bg = screendata["sentiment_empty_bg"]
self.sentiment_color = screendata["sentiment_empty_bg"].split('.')[0]
except Exception as error:
print(error)
try:
self.iab_safety_response_msg = screendata["sff1"]
except:
print(error)
yield
print("Get IAB Categories")
try:
self.df = pd.DataFrame(screendata["df"])
except Exception as error:
print(error)
self.error_occured = True
self.iab_response_msg = screendata["df"]
yield
print("Get Brands")
try:
self.pff1 = screendata["pff1"]
self.df1 = pd.DataFrame(screendata["df1"])
self.pfl1 = screendata["pfl1"]
except Exception as error:
print(error)
self.pff1 = screendata["pff1"]
yield
print("Indian Brands")
try:
self.pff2 = screendata["pff2"]
self.df2 = pd.DataFrame(screendata["df2"])
self.pfl2 = screendata["pfl2"]
except Exception as error:
print(error)
self.pff2 = screendata["pff2"]
yield
print("Websites")
try:
self.pff3 = screendata["pff3"]
self.df3 = pd.DataFrame(screendata["df3"])
self.pfl3 = screendata["pfl3"]
except Exception as error:
print(error)
self.pff3 = screendata["pff3"]
yield
print("Indian Websites")
try:
self.pff4 = screendata["pff4"]
self.df4 = pd.DataFrame(screendata["df4"])
self.pfl4 = screendata["pfl4"]
except Exception as error:
print(error)
self.pff4 = screendata["pff4"]
yield
print("News")
try:
self.pff5 = screendata["pff5"]
self.df5 = pd.DataFrame(screendata["df5"])
self.pfl5 = screendata["pfl5"]
except Exception as error:
print(error)
                        self.pff5 = screendata["pff5"]
yield
print("News India")
try:
self.pff6 = screendata["pff6"]
self.df6 = pd.DataFrame(screendata["df6"])
self.pfl6 = screendata["pfl6"]
except Exception as error:
print(error)
self.pff6 = screendata["pff6"]
yield
return True
elif uploaded_file == "parisingerror":
self.alert_msg_header = "Error"
self.alert_msg = "Failed to parse url"
self.alert_change()
self.processing,self.complete = False,True
yield
elif not os.path.exists(uploaded_file):
self.alert_msg_header = "Error"
self.alert_msg = "Failed to load image"
self.alert_change()
self.processing,self.complete = False,True
yield
                elif not article_title:
self.alert_msg_header = "Error"
self.alert_msg = "Failed to load data file"
self.alert_change()
self.processing,self.complete = False,True
yield
else:
self.alert_msg_header = "Error"
self.alert_msg = "Unknown Error"
self.alert_change()
self.processing,self.complete = False,True
yield
return True
removestatus = [os.remove(file) for file in glob.glob('downloads/*')]
removestatus = [os.remove(file) for file in glob.glob('assets/mainimage*')]
article = Article(self.text)
article.download()
# article.html
article.parse()
article_image = article.top_image
imgpost = article_image.split('.')[-1]
print(f"Article image file is - {article_image}")
try:
# Bug 001 - Websites like khaleej times mask image url, download through wget fails
#os.system(f"wget {article_image} -O downloads/mainimage.{imgpost}")
if 'jpg' in article_image or 'png' in article_image or 'jpeg' in article_image:
os.system(f"wget {article_image} -O downloads/mainimage.{imgpost}")
else:
response = requests.get(article_image)
imgpost = 'jpg'
# Check if the request was successful
if response.status_code == 200:
# Write the content of the response to a file
with open(f'downloads/mainimage.{imgpost}', 'wb') as file:
file.write(response.content)
list_uploaded_file = f"downloads/mainimage.{imgpost}"
uploaded_file = list_uploaded_file
ui_uploaded_file = f'mainimage.{imgpost}'
shutil.move(uploaded_file,f'assets/{ui_uploaded_file}')
uploaded_file = "assets/"+ui_uploaded_file
except IndexError as error:
print("Image file doesnt exist")
uploaded_file = "doesntexist.txt"
except Exception as error:
print(error)
uploaded_file = "doesntexist.txt"
if article.title and os.path.exists(uploaded_file):
print("Main Execution")
# image = Image.open(uploaded_file)
# self.image_url = ui_uploaded_file
print(uploaded_file)
image = Image.open(uploaded_file)
self.image_url = image
if image.mode == 'RGBA':
image = image.convert('RGB')
# image = transform(image).unsqueeze(0).to(device)
byte_arr = io.BytesIO()
image.save(byte_arr, format='JPEG')
byte_arr = byte_arr.getvalue()
# POST request to the Flask API
url = f"{ram_url}/analyze_image"
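            # The RAM (Recognize Anything) service is expected to return a
            # 3-element JSON list: res[0] = '|'-separated tags, res[1] =
            # user-specified tags, res[2] = the image caption - that is how it
            # is indexed below.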
try:
response = requests.post(
url,
files={
'image': ('image.jpg', byte_arr, 'image/jpeg')
}
, timeout=10
)
except Exception as connerror:
print(connerror)
self.alert_msg_header = "Error"
self.alert_msg = "Image Processing Demo Service is Down - Will be up from 9:00 AM to 8:30 AM IST"
self.alert_change()
self.processing,self.complete = False,False
yield
return True
# import pdb;pdb.set_trace()
# Print the response
print(response.json())
res = response.json()
# res = inference(image, model, specified_tags)
self.article_title = article.title
self.article_text = article.text[0:2000]
print("Model Identified Tags: ", res[0])
print("User Specified Tags: ", res[1])
print("Image Caption: ", res[2])
self.model_tag_list = res[0].split('|')
self.model_caption = res[2]
self.processing,self.complete = False,True
yield
# for i in range(0,10):
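            # Sentiment prompt contract: the reply should contain a
            # "Sentiment: <word>" paragraph, a list of 10 keywords, and a final
            # rating wrapped in curly braces. The parsing below splits on blank
            # lines, builds the keyword DataFrame, and regexes the {n} value
            # into the 0-100% gauge.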
try:
mesg1 = {"role":"user","content":f"Please analyze the sentiment of the following message, and get the top 10 keywords or keyphrases - {self.article_text}. Rate the sentiment on a scale of 1 to 10 with 10 being extremely positive and 1 being extremely negative. Send response as first paragraph with only one word describing the sentiment of the article - positive.,negative. or neutral. ,preceded by 'Sentiment:' and followed by two next line characters, list of only 10 keywords separated by next line and last paragraph giving the rating.Show only the numeric value of the rating in curly braces."}
# mesg1 = {"role":"user","content":f"Please analyze the sentiment of the following message, and get the top 10 keywords or keyphrases - {self.article_text}. Rate the sentiment on a scale of 1 to 10 with 10 being extremely positive and 1 being extremely negative. Send response as first paragraph with only one word describing the sentiment of the article - positive.,negative. or neutral. ,preceded by 'Sentiment:', list of only 10 keywords separated by next line and last paragraph giving the rating.Show only the numeric value of the rating in curly braces."}
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role":"system","content":f"Please respond as a content expert and consultant and do not include disclaimers."},
mesg1
# {"role":"user","content":f"Suggest brands whose tone of voice resonates with following tags - {res[0]} - {res[2]}"}
]
)
iab_safety_response_msg = response["choices"][0]["message"]
text = iab_safety_response_msg["content"]
print(text)
paragraphs = text.split('\n\n')
print(paragraphs)
try:
self.sff1 = paragraphs[0].split('Sentiment:')[-1].split('.')[0].strip() + " - "
except:
self.sff1 = paragraphs[0].split('Sentiment:')[-1].strip() + " - "
# import pdb;pdb.set_trace()
table_content = paragraphs[1]
brand_names = table_content.replace('`','').replace('|','').split('\n')
data = []
for match in brand_names:
if 'Key' in match or 'key' in match or '---' in match or not match or 'rand' in match:
continue
# image_tag = match[0]
# iab_category = match[1]
keyword = match
data.append({'Keywords': keyword})
if len(data) > 10:
break
self.keyword_list = pd.DataFrame(data)
# self.iab_safety_response_msg = iab_safety_response_msg["content"]
para2 = paragraphs[-1] if match not in paragraphs[-1] else ""
match = re.search(r"{([0-9.]+)}$", para2)
if match:
self.sentiment_disp_value = float(match.group(1))
self.sentiment_value = int(self.sentiment_disp_value * 10)
self.sentiment_disp_value = f"{self.sentiment_value}%"
else:
self.sentiment_value = 0
if self.sentiment_value <= 40:
self.sentiment_filled_bg = "red"
self.sentiment_empty_bg = "red.100"
self.sentiment_color = "red"
elif self.sentiment_value >= 60:
self.sentiment_filled_bg = "green"
self.sentiment_empty_bg = "green.100"
self.sentiment_color = "green"
else:
self.sentiment_filled_bg = "grey"
self.sentiment_empty_bg = "grey.100"
self.sentiment_color = "grey"
except Exception as error:
print(error)
try:
self.iab_safety_response_msg = text
except:
print(error)
yield
# st.info(f'X-rae Response - {iab_response_msg["content"]}')
print("Get IAB Categories")
sysmessage = {"role":"system","content":f"Please respond as a content expert and consultant and do not include disclaimers.Please provide information in tabular format or in a format which can be parsed into a table using a python API."}
message1 = {"role":"user","content":f"Please take a guess of the IAB categories for an article with following image tags and caption - {res[0]} - {res[2]}. Return a table with single column of IAB category list "}
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
# {"role":"user","content":f"Suggest brands whose tone of voice resonates with following tags - {res[0]} - {res[2]}"}
]
)
iab_response_msg = response["choices"][0]["message"]
# Convert response to data
text = iab_response_msg["content"]
# matches = re.findall(r'(\w+)\s*->\s*(.*)', text)
matches = re.findall(r'\| (.*) \|\n', text)
data = []
for match in matches:
if 'IAB' in match:
continue
# image_tag = match[0]
# iab_category = match[1]
iab_category = match
# data.append({'Image Tag': image_tag, 'IAB Category': iab_category})
data.append({'IAB Category': iab_category})
# Create a DataFrame from the data
# print(data)
self.df = pd.DataFrame(data)
except Exception as error:
print(error)
self.error_occured = True
self.iab_response_msg = text
# self.iab_response_msg = iab_response_msg["content"]
yield
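            # From here on, every request re-sends the full conversation so far
            # (sysmessage, message1..N and the assistant replies) so the model
            # keeps the article context; each reply is parsed as intro
            # paragraph, table rows, and closing paragraph.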
print("Get Brands")
# import pdb;pdb.set_trace()
reply1 = {"role":"assistant","content":f"{iab_response_msg['content']}"}
# for trial in range(0,10):
message2 = {"role":"user","content":f"Suggest list of top 10 global brands whose tone of voice resonates with following tags - {res[0]} - {res[2]}.Return response as a table with single column of Brand names without index"}
#Return a table as comma-separated values with single column of Brand names
# # st.info(f'X-rae Response - {iab_response_msg["content"]}')
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2
]
)
global_brands_response_msg = response["choices"][0]["message"]
text = global_brands_response_msg["content"]
print(text)
paragraphs = text.split('\n\n')
# Extract last paragraph
# last_paragraph = re.sub(paragraph_pattern, '', text, count=1, flags=re.MULTILINE | re.DOTALL)
# Extract the first paragraph
self.pff1 = paragraphs[0]
# Extract the table content
table_content = paragraphs[1]
brand_names = table_content.replace('`','').replace('|','').split('\n')
data = []
for match in brand_names:
if 'IAB' in match or '---' in match or not match or 'rand' in match:
continue
# image_tag = match[0]
# iab_category = match[1]
iab_category = match
# data.append({'Image Tag': image_tag, 'IAB Category': iab_category})
data.append({'Brand Names': iab_category})
# Create a DataFrame from the data
# print(data)
self.df1 = pd.DataFrame(data)
# Create a DataFrame from the table content
# self.df1 = pd.read_csv(StringIO(table_content), skipinitialspace=True)
# Extract the last paragraph
self.pfl1 = paragraphs[-1] if match not in paragraphs[-1] else ""
# self.global_brands_response_msg = global_brands_response_msg["content"]
except Exception as error:
print(error)
self.pff1 = text
yield
print("Indian Brands")
reply2 = {"role":"assistant","content":f"{global_brands_response_msg['content']}"}
message3 = {"role":"user","content":f"Suggest list of top 10 Indian brands whose tone of voice resonates with following tags - {res[0]} - {res[2]}. Return response as a table with single column of Brand names without index"}
# # st.info(f'X-rae Response - {global_brands_response_msg["content"]}')
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2,
reply2,
message3,
]
)
indian_brands_response_msg = response["choices"][0]["message"]
# self.indian_brands_response_msg = indian_brands_response_msg["content"]
text = indian_brands_response_msg["content"]
print(text)
paragraphs = text.split('\n\n')
# Extract the first paragraph
self.pff2 = paragraphs[0]
# Extract the table content
table_content = paragraphs[1]
brand_names = table_content.replace('`','').replace('|','').split('\n')
data = []
for match in brand_names:
if 'IAB' in match or '---' in match or not match or 'rand' in match:
continue
# image_tag = match[0]
# iab_category = match[1]
iab_category = match
# data.append({'Image Tag': image_tag, 'IAB Category': iab_category})
data.append({'Brand Names': iab_category})
self.df2 = pd.DataFrame(data)
# # Create a DataFrame from the table content
# self.df2 = pd.read_csv(StringIO(table_content), skipinitialspace=True)
# Extract the last paragraph
self.pfl2 = paragraphs[-1] if match not in paragraphs[-1] else ""
except Exception as error:
print(error)
self.pff2 = text
yield
print("Websites")
reply3 = {"role":"assistant","content":f"{indian_brands_response_msg['content']}"}
message4 = {"role":"user","content":f"Suggest the right list of top 10 global website inventory to run the aboce brand ads along with the IAB categories. Return a table as comma-separated values with two columns - IAB Category and Website Name"}
# # st.info(f'X-rae Response - {indian_brands_response_msg["content"]}')
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2,
reply2,
message3,
reply3,
message4,
]
)
web_inventory_response_msg = response["choices"][0]["message"]
# self.web_inventory_response_msg = web_inventory_response_msg["content"]
text = web_inventory_response_msg["content"]
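                # If the model declines (apologises / "unfortunately"), the
                # identical request is retried once and the second answer is used.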
if "apologize" in text or 'unfortunately' in text or 'Unfortunately' in text or "Apologize" in text or "Apologise" in text or "apologise" in text:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2,
reply2,
message3,
reply3,
message4,
]
)
web_inventory_response_msg = response["choices"][0]["message"]
# self.web_inventory_response_msg = web_inventory_response_msg["content"]
text = web_inventory_response_msg["content"]
print(text)
paragraphs = text.split('\n\n')
# Extract the first paragraph
self.pff3 = paragraphs[0]
# Extract the table content
table_content = paragraphs[1]
matches = table_content.split('\n')
data = []
for match in matches:
if 'IAB' in match or '---' in match or not match or 'rand' in match:
continue
iab_category,webname = match.split(',')
data.append({'Website Name': webname, 'IAB Category': iab_category})
self.df3 = pd.DataFrame(data)
# Create a DataFrame from the table content
# self.df3 = pd.read_csv(StringIO(table_content), skipinitialspace=True)
# self.df3 = pd.DataFrame([row.split(':') for row in table_content], columns=['Category', 'Websites'])
# Strip leading and trailing whitespace from the DataFrame
# self.df3 = self.df3.apply(lambda x: x.str.strip())
# Extract the last paragraph
self.pfl3 = paragraphs[-1] if match not in paragraphs[-1] else ""
except Exception as error:
print(error)
self.pff3 = text
yield
print("Indian Websites")
reply4 = {"role":"assistant","content":f"{web_inventory_response_msg['content']}"}
message5 = {"role":"user","content":f"Suggest the right list of top 10 Indian website inventory to run the following brand ads along with the IAB categories. Return a table as comma-separated values with two columns - IAB Category and Website Name"}
# # st.info(f'X-rae Response - {web_inventory_response_msg["content"]}')
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2,
reply2,
message3,
reply3,
message4,
reply4,
message5,
]
)
# import pdb;pdb.set_trace()
ind_web_response_msg = response["choices"][0]["message"]
# self.ind_web_response_msg = ind_web_response_msg["content"]
text = ind_web_response_msg["content"]
print(text)
if "apologize" in text or 'unfortunately' in text or 'Unfortunately' in text or "Apologize" in text or "Apologise" in text or "apologise" in text:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2,
reply2,
message3,
reply3,
message4,
reply4,
message5,
]
)
# import pdb;pdb.set_trace()
ind_web_response_msg = response["choices"][0]["message"]
# self.ind_web_response_msg = ind_web_response_msg["content"]
text = ind_web_response_msg["content"]
print(text)
paragraphs = text.split('\n\n')
# Extract the first paragraph
self.pff4 = paragraphs[0]
# Extract the table content
table_content = paragraphs[1]
matches = table_content.split('\n')
data = []
for match in matches:
if 'IAB' in match or '---' in match or not match or 'rand' in match:
continue
iab_category,webname = match.split(',')
data.append({'Website Name': webname, 'IAB Category': iab_category})
self.df4 = pd.DataFrame(data)
# Create a DataFrame from the table content
# self.df4 = pd.read_csv(StringIO(table_content), skipinitialspace=True)
# self.df4 = pd.DataFrame([row.split(':') for row in table_content], columns=['Category', 'Websites'])
# self.df4 = self.df4.apply(lambda x: x.str.strip())
# Extract the last paragraph
self.pfl4 = paragraphs[-1] if match not in paragraphs[-1] else ""
except Exception as error:
print(error)
self.pff4 = text
yield
print("News")
reply5 = {"role":"assistant","content":f"{ind_web_response_msg['content']}"}
message6 = {"role":"user","content":f"Suggest the right list of top 10 News website inventory to run the following brand ads along with the IAB categories. Return a table as comma-separated values with two columns - IAB Category and Website Name"}
# # st.info(f'X-rae Response - {ind_web_response_msg["content"]}')
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2,
reply2,
message3,
reply3,
message4,
reply4,
message5,
reply5,
message6,
]
)
# import pdb;pdb.set_trace()
news_response_msg = response["choices"][0]["message"]
# self.news_response_msg = news_response_msg["content"]
text = news_response_msg["content"]
if "apologize" in text or 'Unfortunately' in text or "Apologize" in text or "Apologise" in text or "apologise" in text:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2,
reply2,
message3,
reply3,
message4,
reply4,
message5,
reply5,
message6,
]
)
# import pdb;pdb.set_trace()
news_response_msg = response["choices"][0]["message"]
# self.news_response_msg = news_response_msg["content"]
text = news_response_msg["content"]
print(text)
paragraphs = text.split('\n\n')
# Extract the first paragraph
self.pff5 = paragraphs[0]
# Extract the table content
table_content = paragraphs[1]
matches = table_content.split('\n')
data = []
for match in matches:
if 'IAB' in match or '---' in match or not match or 'rand' in match:
continue
iab_category,webname = match.split(',')
data.append({'Website Name': webname, 'IAB Category': iab_category})
self.df5 = pd.DataFrame(data)
# Create a DataFrame from the table content
# self.df5 = pd.read_csv(StringIO(table_content), skipinitialspace=True)
# Extract the last paragraph
self.pfl5 = paragraphs[-1] if match not in paragraphs[-1] else ""
except Exception as error:
print(error)
self.pff5 = text
yield
print("News India")
reply6 = {"role":"assistant","content":f"{news_response_msg['content']}"}
message7 = {"role":"user","content":f"Suggest the right list of top 10 Indian News website inventory to run the following brand ads along with the IAB categories. Return a table as comma-separated values with two columns - IAB Category and Website Name"}
# # st.info(f'X-rae Response - {news_response_msg["content"]}')
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2,
reply2,
message3,
reply3,
message4,
reply4,
message5,
reply5,
message6,
reply6,
message7,
]
)
# import pdb;pdb.set_trace()
news_ind_response_msg = response["choices"][0]["message"]
# self.news_ind_response_msg = news_ind_response_msg["content"]
text = news_ind_response_msg["content"]
if "apologize" in text or 'Unfortunately' in text or "Apologize" in text or "Apologise" in text or "apologise" in text:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2,
reply2,
message3,
reply3,
message4,
reply4,
message5,
reply5,
message6,
reply6,
message7,
]
)
# import pdb;pdb.set_trace()
news_ind_response_msg = response["choices"][0]["message"]
# self.news_ind_response_msg = news_ind_response_msg["content"]
text = news_ind_response_msg["content"]
print(text)
paragraphs = text.split('\n\n')
# Extract the first paragraph
self.pff6 = paragraphs[0]
# Extract the table content
table_content = paragraphs[1]
matches = table_content.split('\n')
data = []
for match in matches:
if 'IAB' in match or '---' in match or not match or 'rand' in match:
continue
iab_category,webname = match.split(',')
data.append({'Website Name': webname, 'IAB Category': iab_category})
self.df6 = pd.DataFrame(data)
# Create a DataFrame from the table content
# self.df6 = pd.read_csv(StringIO(table_content), skipinitialspace=True)
# Extract the last paragraph
self.pfl6 = paragraphs[-1] if match not in paragraphs[-1] else ""
except Exception as error:
print(error)
self.pff6 = text
yield
return True
# st.info(f'X-rae Response - {news_ind_response_msg["content"]}')
elif not os.path.exists(uploaded_file):
self.alert_msg_header = "Error"
self.alert_msg = "Failed to load image"
self.alert_change()
self.processing,self.complete = False,True
yield
# return rx.window_alert("Failed to load image")
# st.error("Failed to load image")
elif not article.title:
self.alert_msg_header = "Error"
self.alert_msg = "Failed to load data file"
self.alert_change()
self.processing,self.complete = False,True
yield
# return rx.window_alert("Failed to load data file ")
# st.error("Failed to load data file ")
else:
self.alert_msg_header = "Error"
self.alert_msg = "Unknown Error"
self.alert_change()
self.processing,self.complete = False,True
yield
# return rx.window_alert("Failed to load data file ")
# print(f"Files not found - {uploaded_file} - 'out.json'")
def alert_change(self):
self.alert_show = not (self.alert_show)
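# tag_list renders one model tag as an outlined pill badge, alternating between
# the two accent colours via the module-level `colindex` toggle.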
def tag_list(tag: str):
# index = random.randint(0,4)
global colindex
# import pdb;pdb.set_trace()
if colindex == 0:
colindex = 1
else:
colindex = 0
print(f"Color index is {colindex}")
colorval = ["#a61d55","#991BE2"]
return rx.badge(
tag, variant="solid",
background="transparent",
line_height="1.42",
# bg="#fff",
color=f"{colorval[colindex]}",##991BE2
border_color=f"{colorval[colindex]}",##991BE2
border_width="1px",
border_style= "solid",
font_size="1em",
font_weight="normal",
text_transform = "lowercase",
border_radius = "1.41em",
cursor = "pointer",
# box_shadow = "5px 5px #000000",
margin = "6px",
padding = "0.7em 1.4em"
)
def colored_box(color: str):
return rx.box(rx.text(color), bg=color)
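# index() assembles the page: logo header, the alert dialog bound to State,
# the examples accordion + URL input, the animated "Analyse" HTML button wired
# to State.run_analysis, a spinner while processing, and (once complete) the
# result cards - article image/text, tags and caption, sentiment gauge,
# keyword/IAB tables, and the brand and inventory tables.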
def index() -> rx.Component:
return rx.fragment(
rx.hstack(
rx.image(src="https://citrusberry.biz/assets/img/menu_logo1.png", width="41px", height="auto"),
rx.image(src="https://citrusberry.biz/assets/img/menu_logo.png", width="90px", height="auto"),
padding="10px",
margin="5px",
),
# rx.color_mode_button(rx.color_mode_icon(), float="right"),
rx.vstack(
rx.tooltip(
rx.card(
rx.center(
rx.image(src="logo-no-background.png", width="200px", height="auto"),
# rx.heading("X-Rae Output", size="xl", color="#fb5e78"),
# border_radius="15px",
# border_width="thick",
width="100%",
# border_color="#fb5e78",
),
),
# background="linear-gradient(90deg, #ff5c72, #a485f2)",
# rx.heading("Contextual AI Demo!", font_size="2em",color="#a61d55",),
label="Please enter or select a link and click on Analyze",
),
rx.alert_dialog(
rx.alert_dialog_overlay(
rx.alert_dialog_content(
rx.alert_dialog_header(State.alert_msg_header),
rx.alert_dialog_body(
State.alert_msg
),
rx.alert_dialog_footer(
rx.button(
"Close",
on_click=State.alert_change,
)
),
)
),
is_open=State.alert_show,
),
# rx.box("Get started by editing ", rx.code(filename, font_size="1em")),
rx.accordion(
rx.center(
rx.accordion_item(
rx.accordion_button(
rx.tooltip(
rx.text(
"Select an example from dropdown"
),
label="Click here to see Examples",
),
rx.accordion_icon(),
),
rx.accordion_panel(
rx.center(
rx.tooltip(
rx.select(
a_options,
placeholder="select.",
# on_change=State.set_selected_option_a,
# on_change=State.fetch_preloaded,
on_change=State.set_text,
),
label="Please select a link and Click on Analyze",
),
width="1000px"
),
),
),
),
allow_multiple=True,
width="80%",
font_size="16px",
# width="1000px",
),
rx.center(
rx.tooltip(
rx.icon(
# tag="link",margin_right="10px",on_click=rx.set_clipboard(State.selected_option_a),
tag="link",margin_right="10px",
),
# label="Copy Link",
label="Please enter a url link and click on Analyze",
),
rx.tooltip(
rx.input(
# placeholder="Enter the page url",
on_blur=State.set_text,
width="100%",
value=State.text,
on_change=State.set_text,
# default_value=State.text,
),
label="You can enter a custom url link and click on Analyze",
),
width="1000px"
),
rx.hstack(
# rx.button(
# "Clear", on_click=State.clear_text,width="100%",
# ),
rx.html("""
<button class='btn-101'>
Analyse
<svg>
<defs>
<filter id='glow'>
<fegaussianblur result='coloredBlur' stddeviation='5'></fegaussianblur>
<femerge>
<femergenode in='coloredBlur'></femergenode>
<femergenode in='coloredBlur'></femergenode>
<femergenode in='coloredBlur'></femergenode>
<femergenode in='SourceGraphic'></femergenode>
</femerge>
</filter>
</defs>
<rect />
</svg>
</button>
""",on_click=State.run_analysis),
# rx.button(
# "Analyze", on_click=State.run_analysis,is_loading=State.processing,width="100%",
# background_image="linear-gradient(90deg, #ff5c72, #a485f2)",
# ),
),
rx.cond(
State.processing,
rx.circular_progress(is_indeterminate=True,color="#a61d55"
),
),
rx.cond(
State.complete,
rx.responsive_grid(
rx.vstack(
# rx.divider(border_color="#a61d55"),
rx.heading(State.article_title, size="lg",margin="30px",
),
# rx.hstack(
rx.responsive_grid(
rx.card(
rx.center(
rx.image(
src=State.image_url,
height="25em",
width="37.5em",
),
border_radius="10px",
border_width="2px",
border_color="#a61d55",
width="100%",
),
header=rx.heading("Article Image", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
rx.card(
rx.text(State.article_text),
header=rx.heading("Article Text", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
columns=[2],
spacing="4",
),
# rx.divider(border_color="black"),
rx.responsive_grid(
rx.card(
rx.center(
rx.vstack(
rx.hstack(
rx.foreach(State.model_tag_list,tag_list),rx.spacer(),
),
rx.hstack(
rx.heading(State.model_caption, size="lg", ),
),
),
),
background="linear-gradient(45deg,#e6e4fc,#fceded)",
header=rx.heading("X RAE Image Analysis", size="lg"),
),
columns=[1],
spacing="4",
width="100%",
),
rx.responsive_grid(
# rx.divider(border_color="black"),
rx.card(
rx.center(
rx.vstack(
# rx.hstack(
rx.heading(
State.sff1+State.sentiment_disp_value, color=State.sentiment_filled_bg,opacity="0.8"
),
# ),
# rx.hstack(
rx.progress(value=State.sentiment_value, width="100%",color_scheme=State.sentiment_color,height="15px",bg="#fff",opacity="0.8"),
# ),
width="75%",
),
),
background="linear-gradient(45deg,#e6e4fc,#fceded)",
header=rx.heading("Overall Sentiment", size="lg"),
),
columns=[1],
spacing="4",
width="100%",
),
rx.responsive_grid(
rx.card(
rx.vstack(
# rx.heading(State.sff1),
# rx.heading(
# State.sff1+State.sentiment_disp_value, color=State.sentiment_filled_bg
# ),
# rx.progress(value=State.sentiment_value, width="100%",color_scheme=State.sentiment_color,height="15px"),
# rx.slider(
# rx.slider_track(
# rx.slider_filled_track(bg=State.sentiment_filled_bg),
# bg=State.sentiment_empty_bg,
# height="5px",
# ),
# # rx.slider_thumb(
# # rx.icon(tag="star", color="white"),
# # box_size="1.5em",
# # bg="tomato",
# # ),
# # on_change_end=SliderManual.set_end,
# value=State.sentiment_value,
# default_value=40,
# is_disabled=True,
# height="5px",
# ),
rx.data_table(
data=State.keyword_list,
pagination=False,
search=False,
sort=False,
),
rx.text(State.iab_safety_response_msg),
),
header=rx.heading("Keywords", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
rx.card(
rx.cond(
State.error_occured,
rx.text(State.iab_response_msg),
rx.data_table(
data=State.df,
# pagination=True,
# search=True,
# sort=True,
),
),
header=rx.heading("IAB Categories", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
columns=[2],
spacing="4",
),
rx.responsive_grid(
rx.card(
rx.vstack(
rx.text(State.pff1),
rx.data_table(
data=State.df1,
# pagination=True,
# search=True,
# sort=True,
),
rx.text(State.pfl1,font_style="italic"),
),
header=rx.heading("Global Brands To Target", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
rx.card(
# rx.text(State.indian_brands_response_msg),
rx.vstack(
rx.text(State.pff2),
rx.data_table(
data=State.df2,
# pagination=True,
# search=True,
# sort=True,
),
rx.text(State.pfl2,font_style="italic"),
),
header=rx.heading("Indian Brands To Target", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
columns=[2],
spacing="4",
),
rx.responsive_grid(
rx.card(
# rx.text(State.web_inventory_response_msg),
rx.vstack(
rx.text(State.pff3),
rx.data_table(
data=State.df3,
# pagination=True,
# search=True,
# sort=True,
),
rx.text(State.pfl3,font_style="italic"),
),
header=rx.heading("Website Inventory to target", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
rx.card(
# rx.text(State.ind_web_response_msg),
rx.vstack(
rx.text(State.pff4),
rx.data_table(
data=State.df4,
# pagination=True,
# search=True,
# sort=True,
),
rx.text(State.pfl4,font_style="italic"),
),
header=rx.heading("Indian Website Inventory to target", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
columns=[2],
spacing="4",
),
rx.responsive_grid(
rx.card(
# rx.text(State.news_response_msg),
rx.vstack(
rx.text(State.pff5),
rx.data_table(
data=State.df5,
# pagination=True,
# search=True,
# sort=True,
),
rx.text(State.pfl5,font_style="italic"),
),
header=rx.heading("News Website Inventory to target", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
rx.card(
# rx.text(State.news_ind_response_msg),
rx.vstack(
rx.text(State.pff6),
rx.data_table(
data=State.df6,
# pagination=True,
# search=True,
# sort=True,
),
rx.text(State.pfl6,font_style="italic"),
),
header=rx.heading("Indian News Website Inventory to target", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
columns=[2],
spacing="4",
),
),
animation="fadeInAnimation ease 3s",
)
),
spacing="1.5em",
font_size="1em",
padding="3%",
shadow="lg",
border_radius="lg",
),
width="100%",
height="auto",
#
)
def about():
return rx.text("About Page")
# Add state and page to the app.
app = rx.App(state=State,stylesheets=[
"styles/fontstyles.css","styles/center-simple.css","styles/introjs.min.css" # This path is relative to assets/
],style=style,scripts="intro.js")
app.add_page(index,title="Contextual Demo")
app.add_page(about, route="/about")
app.compile()
| [
"Suggest list of top 10 Indian brands whose tone of voice resonates with following tags - PLACEHOLDER - PLACEHOLDER. Return response as a table with single column of Brand names without index",
"Suggest the right list of top 10 News website inventory to run the following brand ads along with the IAB categories. Return a table as comma-separated values with two columns - IAB Category and Website Name",
"[PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER]",
"PLACEHOLDER",
"[PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER]",
"Please respond as a content expert and consultant and do not include disclaimers.",
"Suggest list of top 10 global brands whose tone of voice resonates with following tags - PLACEHOLDER - PLACEHOLDER.Return response as a table with single column of Brand names without index",
"Suggest the right list of top 10 Indian News website inventory to run the following brand ads along with the IAB categories. Return a table as comma-separated values with two columns - IAB Category and Website Name",
"Suggest the right list of top 10 global website inventory to run the aboce brand ads along with the IAB categories. Return a table as comma-separated values with two columns - IAB Category and Website Name",
"[PLACEHOLDER, PLACEHOLDER]",
"content",
"Suggest the right list of top 10 Indian website inventory to run the following brand ads along with the IAB categories. Return a table as comma-separated values with two columns - IAB Category and Website Name",
"Please respond as a content expert and consultant and do not include disclaimers.Please provide information in tabular format or in a format which can be parsed into a table using a python API.",
"[PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER]",
"Please take a guess of the IAB categories for an article with following image tags and caption - PLACEHOLDER - PLACEHOLDER. Return a table with single column of IAB category list "
] |
2024-01-10 | programmeddeath1/zebra-crssing | futureapp~pineapp_jul25.py | """Welcome to Reflex! This file outlines the steps to create a basic app."""
from rxconfig import config
from typing import List
import reflex as rx
import os, glob, json, shutil
from time import sleep
import subprocess
import re
import pandas as pd
from io import StringIO
import requests
import io
import random
from PIL import Image
from newspaper import Article
import urllib.request
#import torch
import openai
openai.organization = "org-MGwFb1CrjeNZupHeaarBRyrN"
openai.api_key = os.getenv("OPENAI_API_KEY") or "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"
ram_url = "http://65.1.128.92:3000"
# from ram.models import tag2text_caption
# from ram import inference_tag2text as inference
# from ram import get_transform
# pretrained = '/home/dt/Projects/Work/citrusberry/recognize-anything/tag2text_swin_14m.pth'
# image_size = 384
# thre = 0.68
# specified_tags = 'None'
# # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device='cpu'
# transform = get_transform(image_size=image_size)
# delete_tag_index = [127,2961, 3351, 3265, 3338, 3355, 3359]
# # @st.cache_data # 👈 Add the caching decorator
# # @rx.memo
# def load_model():
# model = tag2text_caption(pretrained=pretrained,
# image_size=image_size,
# vit='swin_b',
# delete_tag_index=delete_tag_index)
# model.threshold = thre # threshold for tagging
# model.eval()
# model = model.to(device)
# return model
# model = load_model()
filename = f"{config.app_name}/{config.app_name}.py"
colindex = 0
accent_color = "#a883ed"
style = {
"background" : "rgb(250, 250, 250)",
# "font_family": "AirCerealMedium",
# "font_family": "'Ariel', sans-serif",
"font_size": "16px",
# "font_weight": "",
"::selection": {
"background_color": accent_color,
},
"th" : {
"background":"linear-gradient(45deg,#e6e4fc,#fceded)",
},
    #Disco Button
rx.ResponsiveGrid: {
"animation": "fadeInAnimation ease 3s",
"animation-iteration-count": "1",
"animation-fill-mode": "forwards",
},
rx.Heading: {
# "font_size": "32px",
"font_family": "AirCereal",
# "font_family": "'Ariel', sans-serif",
"font_weight": "700",
"color": "#a61d55",
},
rx.Text: {
"font_family": "AirCerealNormalText",
"line-height" : "1.7",
# "font_weight": "100",
"font_size": "16px",
"font-weight": "normal",
# "font-variant": "normal"
},
rx.Card: {
"border-radius" : "16px",
# "box-shadow" : "5px 10px",
# "box-shadow" : "rgb(204, 219, 232) 3px 3px 6px 0px inset, rgba(255, 255, 255, 0.5) -3px -3px 6px 1px inset;"
"box-shadow" : "6px 6px 12px #b8b9be,-6px -6px 12px #fff!important",
"padding" : "10px 20px",
"margin" : "10px 20px",
# "background" : ""
},
rx.Badge: {
"padding" : "10px 20px!important",
"margin" : "10px 20px!important",
"text-transform" : "lowercase!important",
"border-radius" : "5px!important",
"box-shadow" : "5px 5px #000000!important",
},
rx.Slider: {
"height": "5px",
"overflow": "hidden",
"background": "#fff",
# "border" : "1px solid #29d",
},
rx.DataTable: {
"background": "linear-gradient(45deg,#e6e4fc,#fceded)",
}
# rx.SliderFilledTrack: {
# "position": "absolute",
# "top": "0",
# "right": "100%",
# "height": "5px",
# "width": "100%",
# "background": "#29d",
# }
}
class ArticleData:
def __init__(self, data_dir='data'):
self.data_dir = data_dir
if not os.path.exists(self.data_dir):
os.makedirs(self.data_dir)
self.data_file = os.path.join(self.data_dir, 'articles.json')
if not os.path.exists(self.data_file):
with open(self.data_file, 'w') as f:
json.dump([], f)
def store_article(self, article):
"""
Store article data to json file
"""
with open(self.data_file, 'r+') as f:
articles = json.load(f)
articles.append(article)
f.seek(0) # reset file position to the beginning.
json.dump(articles, f, indent=4)
def get_articles(self):
"""
Fetch all articles from json file
"""
with open(self.data_file, 'r') as f:
articles = json.load(f)
return articles
def delete_articles(self,article_url):
"""
Delete a specific article from json file
"""
with open(self.data_file, 'r+') as f:
articles = json.load(f)
articles = [article for article in articles if article['url'] != article_url]
f.seek(0) # reset file position to the beginning.
f.truncate() # remove existing file content.
json.dump(articles, f, indent=4)
article_data = ArticleData()
articles = article_data.get_articles()
a_options : List[str] = [datavalue['url'] for datavalue in articles]
b_options = []
class State(rx.State):
# The colors to cycle through.
global a_options
colors: List[str] = [
"black",
"red",
"green",
"blue",
"purple",
]
# selected_option_a: str = "No selection yet."
selected_option_a: str = a_options[0]
text: str = "Enter Page url"
processing = False
complete = False
error_occured = False
image_url = ""
model_tags = ""
model_caption = ""
alert_msg = ""
alert_show: bool = False
alert_msg_header = ""
article_text = ""
article_title = ""
model_tag_list: List[str] = ['1','2']
# The index of the current color.
index: int = 0
df = pd.DataFrame([[]])
df1 = pd.DataFrame([[]])
pff1 = ""
pfl1 = ""
df2 = pd.DataFrame([[]])
pff2 = ""
pfl2 = ""
df3 = pd.DataFrame([[]])
pff3 = ""
pfl3 = ""
df4 = pd.DataFrame([[]])
pff4 = ""
pfl4 = ""
df5 = pd.DataFrame([[]])
pff5 = ""
pfl5 = ""
df6 = pd.DataFrame([[]])
pff6 = ""
pfl6 = ""
#Openai model outputs
iab_safety_response_msg = ""
iab_response_msg = ""
global_brands_response_msg = ""
indian_brands_response_msg = ""
web_inventory_response_msg = ""
ind_web_response_msg = ""
news_response_msg = ""
news_ind_response_msg = ""
sentiment_filled_bg = "red"
sentiment_empty_bg = "green.100"
sentiment_color = sentiment_empty_bg.split('.')[0]
sentiment_value = 0
sff1 = ""
sentiment_disp_value = 0
keyword_list = pd.DataFrame([[]])
# keyword_list: List[str] = [["1"]]
def next_color(self):
"""Cycle to the next color."""
self.index = (self.index + 1) % len(self.colors)
@rx.var
def color(self) -> str:
return self.colors[self.index]
def clear_text(self):
# import pdb;pdb.set_trace()
self.text = ""
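    # In this variant run_analysis always scrapes and analyses the URL live
    # (there is no cached-article replay); otherwise the flow matches the file
    # above: newspaper download -> RAM tagging service -> chained GPT-3.5
    # prompts parsed into DataFrames.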
def run_analysis(self):
screendata = {}
global model
global articles
self.processing,self.complete = False,False
yield
if self.text == "":
self.alert_msg_header = "Error"
self.alert_msg = "Please enter url link"
self.alert_change()
self.processing,self.complete = False,True
yield
return
self.processing,self.complete = True,False
        self.iab_safety_response_msg = ""
self.iab_response_msg = ""
self.global_brands_response_msg = ""
self.indian_brands_response_msg = ""
self.web_inventory_response_msg = ""
self.ind_web_response_msg = ""
self.news_response_msg = ""
self.news_ind_response_msg = ""
self.sentiment_filled_bg = "red"
self.sentiment_empty_bg = "green.100"
self.sentiment_value = 0
self.sff1 = ""
self.sentiment_disp_value = 0
yield
removestatus = [os.remove(file) for file in glob.glob('downloads/*')]
removestatus = [os.remove(file) for file in glob.glob('assets/mainimage*')]
article = Article(self.text)
article.download()
# article.html
article.parse()
# os.system(f"scrapy crawl image_scraper -a start_url='{self.text}' -o downloads/out.json")
# proc = subprocess.run(["scrapy", "crawl", "image_scraper","-a",f"start_url={self.text}", "-o", "assets/mainimage.json"])
# while (proc.poll() is None):
# print("Process still running")
article_image = article.top_image
imgpost = article_image.split('.')[-1]
print(f"Article image file is - {article_image}")
# list_uploaded_file = glob.glob('downloads/*.jpeg') or glob.glob('downloads/*.jpg') or glob.glob('downloads/*.png')
try:
# proc = subprocess.run(["wget", f"\'{article_image}\'", "-o", f"downloads/mainimage.{imgpost}"])
urllib.request.urlretrieve(article_image,f"downloads/mainimage.{imgpost}")
# list_uploaded_file = wget.download(article_image)
list_uploaded_file = f"downloads/mainimage.{imgpost}"
uploaded_file = list_uploaded_file
ui_uploaded_file = f'mainimage.{imgpost}'
shutil.move(uploaded_file,f'assets/{ui_uploaded_file}')
uploaded_file = "assets/"+ui_uploaded_file
except IndexError as error:
print("Image file doesnt exist")
uploaded_file = "doesntexist.txt"
except Exception as error:
print(error)
uploaded_file = "doesntexist.txt"
if article.title and os.path.exists(uploaded_file):
print("Main Execution")
# image = Image.open(uploaded_file)
# self.image_url = ui_uploaded_file
print(uploaded_file)
image = Image.open(uploaded_file)
self.image_url = image
            # image = transform(image).unsqueeze(0).to(device)
            # JPEG cannot carry an alpha channel, so flatten RGBA images before saving
            if image.mode == 'RGBA':
                image = image.convert('RGB')
            byte_arr = io.BytesIO()
image.save(byte_arr, format='JPEG')
byte_arr = byte_arr.getvalue()
# POST request to the Flask API
url = f"{ram_url}/analyze_image"
response = requests.post(
url,
files={
'image': ('image.jpg', byte_arr, 'image/jpeg')
}
)
# import pdb;pdb.set_trace()
# Print the response
print(response.json())
res = response.json()
# res = inference(image, model, specified_tags)
self.processing,self.complete = False,True
self.article_title = article.title
self.article_text = article.text[0:2000]
print("Model Identified Tags: ", res[0])
print("User Specified Tags: ", res[1])
print("Image Caption: ", res[2])
self.model_tag_list = res[0].split('|')
self.model_caption = res[2]
yield
# for i in range(0,10):
try:
mesg1 = {"role":"user","content":f"Please analyze the sentiment of the following message, and get the top 10 keywords or keyphrases - {self.article_text}. Rate the sentiment on a scale of 1 to 10 with 10 being extremely positive and 1 being extremely negative. Send response as first paragraph with only one word describing the sentiment of the article - positive.,negative. or neutral. ,preceded by 'Sentiment:' and followed by two next line characters, list of only 10 keywords separated by next line and last paragraph giving the rating.Show only the numeric value of the rating in curly braces."}
# mesg1 = {"role":"user","content":f"Please analyze the sentiment of the following message, and get the top 10 keywords or keyphrases - {self.article_text}. Rate the sentiment on a scale of 1 to 10 with 10 being extremely positive and 1 being extremely negative. Send response as first paragraph with only one word describing the sentiment of the article - positive.,negative. or neutral. ,preceded by 'Sentiment:', list of only 10 keywords separated by next line and last paragraph giving the rating.Show only the numeric value of the rating in curly braces."}
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role":"system","content":f"Please respond as a content expert and consultant and do not include disclaimers."},
mesg1
# {"role":"user","content":f"Suggest brands whose tone of voice resonates with following tags - {res[0]} - {res[2]}"}
]
)
iab_safety_response_msg = response["choices"][0]["message"]
text = iab_safety_response_msg["content"]
print(text)
paragraphs = text.split('\n\n')
print(paragraphs)
try:
self.sff1 = paragraphs[0].split('Sentiment:')[-1].split('.')[0].strip() + " - "
except:
self.sff1 = paragraphs[0].split('Sentiment:')[-1].strip() + " - "
# import pdb;pdb.set_trace()
table_content = paragraphs[1]
brand_names = table_content.replace('`','').replace('|','').split('\n')
data = []
for match in brand_names:
if 'Key' in match or 'key' in match or '---' in match or not match or 'rand' in match:
continue
# image_tag = match[0]
# iab_category = match[1]
keyword = match
data.append({'Keywords': keyword})
if len(data) > 10:
break
self.keyword_list = pd.DataFrame(data)
# self.iab_safety_response_msg = iab_safety_response_msg["content"]
para2 = paragraphs[-1] if match not in paragraphs[-1] else ""
match = re.search(r"{([0-9.]+)}$", para2)
if match:
self.sentiment_disp_value = float(match.group(1))
self.sentiment_value = int(self.sentiment_disp_value * 10)
self.sentiment_disp_value = f"{self.sentiment_value}%"
else:
self.sentiment_value = 0
if self.sentiment_value <= 40:
self.sentiment_filled_bg = "red"
self.sentiment_empty_bg = "red.100"
elif self.sentiment_value >= 60:
self.sentiment_filled_bg = "green"
self.sentiment_empty_bg = "green.100"
else:
self.sentiment_filled_bg = "grey"
self.sentiment_empty_bg = "grey.100"
except Exception as error:
print(error)
try:
self.iab_safety_response_msg = text
except:
print(error)
yield
# st.info(f'X-rae Response - {iab_response_msg["content"]}')
print("Get IAB Categories")
sysmessage = {"role":"system","content":f"Please respond as a content expert and consultant and do not include disclaimers.Please provide information in tabular format or in a format which can be parsed into a table using a python API."}
message1 = {"role":"user","content":f"Please take a guess of the IAB categories for an article with following image tags and caption - {res[0]} - {res[2]}. Return a table with single column of IAB category list "}
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
# {"role":"user","content":f"Suggest brands whose tone of voice resonates with following tags - {res[0]} - {res[2]}"}
]
)
iab_response_msg = response["choices"][0]["message"]
# Convert response to data
text = iab_response_msg["content"]
# matches = re.findall(r'(\w+)\s*->\s*(.*)', text)
matches = re.findall(r'\| (.*) \|\n', text)
data = []
for match in matches:
if 'IAB' in match:
continue
# image_tag = match[0]
# iab_category = match[1]
iab_category = match
# data.append({'Image Tag': image_tag, 'IAB Category': iab_category})
data.append({'IAB Category': iab_category})
# Create a DataFrame from the data
# print(data)
self.df = pd.DataFrame(data)
except Exception as error:
print(error)
self.error_occured = True
self.iab_response_msg = text
# self.iab_response_msg = iab_response_msg["content"]
yield
print("Get Brands")
# import pdb;pdb.set_trace()
reply1 = {"role":"assistant","content":f"{iab_response_msg['content']}"}
# for trial in range(0,10):
message2 = {"role":"user","content":f"Suggest list of top 10 global brands whose tone of voice resonates with following tags - {res[0]} - {res[2]}.Return response as a table with single column of Brand names without index"}
#Return a table as comma-separated values with single column of Brand names
# # st.info(f'X-rae Response - {iab_response_msg["content"]}')
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2
]
)
global_brands_response_msg = response["choices"][0]["message"]
text = global_brands_response_msg["content"]
print(text)
paragraphs = text.split('\n\n')
# Extract last paragraph
# last_paragraph = re.sub(paragraph_pattern, '', text, count=1, flags=re.MULTILINE | re.DOTALL)
# Extract the first paragraph
self.pff1 = paragraphs[0]
# Extract the table content
table_content = paragraphs[1]
brand_names = table_content.replace('`','').replace('|','').split('\n')
data = []
for match in brand_names:
if 'IAB' in match or '---' in match or not match or 'rand' in match:
continue
# image_tag = match[0]
# iab_category = match[1]
iab_category = match
# data.append({'Image Tag': image_tag, 'IAB Category': iab_category})
data.append({'Brand Names': iab_category})
# Create a DataFrame from the data
# print(data)
self.df1 = pd.DataFrame(data)
# Create a DataFrame from the table content
# self.df1 = pd.read_csv(StringIO(table_content), skipinitialspace=True)
# Extract the last paragraph
self.pfl1 = paragraphs[-1] if match not in paragraphs[-1] else ""
# self.global_brands_response_msg = global_brands_response_msg["content"]
except Exception as error:
print(error)
self.pff1 = text
yield
print("Indian Brands")
reply2 = {"role":"assistant","content":f"{global_brands_response_msg['content']}"}
message3 = {"role":"user","content":f"Suggest list of top 10 Indian brands whose tone of voice resonates with following tags - {res[0]} - {res[2]}. Return response as a table with single column of Brand names without index"}
# # st.info(f'X-rae Response - {global_brands_response_msg["content"]}')
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2,
reply2,
message3,
]
)
indian_brands_response_msg = response["choices"][0]["message"]
# self.indian_brands_response_msg = indian_brands_response_msg["content"]
text = indian_brands_response_msg["content"]
print(text)
paragraphs = text.split('\n\n')
# Extract the first paragraph
self.pff2 = paragraphs[0]
# Extract the table content
table_content = paragraphs[1]
brand_names = table_content.replace('`','').replace('|','').split('\n')
data = []
for match in brand_names:
if 'IAB' in match or '---' in match or not match or 'rand' in match:
continue
# image_tag = match[0]
# iab_category = match[1]
iab_category = match
# data.append({'Image Tag': image_tag, 'IAB Category': iab_category})
data.append({'Brand Names': iab_category})
self.df2 = pd.DataFrame(data)
# # Create a DataFrame from the table content
# self.df2 = pd.read_csv(StringIO(table_content), skipinitialspace=True)
# Extract the last paragraph
self.pfl2 = paragraphs[-1] if match not in paragraphs[-1] else ""
except Exception as error:
print(error)
self.pff2 = text
yield
print("Websites")
reply3 = {"role":"assistant","content":f"{indian_brands_response_msg['content']}"}
message4 = {"role":"user","content":f"Suggest the right list of top 10 global website inventory to run the aboce brand ads along with the IAB categories. Return a table as comma-separated values with two columns - IAB Category and Website Name"}
# # st.info(f'X-rae Response - {indian_brands_response_msg["content"]}')
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2,
reply2,
message3,
reply3,
message4,
]
)
web_inventory_response_msg = response["choices"][0]["message"]
# self.web_inventory_response_msg = web_inventory_response_msg["content"]
text = web_inventory_response_msg["content"]
print(text)
paragraphs = text.split('\n\n')
# Extract the first paragraph
self.pff3 = paragraphs[0]
# Extract the table content
table_content = paragraphs[1]
matches = table_content.split('\n')
data = []
for match in matches:
if 'IAB' in match or '---' in match or not match or 'rand' in match:
continue
iab_category,webname = match.split(',')
data.append({'Website Name': webname, 'IAB Category': iab_category})
self.df3 = pd.DataFrame(data)
# Create a DataFrame from the table content
# self.df3 = pd.read_csv(StringIO(table_content), skipinitialspace=True)
# self.df3 = pd.DataFrame([row.split(':') for row in table_content], columns=['Category', 'Websites'])
# Strip leading and trailing whitespace from the DataFrame
# self.df3 = self.df3.apply(lambda x: x.str.strip())
# Extract the last paragraph
self.pfl3 = paragraphs[-1] if match not in paragraphs[-1] else ""
except Exception as error:
print(error)
self.pff3 = text
yield
print("Indian Websites")
reply4 = {"role":"assistant","content":f"{web_inventory_response_msg['content']}"}
message5 = {"role":"user","content":f"Suggest the right list of top 10 Indian website inventory to run the following brand ads along with the IAB categories. Return a table as comma-separated values with two columns - IAB Category and Website Name"}
# # st.info(f'X-rae Response - {web_inventory_response_msg["content"]}')
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2,
reply2,
message3,
reply3,
message4,
reply4,
message5,
]
)
# import pdb;pdb.set_trace()
ind_web_response_msg = response["choices"][0]["message"]
# self.ind_web_response_msg = ind_web_response_msg["content"]
text = ind_web_response_msg["content"]
print(text)
paragraphs = text.split('\n\n')
# Extract the first paragraph
self.pff4 = paragraphs[0]
# Extract the table content
table_content = paragraphs[1]
matches = table_content.split('\n')
data = []
for match in matches:
if 'IAB' in match or '---' in match or not match or 'rand' in match:
continue
iab_category,webname = match.split(',')
data.append({'Website Name': webname, 'IAB Category': iab_category})
self.df4 = pd.DataFrame(data)
# Create a DataFrame from the table content
# self.df4 = pd.read_csv(StringIO(table_content), skipinitialspace=True)
# self.df4 = pd.DataFrame([row.split(':') for row in table_content], columns=['Category', 'Websites'])
# self.df4 = self.df4.apply(lambda x: x.str.strip())
# Extract the last paragraph
self.pfl4 = paragraphs[-1] if match not in paragraphs[-1] else ""
except Exception as error:
print(error)
self.pff4 = text
yield
print("News")
reply5 = {"role":"assistant","content":f"{ind_web_response_msg['content']}"}
message6 = {"role":"user","content":f"Suggest the right list of top 10 News website inventory to run the following brand ads along with the IAB categories. Return a table as comma-separated values with two columns - IAB Category and Website Name"}
# # st.info(f'X-rae Response - {ind_web_response_msg["content"]}')
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2,
reply2,
message3,
reply3,
message4,
reply4,
message5,
reply5,
message6,
]
)
# import pdb;pdb.set_trace()
news_response_msg = response["choices"][0]["message"]
# self.news_response_msg = news_response_msg["content"]
text = news_response_msg["content"]
print(text)
paragraphs = text.split('\n\n')
# Extract the first paragraph
self.pff5 = paragraphs[0]
# Extract the table content
table_content = paragraphs[1]
matches = table_content.split('\n')
data = []
for match in matches:
if 'IAB' in match or '---' in match or not match or 'rand' in match:
continue
iab_category,webname = match.split(',')
data.append({'Website Name': webname, 'IAB Category': iab_category})
self.df5 = pd.DataFrame(data)
# Create a DataFrame from the table content
# self.df5 = pd.read_csv(StringIO(table_content), skipinitialspace=True)
# Extract the last paragraph
self.pfl5 = paragraphs[-1] if match not in paragraphs[-1] else ""
except Exception as error:
print(error)
self.pff5 = text
yield
print("News India")
reply6 = {"role":"assistant","content":f"{news_response_msg['content']}"}
message7 = {"role":"user","content":f"Suggest the right list of top 10 Indian News website inventory to run the following brand ads along with the IAB categories. Return a table as comma-separated values with two columns - IAB Category and Website Name"}
# # st.info(f'X-rae Response - {news_response_msg["content"]}')
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
sysmessage,
message1,
reply1,
message2,
reply2,
message3,
reply3,
message4,
reply4,
message5,
reply5,
message6,
reply6,
message7,
]
)
# import pdb;pdb.set_trace()
news_ind_response_msg = response["choices"][0]["message"]
# self.news_ind_response_msg = news_ind_response_msg["content"]
text = news_ind_response_msg["content"]
print(text)
paragraphs = text.split('\n\n')
# Extract the first paragraph
self.pff6 = paragraphs[0]
# Extract the table content
table_content = paragraphs[1]
matches = table_content.split('\n')
data = []
for match in matches:
if 'IAB' in match or '---' in match or not match or 'rand' in match:
continue
iab_category,webname = match.split(',')
data.append({'Website Name': webname, 'IAB Category': iab_category})
self.df6 = pd.DataFrame(data)
# Create a DataFrame from the table content
# self.df6 = pd.read_csv(StringIO(table_content), skipinitialspace=True)
# Extract the last paragraph
self.pfl6 = paragraphs[-1] if match not in paragraphs[-1] else ""
except Exception as error:
print(error)
self.pff6 = text
yield
return True
# st.info(f'X-rae Response - {news_ind_response_msg["content"]}')
elif not os.path.exists(uploaded_file):
self.alert_msg_header = "Error"
self.alert_msg = "Failed to load image"
self.alert_change()
self.processing,self.complete = False,True
yield
# return rx.window_alert("Failed to load image")
# st.error("Failed to load image")
elif not article.title:
self.alert_msg_header = "Error"
self.alert_msg = "Failed to load data file"
self.alert_change()
self.processing,self.complete = False,True
yield
# return rx.window_alert("Failed to load data file ")
# st.error("Failed to load data file ")
else:
self.alert_msg_header = "Error"
self.alert_msg = "Unknown Error"
self.alert_change()
self.processing,self.complete = False,True
yield
# return rx.window_alert("Failed to load data file ")
# print(f"Files not found - {uploaded_file} - 'out.json'")
def alert_change(self):
self.alert_show = not (self.alert_show)
def tag_list(tag: str):
# index = random.randint(0,4)
global colindex
# import pdb;pdb.set_trace()
if colindex == 0:
colindex = 1
else:
colindex = 0
print(f"Color index is {colindex}")
colorval = ["#a61d55","#991BE2"]
return rx.badge(
tag, variant="solid",
background="transparent",
line_height="1.42",
# bg="#fff",
color=f"{colorval[colindex]}",##991BE2
border_color=f"{colorval[colindex]}",##991BE2
border_width="1px",
border_style= "solid",
font_size="1em",
font_weight="normal",
text_transform = "lowercase",
border_radius = "1.41em",
cursor = "pointer",
# box_shadow = "5px 5px #000000",
margin = "6px",
padding = "0.7em 1.4em"
)
def colored_box(color: str):
return rx.box(rx.text(color), bg=color)
def index() -> rx.Component:
return rx.fragment(
rx.hstack(
rx.image(src="https://citrusberry.biz/assets/img/menu_logo1.png", width="41px", height="auto"),
rx.image(src="https://citrusberry.biz/assets/img/menu_logo.png", width="90px", height="auto"),
padding="10px",
margin="5px",
),
# rx.color_mode_button(rx.color_mode_icon(), float="right"),
rx.vstack(
rx.tooltip(
rx.card(
rx.center(
rx.image(src="logo-no-background.png", width="200px", height="auto"),
# rx.heading("X-Rae Output", size="xl", color="#fb5e78"),
# border_radius="15px",
# border_width="thick",
width="100%",
# border_color="#fb5e78",
),
),
# background="linear-gradient(90deg, #ff5c72, #a485f2)",
# rx.heading("Contextual AI Demo!", font_size="2em",color="#a61d55",),
label="Please select a link and Click on Analyze",
),
rx.alert_dialog(
rx.alert_dialog_overlay(
rx.alert_dialog_content(
rx.alert_dialog_header(State.alert_msg_header),
rx.alert_dialog_body(
State.alert_msg
),
rx.alert_dialog_footer(
rx.button(
"Close",
on_click=State.alert_change,
)
),
)
),
is_open=State.alert_show,
),
# rx.box("Get started by editing ", rx.code(filename, font_size="1em")),
rx.center(
rx.tooltip(
rx.input(
# placeholder="Enter the page url",
on_blur=State.set_text,
width="100%",
# value=State.text,
),
label="Please Enter a url link and Click on Analyze",
),
width="1000px"
),
rx.hstack(
# rx.button(
# "Clear", on_click=State.clear_text,width="100%",
# ),
rx.html("""
<button class='btn-101'>
Analyse
<svg>
<defs>
<filter id='glow'>
<fegaussianblur result='coloredBlur' stddeviation='5'></fegaussianblur>
<femerge>
<femergenode in='coloredBlur'></femergenode>
<femergenode in='coloredBlur'></femergenode>
<femergenode in='coloredBlur'></femergenode>
<femergenode in='SourceGraphic'></femergenode>
</femerge>
</filter>
</defs>
<rect />
</svg>
</button>
""",on_click=State.run_analysis),
# rx.button(
# "Analyze", on_click=State.run_analysis,is_loading=State.processing,width="100%",
# background_image="linear-gradient(90deg, #ff5c72, #a485f2)",
# ),
),
rx.cond(
State.complete,
rx.responsive_grid(
rx.vstack(
# rx.divider(border_color="#a61d55"),
rx.heading(State.article_title, size="lg",margin="30px",
),
# rx.hstack(
rx.responsive_grid(
rx.card(
rx.center(
rx.image(
src=State.image_url,
height="25em",
width="37.5em",
),
border_radius="10px",
border_width="2px",
border_color="#a61d55",
width="100%",
),
header=rx.heading("Article Image", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
rx.card(
rx.text(State.article_text),
header=rx.heading("Article Text", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
columns=[2],
spacing="4",
),
# rx.divider(border_color="black"),
rx.responsive_grid(
rx.card(
rx.center(
rx.vstack(
rx.hstack(
rx.foreach(State.model_tag_list,tag_list),rx.spacer(),
),
rx.hstack(
rx.heading(State.model_caption, size="lg", ),
),
),
),
background="linear-gradient(45deg,#e6e4fc,#fceded)",
header=rx.heading("X RAE Image Analysis", size="lg"),
),
columns=[1],
spacing="4",
width="100%",
),
rx.responsive_grid(
# rx.divider(border_color="black"),
rx.card(
rx.center(
rx.vstack(
# rx.hstack(
rx.heading(
State.sff1+State.sentiment_disp_value, color=State.sentiment_filled_bg,opacity="0.8"
),
# ),
# rx.hstack(
rx.progress(value=State.sentiment_value, width="100%",color_scheme=State.sentiment_color,height="15px",bg="#fff",opacity="0.8"),
# ),
width="75%",
),
),
background="linear-gradient(45deg,#e6e4fc,#fceded)",
header=rx.heading("Overall Sentiment", size="lg"),
),
columns=[1],
spacing="4",
width="100%",
),
rx.responsive_grid(
rx.card(
rx.vstack(
# rx.heading(State.sff1),
# rx.heading(
# State.sff1+State.sentiment_disp_value, color=State.sentiment_filled_bg
# ),
# rx.progress(value=State.sentiment_value, width="100%",color_scheme=State.sentiment_color,height="15px"),
# rx.slider(
# rx.slider_track(
# rx.slider_filled_track(bg=State.sentiment_filled_bg),
# bg=State.sentiment_empty_bg,
# height="5px",
# ),
# # rx.slider_thumb(
# # rx.icon(tag="star", color="white"),
# # box_size="1.5em",
# # bg="tomato",
# # ),
# # on_change_end=SliderManual.set_end,
# value=State.sentiment_value,
# default_value=40,
# is_disabled=True,
# height="5px",
# ),
rx.data_table(
data=State.keyword_list,
pagination=False,
search=False,
sort=False,
),
rx.text(State.iab_safety_response_msg),
),
header=rx.heading("Keywords", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
rx.card(
rx.cond(
State.error_occured,
rx.text(State.iab_response_msg),
rx.data_table(
data=State.df,
# pagination=True,
# search=True,
# sort=True,
),
),
header=rx.heading("IAB Categories", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
columns=[2],
spacing="4",
),
rx.responsive_grid(
rx.card(
rx.vstack(
rx.text(State.pff1),
rx.data_table(
data=State.df1,
# pagination=True,
# search=True,
# sort=True,
),
rx.text(State.pfl1,font_style="italic"),
),
header=rx.heading("Global Brands To Target", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
rx.card(
# rx.text(State.indian_brands_response_msg),
rx.vstack(
rx.text(State.pff2),
rx.data_table(
data=State.df2,
# pagination=True,
# search=True,
# sort=True,
),
rx.text(State.pfl2,font_style="italic"),
),
header=rx.heading("Indian Brands To Target", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
columns=[2],
spacing="4",
),
rx.responsive_grid(
rx.card(
# rx.text(State.web_inventory_response_msg),
rx.vstack(
rx.text(State.pff3),
rx.data_table(
data=State.df3,
# pagination=True,
# search=True,
# sort=True,
),
rx.text(State.pfl3,font_style="italic"),
),
header=rx.heading("Website Inventory to target", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
rx.card(
# rx.text(State.ind_web_response_msg),
rx.vstack(
rx.text(State.pff4),
rx.data_table(
data=State.df4,
# pagination=True,
# search=True,
# sort=True,
),
rx.text(State.pfl4,font_style="italic"),
),
header=rx.heading("Indian Website Inventory to target", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
columns=[2],
spacing="4",
),
rx.responsive_grid(
rx.card(
# rx.text(State.news_response_msg),
rx.vstack(
rx.text(State.pff5),
rx.data_table(
data=State.df5,
# pagination=True,
# search=True,
# sort=True,
),
rx.text(State.pfl5,font_style="italic"),
),
header=rx.heading("News Website Inventory to target", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
rx.card(
# rx.text(State.news_ind_response_msg),
rx.vstack(
rx.text(State.pff6),
rx.data_table(
data=State.df6,
# pagination=True,
# search=True,
# sort=True,
),
rx.text(State.pfl6,font_style="italic"),
),
header=rx.heading("Indian News Website Inventory to target", size="lg"),
# footer=rx.heading("Footer", size="sm"),
),
columns=[2],
spacing="4",
),
),
animation="fadeInAnimation ease 3s",
)
),
spacing="1.5em",
font_size="1em",
padding="3%",
shadow="lg",
border_radius="lg",
),
width="100%",
height="auto",
#
)
def about():
return rx.text("About Page")
# Add state and page to the app.
app = rx.App(state=State,stylesheets=[
"styles/fontstyles.css","styles/center-simple.css","styles/introjs.min.css" # This path is relative to assets/
],style=style,scripts="intro.js")
app.add_page(index,title="Contextual Demo")
app.add_page(about, route="/about")
app.compile()
| [
"Suggest list of top 10 Indian brands whose tone of voice resonates with following tags - PLACEHOLDER - PLACEHOLDER. Return response as a table with single column of Brand names without index",
"Suggest the right list of top 10 News website inventory to run the following brand ads along with the IAB categories. Return a table as comma-separated values with two columns - IAB Category and Website Name",
"[PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER]",
"PLACEHOLDER",
"Please respond as a content expert and consultant and do not include disclaimers.",
"[PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER]",
"Suggest the right list of top 10 Indian News website inventory to run the following brand ads along with the IAB categories. Return a table as comma-separated values with two columns - IAB Category and Website Name",
"Suggest the right list of top 10 Indian website inventory to run the following brand ads along with the IAB categories. Return a table as comma-separated values with two columns - IAB Category and Website Name",
"Suggest the right list of top 10 global website inventory to run the aboce brand ads along with the IAB categories. Return a table as comma-separated values with two columns - IAB Category and Website Name",
"[PLACEHOLDER, PLACEHOLDER]",
"content",
"Suggest list of top 10 global brands whose tone of voice resonates with following tags - PLACEHOLDER - PLACEHOLDER.Return response as a table with single column of Brand names without index",
"Please respond as a content expert and consultant and do not include disclaimers.Please provide information in tabular format or in a format which can be parsed into a table using a python API.",
"[PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER]",
"Please take a guess of the IAB categories for an article with following image tags and caption - PLACEHOLDER - PLACEHOLDER. Return a table with single column of IAB category list "
] |
2024-01-10 | minerllabs/basalt-2022-behavioural-cloning-baseline | openai_vpt~lib~masked_attention.py | import functools
import torch as th
from torch import nn
import openai_vpt.lib.xf as xf
from openai_vpt.lib.minecraft_util import store_args
from openai_vpt.lib.tree_util import tree_map
@functools.lru_cache()
def get_band_diagonal_mask(t: int, T: int, maxlen: int, batchsize: int, device: th.device) -> th.Tensor:
"""Returns a band diagonal mask which is causal (upper triangle is masked)
and such that any frame can only view up to maxlen total past frames
including the current frame.
Example Masks: Here 0 means that frame is masked and we mask it by adding a huge number to the attention logits (see orc.xf)
t = 3, T = 3, maxlen = 3
T
t 1 0 0 | mask out T > t
1 1 0 |
1 1 1 |
t = 3, T = 6, maxlen = 3
t 0 1 1 1 0 0 | mask out T > t
0 0 1 1 1 0 |
0 0 0 1 1 1 |
Args:
        t: number of rows (presumably number of frames receiving gradient)
T: number of cols (presumably t + past context that isn't being gradient updated)
maxlen: maximum number of frames (including current frame) any frame can attend to
batchsize: number of masks to return
device: torch device to place mask on
Returns:
Boolean mask of shape (batchsize, t, T)
"""
m = th.ones(t, T, dtype=bool)
m.tril_(T - t) # Mask out upper triangle
if maxlen is not None and maxlen < T: # Mask out lower triangle
m.triu_(T - t - maxlen + 1)
m_btT = m[None].repeat_interleave(batchsize, dim=0)
m_btT = m_btT.to(device=device)
return m_btT
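# Editorial note (sketch, not in the original file): the second example in the docstring can be
# reproduced directly; assuming a CPU device,
#   get_band_diagonal_mask(3, 6, 3, 1, th.device("cpu"))[0].int()
# yields [[0, 1, 1, 1, 0, 0],
#         [0, 0, 1, 1, 1, 0],
#         [0, 0, 0, 1, 1, 1]].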
def get_mask(first_b11: th.Tensor, state_mask: th.Tensor, t: int, T: int, maxlen: int, heads: int, device) -> th.Tensor:
"""Returns a band diagonal mask that respects masking past states (columns 0:T-t inclusive)
if first_b11 is True. See get_band_diagonal_mask for how the base mask is computed.
This function takes that mask and first zeros out any past context if first_b11 is True.
Say our context is in chunks of length t (so here T = 4t). We see that in the second batch we recieved first=True
context t t t t
first F T F F
Now, given this the mask should mask out anything prior to T < t; however since we don't have access to the past first_b11's
we need to keep a state of the mask at those past timesteps. This is what state_mask is.
In particular state_mask is a [b, t, T - t] mask matrix that contains the mask for the past T - t frames.
Args: (See get_band_diagonal_mask for remaining args)
first_b11: boolean tensor with shape [batchsize, 1, 1] indicating if the first timestep for each batch element had first=True
state_mask: mask tensor of shape [b, t, T - t]
t: number of mask rows (presumably number of frames for which we take gradient)
T: number of mask columns (t + the number of past frames we keep in context)
maxlen: actual context length
heads: number of attention heads
device: torch device
Returns:
m_btT: Boolean mask of shape (batchsize * heads, t, T)
state_mask: updated state_mask
"""
b = first_b11.shape[0]
if state_mask is None:
state_mask = th.zeros((b, 1, T - t), dtype=bool, device=device)
m_btT = get_band_diagonal_mask(t, T, maxlen, b, device).clone() # Should be shape B, t, T
not_first = ~first_b11.to(device=device)
m_btT[:, :, :-t] &= not_first # Zero out anything in the past if first is true
m_btT[:, :, :-t] &= state_mask
m_bhtT = m_btT[:, None].repeat_interleave(heads, dim=1)
m_btT = m_bhtT.reshape((b * heads), t, T)
# Update state_mask such that it reflects the most recent first
state_mask = th.cat(
[
state_mask[:, :, t:] & not_first,
th.ones((b, 1, min(t, T - t)), dtype=bool, device=device),
],
dim=-1,
)
return m_btT, state_mask
class MaskedAttention(nn.Module):
"""
Transformer self-attention layer that removes frames from previous episodes from the hidden state under certain constraints.
The constraints are:
- The "first" flag can only be true for the first timestep of each batch. An assert will fire if other timesteps have first = True.
input_size: The dimension of the input (which also happens to be the size of the output)
memory_size: The number of frames to keep in the inner state. Note that when attending, we will be able to attend
to both the frames in the inner state (which presumably won't have gradients anymore) and the frames
                 in the batch. See "mask" for some additional considerations on this.
heads: The number of attention heads to use. Note that we will split the input into this number of heads, so
input_size needs to be divisible by heads.
timesteps: number of timesteps with which we'll be taking gradient
mask: Can be "none" or "clipped_causal". "clipped_causal" is a normal causal mask but solves the following minor problem:
if you have a state of length 128 and a batch of 128 frames, then the first frame of your batch will be able to
attend to 128 previous frames, but the last one will be able to attend to 255 previous frames. In this example,
"clipped_causal" will make it so that the last frame can only attend to 128 previous frames, so that there is no
bias coming from the position in the batch. None simply allows you to attend to any frame in the state + batch,
which means you can also attend to future frames.
"""
@store_args
def __init__(
self,
input_size,
memory_size: int,
heads: int,
timesteps: int,
mask: str = "clipped_causal",
init_scale=1,
norm="none",
log_scope="sa",
use_muP_factor=False,
):
super().__init__()
assert mask in {"none", "clipped_causal"}
assert memory_size >= 0
self.maxlen = memory_size - timesteps
if mask == "none":
mask = None
self.orc_attn = xf.All2All(heads, self.maxlen, mask=mask is not None)
self.orc_block = xf.SelfAttentionLayer(
input_size,
self.orc_attn,
scale=init_scale,
relattn=True,
cache_keep_len=self.maxlen,
norm=norm,
log_scope=log_scope,
use_muP_factor=use_muP_factor,
)
def initial_state(self, batchsize: int, device=None):
"""Return the initial state mask (None) and the initial state of the transformer (zerod out keys and queries)"""
state = self.orc_block.initial_state(batchsize, initial_T=self.maxlen)
state_mask = None
if device is not None:
state = tree_map(lambda x: x.to(device), state)
return state_mask, state
def forward(self, input_bte, first_bt, state):
"""Forward propagation of a single layer"""
state_mask, xf_state = state
t = first_bt.shape[1]
if self.mask == "clipped_causal":
new_mask, state_mask = get_mask(
first_b11=first_bt[:, [[0]]],
state_mask=state_mask,
t=t,
T=t + self.maxlen,
maxlen=self.maxlen,
heads=self.heads,
device=input_bte.device,
)
self.orc_block.attn.mask = new_mask
output, xf_state = self.orc_block(input_bte, xf_state)
return output, (state_mask, xf_state)
def get_log_keys(self):
# These are logged in xf.SelfAttentionLayer
return [f"activation_{stat}/{self.log_scope}/{k}" for k in ["K", "Q", "V", "A", "Aproj"] for stat in ["mean", "std"]]
| [] |
2024-01-10 | minerllabs/basalt-2022-behavioural-cloning-baseline | openai_vpt~lib~torch_util.py | import functools
import itertools
import math
import os
import pickle
import re
import subprocess
import tempfile
from contextlib import contextmanager
from hashlib import md5, sha1
import numpy as np
import torch as th
import torch.distributed as dist
import torch.distributions as dis
import torch.nn.functional as F
from torch import nn
import openai_vpt.lib.tree_util as tree_util
from openai_vpt.lib import misc
def contextmanager_to_decorator(cm):
def decorator(fn):
@functools.wraps(fn)
def newfn(*args, **kwargs):
with cm():
return fn(*args, **kwargs)
return newfn
return decorator
def have_cuda():
return th.has_cuda
def default_device_type():
return "cuda" if have_cuda() else "cpu"
no_grad = contextmanager_to_decorator(th.no_grad)
DEFAULT_DEVICE = th.device(type=default_device_type())
def set_default_torch_device(device):
global DEFAULT_DEVICE
DEFAULT_DEVICE = th.device(device)
def dev():
return DEFAULT_DEVICE
def zeros(*args, **kwargs):
return th.zeros(*args, **kwargs, device=dev())
def ones(*args, **kwargs):
return th.ones(*args, **kwargs, device=dev())
def arange(*args, **kwargs):
return th.arange(*args, **kwargs, device=dev())
def NormedLinear(*args, scale=1.0, dtype=th.float32, **kwargs):
"""
nn.Linear but with normalized fan-in init
"""
dtype = parse_dtype(dtype)
if dtype == th.float32:
out = nn.Linear(*args, **kwargs)
elif dtype == th.float16:
out = LinearF16(*args, **kwargs)
else:
raise ValueError(dtype)
out.weight.data *= scale / out.weight.norm(dim=1, p=2, keepdim=True)
if kwargs.get("bias", True):
out.bias.data *= 0
return out
class LinearF16(nn.Linear):
def forward(self, x):
return F.linear(x, self.weight.half(), self.bias.half() if self.bias is not None else None)
class LayerNormF16(nn.LayerNorm):
def forward(self, x):
return F.layer_norm(x, self.normalized_shape, self.weight.half(), self.bias.half(), self.eps)
def LayerNorm(*args, dtype=th.float32, **kwargs):
dtype = parse_dtype(dtype)
if dtype == th.float32:
out = nn.LayerNorm(*args, **kwargs)
elif dtype == th.float16:
out = LayerNormF16(*args, **kwargs)
else:
raise ValueError(dtype)
out.weight.no_scale = True
return out
def flatten_image(x):
"""
Flattens last three dims
"""
*batch_shape, h, w, c = x.shape
return x.reshape((*batch_shape, h * w * c))
def sequential(layers, x, *args, diag_name=None, use_checkpoint=False):
for (i, layer) in enumerate(layers):
x = layer(x, *args)
return x
@no_grad
def load_average_with_metadata(paths, overrides):
n_models = len(paths)
model, metadata = load_with_metadata(paths[0], overrides=overrides)
for p in model.parameters():
p.mul_(1 / n_models)
for p in paths[1:]:
new_model, _ = load_with_metadata(p, overrides=overrides)
for (n1, p1), (n2, p2) in misc.safezip(model.named_parameters(), new_model.named_parameters()):
assert n1 == n2, f"names {n1} and {n2} don't match"
p1.add_(p2.mul_(1 / n_models))
return model, metadata
def save_kwargs(fn):
"""
This decorator passes through the user-provided kwargs and adds one more, called
    save_kwargs, mapping to {**other_kwargs, "create_fn": "module:name_of_decorated_fn"}
You put on this decorator on a function that creates a pytorch module. This will
save the kwargs and the function that was used to create the module.
This lets us restore the model state later.
"""
@functools.wraps(fn)
def wrapper(**kwargs):
if "save_kwargs" in kwargs:
return fn(**kwargs)
else:
sk = {**kwargs, "create_fn": f"{fn.__module__}:{fn.__name__}"}
return fn(save_kwargs=sk, **kwargs)
return wrapper
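# Editorial sketch (not in the original module): a factory decorated with @save_kwargs and called
# as make_net(hidsize=512) is invoked as fn(save_kwargs={"hidsize": 512,
# "create_fn": "my_module:make_net"}, hidsize=512), so the factory can stash save_kwargs on the
# module it builds and the module can later be re-created from it. "make_net" and "my_module" are
# hypothetical names used only for illustration.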
def parse_dtype(x):
if isinstance(x, th.dtype):
return x
elif isinstance(x, str):
if x == "float32" or x == "float":
return th.float32
elif x == "float64" or x == "double":
return th.float64
elif x == "float16" or x == "half":
return th.float16
elif x == "uint8":
return th.uint8
elif x == "int8":
return th.int8
elif x == "int16" or x == "short":
return th.int16
elif x == "int32" or x == "int":
return th.int32
elif x == "int64" or x == "long":
return th.int64
elif x == "bool":
return th.bool
else:
raise ValueError(f"cannot parse {x} as a dtype")
else:
raise TypeError(f"cannot parse {type(x)} as dtype")
def index(x, i):
"""
Batched, broadcasting index of x along dimension i.ndim.
For example, if x has shape (1, 2, 3, 4, 5) and i has shape (1, 1, 3)
then the result has shape (1, 2, 3, 5) and each value in i must be between 0 and 3.
"""
assert x.ndim >= i.ndim + 1
gather_dim = i.ndim
while i.ndim < x.ndim:
i = i.unsqueeze(-1)
expand_shape = list(x.shape)
expand_shape[gather_dim] = 1
i = i.expand(*expand_shape)
xi = th.gather(x, gather_dim, i)
assert xi.shape[gather_dim] == 1
return xi.squeeze(gather_dim)
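# Editorial sketch (not in the original module), illustrating the docstring above:
#   x = th.arange(30).reshape(2, 3, 5)
#   i = th.tensor([[0, 1, 2], [3, 4, 0]])
#   index(x, i)  # returns tensor([[0, 6, 12], [18, 24, 25]]), i.e. out[b, t] == x[b, t, i[b, t]]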
| [] |
2024-01-10 | minerllabs/basalt-2022-behavioural-cloning-baseline | openai_vpt~lib~policy.py | from copy import deepcopy
from typing import Dict, Optional
import numpy as np
import torch as th
from gym3.types import DictType
from torch import nn
from torch.nn import functional as F
from openai_vpt.lib.action_head import make_action_head
from openai_vpt.lib.action_mapping import CameraHierarchicalMapping
from openai_vpt.lib.impala_cnn import ImpalaCNN
from openai_vpt.lib.normalize_ewma import NormalizeEwma
from openai_vpt.lib.scaled_mse_head import ScaledMSEHead
from openai_vpt.lib.tree_util import tree_map
from openai_vpt.lib.util import FanInInitReLULayer, ResidualRecurrentBlocks
class ImgPreprocessing(nn.Module):
"""Normalize incoming images.
:param img_statistics: remote path to npz file with a mean and std image. If specified
normalize images using this.
:param scale_img: If true and img_statistics not specified, scale incoming images by 1/255.
"""
def __init__(self, img_statistics: Optional[str] = None, scale_img: bool = True):
super().__init__()
self.img_mean = None
if img_statistics is not None:
img_statistics = dict(**np.load(img_statistics))
self.img_mean = nn.Parameter(th.Tensor(img_statistics["mean"]), requires_grad=False)
self.img_std = nn.Parameter(th.Tensor(img_statistics["std"]), requires_grad=False)
else:
self.ob_scale = 255.0 if scale_img else 1.0
def forward(self, img):
x = img.to(dtype=th.float32)
if self.img_mean is not None:
x = (x - self.img_mean) / self.img_std
else:
x = x / self.ob_scale
return x
class ImgObsProcess(nn.Module):
"""ImpalaCNN followed by a linear layer.
:param cnn_outsize: impala output dimension
:param output_size: output size of the linear layer.
:param dense_init_norm_kwargs: kwargs for linear FanInInitReLULayer
:param init_norm_kwargs: kwargs for 2d and 3d conv FanInInitReLULayer
"""
def __init__(
self,
cnn_outsize: int,
output_size: int,
dense_init_norm_kwargs: Dict = {},
init_norm_kwargs: Dict = {},
**kwargs,
):
super().__init__()
self.cnn = ImpalaCNN(
outsize=cnn_outsize,
init_norm_kwargs=init_norm_kwargs,
dense_init_norm_kwargs=dense_init_norm_kwargs,
**kwargs,
)
self.linear = FanInInitReLULayer(
cnn_outsize,
output_size,
layer_type="linear",
**dense_init_norm_kwargs,
)
def forward(self, img):
return self.linear(self.cnn(img))
class MinecraftPolicy(nn.Module):
"""
:param recurrence_type:
        None                - No recurrence, adds no extra layers
        lstm                - (Deprecated). Singular LSTM
        multi_layer_lstm    - Multi-layer LSTM. Uses n_recurrence_layers to determine number of consecutive LSTMs
                              Does NOT support ragged batching
        multi_masked_lstm   - Multi-layer LSTM that supports ragged batching via the first vector. This model is slower
                              Uses n_recurrence_layers to determine number of consecutive LSTMs
        transformer         - Dense transformer
:param init_norm_kwargs: kwargs for all FanInInitReLULayers.
"""
def __init__(
self,
recurrence_type="lstm",
impala_width=1,
impala_chans=(16, 32, 32),
obs_processing_width=256,
hidsize=512,
single_output=False, # True if we don't need separate outputs for action/value outputs
img_shape=None,
scale_input_img=True,
only_img_input=False,
init_norm_kwargs={},
impala_kwargs={},
# Unused argument assumed by forc.
input_shape=None, # pylint: disable=unused-argument
active_reward_monitors=None,
img_statistics=None,
first_conv_norm=False,
diff_mlp_embedding=False,
attention_mask_style="clipped_causal",
attention_heads=8,
attention_memory_size=2048,
use_pointwise_layer=True,
pointwise_ratio=4,
pointwise_use_activation=False,
n_recurrence_layers=1,
recurrence_is_residual=True,
timesteps=None,
use_pre_lstm_ln=True, # Not needed for transformer
**unused_kwargs,
):
super().__init__()
assert recurrence_type in [
"multi_layer_lstm",
"multi_layer_bilstm",
"multi_masked_lstm",
"transformer",
"none",
]
active_reward_monitors = active_reward_monitors or {}
self.single_output = single_output
chans = tuple(int(impala_width * c) for c in impala_chans)
self.hidsize = hidsize
# Dense init kwargs replaces batchnorm/groupnorm with layernorm
self.init_norm_kwargs = init_norm_kwargs
self.dense_init_norm_kwargs = deepcopy(init_norm_kwargs)
if self.dense_init_norm_kwargs.get("group_norm_groups", None) is not None:
self.dense_init_norm_kwargs.pop("group_norm_groups", None)
self.dense_init_norm_kwargs["layer_norm"] = True
if self.dense_init_norm_kwargs.get("batch_norm", False):
self.dense_init_norm_kwargs.pop("batch_norm", False)
self.dense_init_norm_kwargs["layer_norm"] = True
# Setup inputs
self.img_preprocess = ImgPreprocessing(img_statistics=img_statistics, scale_img=scale_input_img)
self.img_process = ImgObsProcess(
cnn_outsize=256,
output_size=hidsize,
inshape=img_shape,
chans=chans,
nblock=2,
dense_init_norm_kwargs=self.dense_init_norm_kwargs,
init_norm_kwargs=init_norm_kwargs,
first_conv_norm=first_conv_norm,
**impala_kwargs,
)
self.pre_lstm_ln = nn.LayerNorm(hidsize) if use_pre_lstm_ln else None
self.diff_obs_process = None
self.recurrence_type = recurrence_type
self.recurrent_layer = None
self.recurrent_layer = ResidualRecurrentBlocks(
hidsize=hidsize,
timesteps=timesteps,
recurrence_type=recurrence_type,
is_residual=recurrence_is_residual,
use_pointwise_layer=use_pointwise_layer,
pointwise_ratio=pointwise_ratio,
pointwise_use_activation=pointwise_use_activation,
attention_mask_style=attention_mask_style,
attention_heads=attention_heads,
attention_memory_size=attention_memory_size,
n_block=n_recurrence_layers,
)
self.lastlayer = FanInInitReLULayer(hidsize, hidsize, layer_type="linear", **self.dense_init_norm_kwargs)
self.final_ln = th.nn.LayerNorm(hidsize)
def output_latent_size(self):
return self.hidsize
def forward(self, ob, state_in, context):
first = context["first"]
x = self.img_preprocess(ob["img"])
x = self.img_process(x)
if self.diff_obs_process:
processed_obs = self.diff_obs_process(ob["diff_goal"])
x = processed_obs + x
if self.pre_lstm_ln is not None:
x = self.pre_lstm_ln(x)
if self.recurrent_layer is not None:
x, state_out = self.recurrent_layer(x, first, state_in)
else:
state_out = state_in
x = F.relu(x, inplace=False)
x = self.lastlayer(x)
x = self.final_ln(x)
pi_latent = vf_latent = x
if self.single_output:
return pi_latent, state_out
return (pi_latent, vf_latent), state_out
def initial_state(self, batchsize):
if self.recurrent_layer:
return self.recurrent_layer.initial_state(batchsize)
else:
return None
class MinecraftAgentPolicy(nn.Module):
def __init__(self, action_space, policy_kwargs, pi_head_kwargs):
super().__init__()
self.net = MinecraftPolicy(**policy_kwargs)
self.action_space = action_space
self.value_head = self.make_value_head(self.net.output_latent_size())
self.pi_head = self.make_action_head(self.net.output_latent_size(), **pi_head_kwargs)
def make_value_head(self, v_out_size: int, norm_type: str = "ewma", norm_kwargs: Optional[Dict] = None):
return ScaledMSEHead(v_out_size, 1, norm_type=norm_type, norm_kwargs=norm_kwargs)
def make_action_head(self, pi_out_size: int, **pi_head_opts):
return make_action_head(self.action_space, pi_out_size, **pi_head_opts)
def initial_state(self, batch_size: int):
return self.net.initial_state(batch_size)
def reset_parameters(self):
super().reset_parameters()
self.net.reset_parameters()
self.pi_head.reset_parameters()
self.value_head.reset_parameters()
def forward(self, obs, first: th.Tensor, state_in):
if isinstance(obs, dict):
# We don't want to mutate the obs input.
obs = obs.copy()
# If special "mask" key is in obs,
# It's for masking the logits.
# We take it out (the network doesn't need it)
mask = obs.pop("mask", None)
else:
mask = None
(pi_h, v_h), state_out = self.net(obs, state_in, context={"first": first})
pi_logits = self.pi_head(pi_h, mask=mask)
vpred = self.value_head(v_h)
return (pi_logits, vpred, None), state_out
def get_logprob_of_action(self, pd, action):
"""
Get logprob of taking action `action` given probability distribution
(see `get_gradient_for_action` to get this distribution)
"""
ac = tree_map(lambda x: x.unsqueeze(1), action)
log_prob = self.pi_head.logprob(ac, pd)
assert not th.isnan(log_prob).any()
return log_prob[:, 0]
def get_kl_of_action_dists(self, pd1, pd2):
"""
Get the KL divergence between two action probability distributions
"""
return self.pi_head.kl_divergence(pd1, pd2)
def get_output_for_observation(self, obs, state_in, first):
"""
Return gradient-enabled outputs for given observation.
Use `get_logprob_of_action` to get log probability of action
with the given probability distribution.
Returns:
- probability distribution given observation
- value prediction for given observation
- new state
"""
# We need to add a fictitious time dimension everywhere
obs = tree_map(lambda x: x.unsqueeze(1), obs)
first = first.unsqueeze(1)
(pd, vpred, _), state_out = self(obs=obs, first=first, state_in=state_in)
return pd, self.value_head.denormalize(vpred)[:, 0], state_out
@th.no_grad()
def act(self, obs, first, state_in, stochastic: bool = True, taken_action=None, return_pd=False):
# We need to add a fictitious time dimension everywhere
obs = tree_map(lambda x: x.unsqueeze(1), obs)
first = first.unsqueeze(1)
(pd, vpred, _), state_out = self(obs=obs, first=first, state_in=state_in)
if taken_action is None:
ac = self.pi_head.sample(pd, deterministic=not stochastic)
else:
ac = tree_map(lambda x: x.unsqueeze(1), taken_action)
log_prob = self.pi_head.logprob(ac, pd)
assert not th.isnan(log_prob).any()
# After unsqueezing, squeeze back to remove fictitious time dimension
result = {"log_prob": log_prob[:, 0], "vpred": self.value_head.denormalize(vpred)[:, 0]}
if return_pd:
result["pd"] = tree_map(lambda x: x[:, 0], pd)
ac = tree_map(lambda x: x[:, 0], ac)
return ac, state_out, result
@th.no_grad()
def v(self, obs, first, state_in):
"""Predict value for a given mdp observation"""
obs = tree_map(lambda x: x.unsqueeze(1), obs)
first = first.unsqueeze(1)
(pd, vpred, _), state_out = self(obs=obs, first=first, state_in=state_in)
# After unsqueezing, squeeze back
return self.value_head.denormalize(vpred)[:, 0]
| [] |
2024-01-10 | minerllabs/basalt-2022-behavioural-cloning-baseline | behavioural_cloning.py | # Basic behavioural cloning
# Note: this uses gradient accumulation in batches of ones
# to perform training.
# This will fit inside even smaller GPUs (tested on 8GB one),
# but is slow.
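# Editorial note (sketch of what the training loop below does): each sample's loss is scaled by
# 1/BATCH_SIZE and backpropagated individually, and the optimizer only steps once per batch, so
# the accumulated gradient is equivalent to backpropagating the mean loss over the batch while
# keeping only one sample's activations in memory at a time.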
from argparse import ArgumentParser
import pickle
import time
import gym
import minerl
import torch as th
import numpy as np
from openai_vpt.agent import PI_HEAD_KWARGS, MineRLAgent
from data_loader import DataLoader
from openai_vpt.lib.tree_util import tree_map
# Originally this code was designed for a small dataset of ~20 demonstrations per task.
# The settings might not be the best for the full BASALT dataset (thousands of demonstrations).
# Use this flag to switch between the two settings
USING_FULL_DATASET = True
EPOCHS = 1 if USING_FULL_DATASET else 2
# Needs to be <= number of videos
BATCH_SIZE = 64 if USING_FULL_DATASET else 16
# Ideally more than batch size to create
# variation in datasets (otherwise, you will
# get a bunch of consecutive samples)
# Decrease this (and batch_size) if you run out of memory
N_WORKERS = 100 if USING_FULL_DATASET else 20
DEVICE = "cuda"
LOSS_REPORT_RATE = 100
# Tuned with bit of trial and error
LEARNING_RATE = 0.000181
# OpenAI VPT BC weight decay
# WEIGHT_DECAY = 0.039428
WEIGHT_DECAY = 0.0
# KL loss to the original model was not used in OpenAI VPT
KL_LOSS_WEIGHT = 1.0
MAX_GRAD_NORM = 5.0
MAX_BATCHES = 2000 if USING_FULL_DATASET else int(1e9)
def load_model_parameters(path_to_model_file):
agent_parameters = pickle.load(open(path_to_model_file, "rb"))
policy_kwargs = agent_parameters["model"]["args"]["net"]["args"]
pi_head_kwargs = agent_parameters["model"]["args"]["pi_head_opts"]
pi_head_kwargs["temperature"] = float(pi_head_kwargs["temperature"])
return policy_kwargs, pi_head_kwargs
def behavioural_cloning_train(data_dir, in_model, in_weights, out_weights):
agent_policy_kwargs, agent_pi_head_kwargs = load_model_parameters(in_model)
# To create model with the right environment.
# All basalt environments have the same settings, so any of them works here
env = gym.make("MineRLBasaltFindCave-v0")
agent = MineRLAgent(env, device=DEVICE, policy_kwargs=agent_policy_kwargs, pi_head_kwargs=agent_pi_head_kwargs)
agent.load_weights(in_weights)
# Create a copy which will have the original parameters
original_agent = MineRLAgent(env, device=DEVICE, policy_kwargs=agent_policy_kwargs, pi_head_kwargs=agent_pi_head_kwargs)
original_agent.load_weights(in_weights)
env.close()
policy = agent.policy
original_policy = original_agent.policy
# Freeze most params if using small dataset
for param in policy.parameters():
param.requires_grad = False
# Unfreeze final layers
trainable_parameters = []
for param in policy.net.lastlayer.parameters():
param.requires_grad = True
trainable_parameters.append(param)
for param in policy.pi_head.parameters():
param.requires_grad = True
trainable_parameters.append(param)
# Parameters taken from the OpenAI VPT paper
optimizer = th.optim.Adam(
trainable_parameters,
lr=LEARNING_RATE,
weight_decay=WEIGHT_DECAY
)
data_loader = DataLoader(
dataset_dir=data_dir,
n_workers=N_WORKERS,
batch_size=BATCH_SIZE,
n_epochs=EPOCHS,
)
start_time = time.time()
# Keep track of the hidden state per episode/trajectory.
# DataLoader provides unique id for each episode, which will
# be different even for the same trajectory when it is loaded
# up again
episode_hidden_states = {}
dummy_first = th.from_numpy(np.array((False,))).to(DEVICE)
loss_sum = 0
for batch_i, (batch_images, batch_actions, batch_episode_id) in enumerate(data_loader):
batch_loss = 0
for image, action, episode_id in zip(batch_images, batch_actions, batch_episode_id):
if image is None and action is None:
# A work-item was done. Remove hidden state
if episode_id in episode_hidden_states:
removed_hidden_state = episode_hidden_states.pop(episode_id)
del removed_hidden_state
continue
agent_action = agent._env_action_to_agent(action, to_torch=True, check_if_null=True)
if agent_action is None:
# Action was null
continue
agent_obs = agent._env_obs_to_agent({"pov": image})
if episode_id not in episode_hidden_states:
episode_hidden_states[episode_id] = policy.initial_state(1)
agent_state = episode_hidden_states[episode_id]
pi_distribution, _, new_agent_state = policy.get_output_for_observation(
agent_obs,
agent_state,
dummy_first
)
with th.no_grad():
original_pi_distribution, _, _ = original_policy.get_output_for_observation(
agent_obs,
agent_state,
dummy_first
)
log_prob = policy.get_logprob_of_action(pi_distribution, agent_action)
kl_div = policy.get_kl_of_action_dists(pi_distribution, original_pi_distribution)
# Make sure we do not try to backprop through sequence
# (fails with current accumulation)
new_agent_state = tree_map(lambda x: x.detach(), new_agent_state)
episode_hidden_states[episode_id] = new_agent_state
# Finally, update the agent to increase the probability of the
# taken action.
# Remember to take mean over batch losses
loss = (-log_prob + KL_LOSS_WEIGHT * kl_div) / BATCH_SIZE
batch_loss += loss.item()
loss.backward()
th.nn.utils.clip_grad_norm_(trainable_parameters, MAX_GRAD_NORM)
optimizer.step()
optimizer.zero_grad()
loss_sum += batch_loss
if batch_i % LOSS_REPORT_RATE == 0:
time_since_start = time.time() - start_time
print(f"Time: {time_since_start:.2f}, Batches: {batch_i}, Avrg loss: {loss_sum / LOSS_REPORT_RATE:.4f}")
loss_sum = 0
if batch_i > MAX_BATCHES:
break
state_dict = policy.state_dict()
th.save(state_dict, out_weights)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--data-dir", type=str, required=True, help="Path to the directory containing recordings to be trained on")
parser.add_argument("--in-model", required=True, type=str, help="Path to the .model file to be finetuned")
parser.add_argument("--in-weights", required=True, type=str, help="Path to the .weights file to be finetuned")
parser.add_argument("--out-weights", required=True, type=str, help="Path where finetuned weights will be saved")
args = parser.parse_args()
behavioural_cloning_train(args.data_dir, args.in_model, args.in_weights, args.out_weights)
| [] |
2024-01-10 | minerllabs/basalt-2022-behavioural-cloning-baseline | run_agent.py | from argparse import ArgumentParser
import pickle
import aicrowd_gym
import minerl
from openai_vpt.agent import MineRLAgent
def main(model, weights, env, n_episodes=3, max_steps=int(1e9), show=False):
# Using aicrowd_gym is important! Your submission will not work otherwise
env = aicrowd_gym.make(env)
agent_parameters = pickle.load(open(model, "rb"))
policy_kwargs = agent_parameters["model"]["args"]["net"]["args"]
pi_head_kwargs = agent_parameters["model"]["args"]["pi_head_opts"]
pi_head_kwargs["temperature"] = float(pi_head_kwargs["temperature"])
agent = MineRLAgent(env, policy_kwargs=policy_kwargs, pi_head_kwargs=pi_head_kwargs)
agent.load_weights(weights)
for _ in range(n_episodes):
obs = env.reset()
for _ in range(max_steps):
action = agent.get_action(obs)
# ESC is not part of the predictions model.
# For baselines, we just set it to zero.
# We leave proper execution as an exercise for the participants :)
action["ESC"] = 0
obs, _, done, _ = env.step(action)
if show:
env.render()
if done:
break
env.close()
if __name__ == "__main__":
parser = ArgumentParser("Run pretrained models on MineRL environment")
parser.add_argument("--weights", type=str, required=True, help="Path to the '.weights' file to be loaded.")
parser.add_argument("--model", type=str, required=True, help="Path to the '.model' file to be loaded.")
parser.add_argument("--env", type=str, required=True)
parser.add_argument("--show", action="store_true", help="Render the environment.")
args = parser.parse_args()
main(args.model, args.weights, args.env, show=args.show)
| [] |
2024-01-10 | minerllabs/basalt-2022-behavioural-cloning-baseline | openai_vpt~lib~action_mapping.py | import abc
import itertools
from collections import OrderedDict
from typing import Dict, List
import numpy as np
from gym3.types import DictType, Discrete, TensorType
from openai_vpt.lib.actions import Buttons
class ActionMapping(abc.ABC):
"""Class that maps between the standard MC factored action space and a new one you define!
:param n_camera_bins: Need to specify this to define the original ac space for stats code
"""
# This is the default buttons groups, it can be changed for your action space
BUTTONS_GROUPS = OrderedDict(
hotbar=["none"] + [f"hotbar.{i}" for i in range(1, 10)],
fore_back=["none", "forward", "back"],
left_right=["none", "left", "right"],
sprint_sneak=["none", "sprint", "sneak"],
use=["none", "use"],
drop=["none", "drop"],
attack=["none", "attack"],
jump=["none", "jump"],
)
def __init__(self, n_camera_bins: int = 11):
assert n_camera_bins % 2 == 1, "n_camera_bins should be odd"
self.n_camera_bins = n_camera_bins
self.camera_null_bin = n_camera_bins // 2
self.stats_ac_space = DictType(
**{
"buttons": TensorType(shape=(len(Buttons.ALL),), eltype=Discrete(2)),
"camera": TensorType(shape=(2,), eltype=Discrete(n_camera_bins)),
}
)
@abc.abstractmethod
def from_factored(self, ac: Dict) -> Dict:
"""Converts a factored action (ac) to the new space
:param ac: Dictionary of actions that must have a batch dimension
"""
pass
@abc.abstractmethod
def to_factored(self, ac: Dict) -> Dict:
"""Converts an action in the new space (ac) to the factored action space.
:param ac: Dictionary of actions that must have a batch dimension
"""
pass
@abc.abstractmethod
def get_action_space_update(self):
"""Return a magym (gym3) action space. This will be used to update the env action space."""
pass
@abc.abstractmethod
def get_zero_action(self):
"""Return the zero or null action for this action space"""
pass
def factored_buttons_to_groups(self, ac_buttons: np.ndarray, button_group: List[str]) -> List[str]:
"""For a mutually exclusive group of buttons in button_group, find which option
in the group was chosen. Assumes that each button group has the option of 'none'
meaning that no button in the group was pressed.
        :param ac_buttons: button actions from the factored action space. Should have dims [B, len(Buttons.ALL)]
:param button_group: List of buttons in a mutually exclusive group. Each item in the
list should appear in Buttons.ALL except for the special case 'none' which means
no button in the group was pressed. e.g. ['none', 'forward', 'back']. For now
'none' must be the first element of button_group
Returns a list of length B, where each element is an item from button_group.
"""
assert ac_buttons.shape[1] == len(
Buttons.ALL
), f"There should be {len(Buttons.ALL)} buttons in the factored buttons space"
assert button_group[0] == "none", "This function only works if 'none' is in button_group"
# Actions in ac_buttons with order according to button_group
group_indices = [Buttons.ALL.index(b) for b in button_group if b != "none"]
ac_choices = ac_buttons[:, group_indices]
# Special cases for forward/back, left/right where mutual press means do neither
if "forward" in button_group and "back" in button_group:
ac_choices[np.all(ac_choices, axis=-1)] = 0
if "left" in button_group and "right" in button_group:
ac_choices[np.all(ac_choices, axis=-1)] = 0
ac_non_zero = np.where(ac_choices)
ac_choice = ["none" for _ in range(ac_buttons.shape[0])]
# Iterate over the non-zero indices so that if two buttons in a group were pressed at the same time
# we give priority to the button later in the group. E.g. if hotbar.1 and hotbar.2 are pressed during the same
# timestep, hotbar.2 is marked as pressed
for index, action in zip(ac_non_zero[0], ac_non_zero[1]):
ac_choice[index] = button_group[action + 1] # the zero'th index will mean no button pressed
return ac_choice
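# Editorial sketch (not in the original file): for the group ["none", "forward", "back"],
# a row of ac_buttons with only the "forward" bit set maps to "forward"; a row with both
# "forward" and "back" set is treated as neither (they cancel) and maps to "none".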
class CameraHierarchicalMapping(ActionMapping):
"""Buttons are joint as in ButtonsJointMapping, but now a camera on/off meta action is added into this joint space.
When this meta action is triggered, the separate camera head chooses a camera action which is also now a joint space.
:param n_camera_bins: number of camera bins in the factored space
"""
# Add camera meta action to BUTTONS_GROUPS
BUTTONS_GROUPS = ActionMapping.BUTTONS_GROUPS.copy()
BUTTONS_GROUPS["camera"] = ["none", "camera"]
BUTTONS_COMBINATIONS = list(itertools.product(*BUTTONS_GROUPS.values())) + ["inventory"]
BUTTONS_COMBINATION_TO_IDX = {comb: i for i, comb in enumerate(BUTTONS_COMBINATIONS)}
BUTTONS_IDX_TO_COMBINATION = {i: comb for i, comb in enumerate(BUTTONS_COMBINATIONS)}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.camera_groups = OrderedDict(
camera_x=[f"camera_x{i}" for i in range(self.n_camera_bins)],
camera_y=[f"camera_y{i}" for i in range(self.n_camera_bins)],
)
self.camera_combinations = list(itertools.product(*self.camera_groups.values()))
self.camera_combination_to_idx = {comb: i for i, comb in enumerate(self.camera_combinations)}
self.camera_idx_to_combination = {i: comb for i, comb in enumerate(self.camera_combinations)}
self.camera_null_idx = self.camera_combination_to_idx[
(f"camera_x{self.camera_null_bin}", f"camera_y{self.camera_null_bin}")
]
self._null_action = {
"buttons": self.BUTTONS_COMBINATION_TO_IDX[tuple("none" for _ in range(len(self.BUTTONS_GROUPS)))]
}
self._precompute_to_factored()
def _precompute_to_factored(self):
"""Precompute the joint action -> factored action matrix."""
button_dim = self.stats_ac_space["buttons"].size
self.BUTTON_IDX_TO_FACTORED = np.zeros((len(self.BUTTONS_IDX_TO_COMBINATION), button_dim), dtype=int)
self.BUTTON_IDX_TO_CAMERA_META_OFF = np.zeros((len(self.BUTTONS_IDX_TO_COMBINATION)), dtype=bool)
self.CAMERA_IDX_TO_FACTORED = np.zeros((len(self.camera_idx_to_combination), 2), dtype=int)
# Pre compute Buttons
for jnt_ac, button_comb in self.BUTTONS_IDX_TO_COMBINATION.items():
new_button_ac = np.zeros(len(Buttons.ALL), dtype="i")
if button_comb == "inventory":
new_button_ac[Buttons.ALL.index("inventory")] = 1
else:
for group_choice in button_comb[:-1]: # Last one is camera
if group_choice != "none":
new_button_ac[Buttons.ALL.index(group_choice)] = 1
if button_comb[-1] != "camera": # This means camera meta action is off
self.BUTTON_IDX_TO_CAMERA_META_OFF[jnt_ac] = True
self.BUTTON_IDX_TO_FACTORED[jnt_ac] = new_button_ac
# Pre compute camera
for jnt_ac, camera_comb in self.camera_idx_to_combination.items():
new_camera_ac = np.ones((2), dtype="i") * self.camera_null_bin
new_camera_ac[0] = self.camera_groups["camera_x"].index(camera_comb[0])
new_camera_ac[1] = self.camera_groups["camera_y"].index(camera_comb[1])
self.CAMERA_IDX_TO_FACTORED[jnt_ac] = new_camera_ac
def from_factored(self, ac: Dict) -> Dict:
"""Converts a factored action (ac) to the new space. Assumes ac has a batch dim"""
assert ac["camera"].ndim == 2, f"bad camera label, {ac['camera']}"
assert ac["buttons"].ndim == 2, f"bad buttons label, {ac['buttons']}"
# Get button choices for everything but camera
choices_by_group = OrderedDict(
(k, self.factored_buttons_to_groups(ac["buttons"], v)) for k, v in self.BUTTONS_GROUPS.items() if k != "camera"
)
# Set camera "on off" action based on whether non-null camera action was given
camera_is_null = np.all(ac["camera"] == self.camera_null_bin, axis=1)
choices_by_group["camera"] = ["none" if is_null else "camera" for is_null in camera_is_null]
new_button_ac = []
new_camera_ac = []
for i in range(ac["buttons"].shape[0]):
# Buttons
key = tuple([v[i] for v in choices_by_group.values()])
if ac["buttons"][i, Buttons.ALL.index("inventory")] == 1:
key = "inventory"
new_button_ac.append(self.BUTTONS_COMBINATION_TO_IDX[key])
# Camera -- inventory is also exclusive with camera
if key == "inventory":
key = (
f"camera_x{self.camera_null_bin}",
f"camera_y{self.camera_null_bin}",
)
else:
key = (f"camera_x{ac['camera'][i][0]}", f"camera_y{ac['camera'][i][1]}")
new_camera_ac.append(self.camera_combination_to_idx[key])
return dict(
buttons=np.array(new_button_ac)[:, None],
camera=np.array(new_camera_ac)[:, None],
)
def to_factored(self, ac: Dict) -> Dict:
"""Converts an action in the new space (ac) to the factored action space. Assumes ac has a batch dim"""
assert ac["camera"].shape[-1] == 1
assert ac["buttons"].shape[-1] == 1
new_button_ac = self.BUTTON_IDX_TO_FACTORED[np.squeeze(ac["buttons"], -1)]
camera_off = self.BUTTON_IDX_TO_CAMERA_META_OFF[np.squeeze(ac["buttons"], -1)]
new_camera_ac = self.CAMERA_IDX_TO_FACTORED[np.squeeze(ac["camera"], -1)]
new_camera_ac[camera_off] = self.camera_null_bin
return dict(buttons=new_button_ac, camera=new_camera_ac)
def get_action_space_update(self):
return {
"camera": TensorType(shape=(1,), eltype=Discrete(len(self.camera_combinations))),
"buttons": TensorType(shape=(1,), eltype=Discrete(len(self.BUTTONS_COMBINATIONS))),
}
def get_zero_action(self):
return self._null_action
| [] |
2024-01-10 | minerllabs/basalt-2022-behavioural-cloning-baseline | data_loader.py | # Code for loading OpenAI MineRL VPT datasets
# (NOTE: Not the original code!)
import json
import glob
import os
import random
from multiprocessing import Process, Queue, Event
import numpy as np
import cv2
from openai_vpt.agent import ACTION_TRANSFORMER_KWARGS, resize_image, AGENT_RESOLUTION
from openai_vpt.lib.actions import ActionTransformer
QUEUE_TIMEOUT = 10
CURSOR_FILE = os.path.join(os.path.dirname(__file__), "cursors", "mouse_cursor_white_16x16.png")
# Mapping from JSON keyboard buttons to MineRL actions
KEYBOARD_BUTTON_MAPPING = {
"key.keyboard.escape" :"ESC",
"key.keyboard.s" :"back",
"key.keyboard.q" :"drop",
"key.keyboard.w" :"forward",
"key.keyboard.1" :"hotbar.1",
"key.keyboard.2" :"hotbar.2",
"key.keyboard.3" :"hotbar.3",
"key.keyboard.4" :"hotbar.4",
"key.keyboard.5" :"hotbar.5",
"key.keyboard.6" :"hotbar.6",
"key.keyboard.7" :"hotbar.7",
"key.keyboard.8" :"hotbar.8",
"key.keyboard.9" :"hotbar.9",
"key.keyboard.e" :"inventory",
"key.keyboard.space" :"jump",
"key.keyboard.a" :"left",
"key.keyboard.d" :"right",
"key.keyboard.left.shift" :"sneak",
"key.keyboard.left.control" :"sprint",
"key.keyboard.f" :"swapHands",
}
# Template action
NOOP_ACTION = {
"ESC": 0,
"back": 0,
"drop": 0,
"forward": 0,
"hotbar.1": 0,
"hotbar.2": 0,
"hotbar.3": 0,
"hotbar.4": 0,
"hotbar.5": 0,
"hotbar.6": 0,
"hotbar.7": 0,
"hotbar.8": 0,
"hotbar.9": 0,
"inventory": 0,
"jump": 0,
"left": 0,
"right": 0,
"sneak": 0,
"sprint": 0,
"swapHands": 0,
"camera": np.array([0, 0]),
"attack": 0,
"use": 0,
"pickItem": 0,
}
MINEREC_ORIGINAL_HEIGHT_PX = 720
# Matches a number in the MineRL Java code
# search the MineRL Java code for "constructMouseState"
# to find explanations
CAMERA_SCALER = 360.0 / 2400.0
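# (Editorial note: 360.0 / 2400.0 == 0.15, i.e. roughly 2400 mouse counts per full 360-degree turn.)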
# If GUI is open, mouse dx/dy need also be adjusted with these scalers.
# If data version is not present, assume it is 1.
MINEREC_VERSION_SPECIFIC_SCALERS = {
"5.7": 0.5,
"5.8": 0.5,
"6.7": 2.0,
"6.8": 2.0,
"6.9": 2.0,
}
def json_action_to_env_action(json_action):
"""
Converts a json action into a MineRL action.
Returns (minerl_action, is_null_action)
"""
# This might be slow...
env_action = NOOP_ACTION.copy()
# As a safeguard, make camera action again so we do not override anything
env_action["camera"] = np.array([0, 0])
is_null_action = True
keyboard_keys = json_action["keyboard"]["keys"]
for key in keyboard_keys:
# You can have keys that we do not use, so just skip them
# NOTE in original training code, ESC was removed and replaced with
# "inventory" action if GUI was open.
# Not doing it here, as BASALT uses ESC to quit the game.
if key in KEYBOARD_BUTTON_MAPPING:
env_action[KEYBOARD_BUTTON_MAPPING[key]] = 1
is_null_action = False
mouse = json_action["mouse"]
camera_action = env_action["camera"]
camera_action[0] = mouse["dy"] * CAMERA_SCALER
camera_action[1] = mouse["dx"] * CAMERA_SCALER
if mouse["dx"] != 0 or mouse["dy"] != 0:
is_null_action = False
else:
if abs(camera_action[0]) > 180:
camera_action[0] = 0
if abs(camera_action[1]) > 180:
camera_action[1] = 0
mouse_buttons = mouse["buttons"]
if 0 in mouse_buttons:
env_action["attack"] = 1
is_null_action = False
if 1 in mouse_buttons:
env_action["use"] = 1
is_null_action = False
if 2 in mouse_buttons:
env_action["pickItem"] = 1
is_null_action = False
return env_action, is_null_action
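# Editorial sketch (hypothetical input, not part of the original file): a step such as
#   {"keyboard": {"keys": ["key.keyboard.w"]}, "mouse": {"dx": 10.0, "dy": 0.0, "buttons": [0]}}
# maps to forward=1, attack=1, camera=[0.0, 1.5] (dx/dy scaled by CAMERA_SCALER),
# with is_null_action=False.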
def composite_images_with_alpha(image1, image2, alpha, x, y):
"""
Draw image2 over image1 at location x,y, using alpha as the opacity for image2.
Modifies image1 in-place
"""
ch = max(0, min(image1.shape[0] - y, image2.shape[0]))
cw = max(0, min(image1.shape[1] - x, image2.shape[1]))
if ch == 0 or cw == 0:
return
alpha = alpha[:ch, :cw]
image1[y:y + ch, x:x + cw, :] = (image1[y:y + ch, x:x + cw, :] * (1 - alpha) + image2[:ch, :cw, :] * alpha).astype(np.uint8)
def data_loader_worker(tasks_queue, output_queue, quit_workers_event):
"""
Worker for the data loader.
"""
cursor_image = cv2.imread(CURSOR_FILE, cv2.IMREAD_UNCHANGED)
# Assume 16x16
cursor_image = cursor_image[:16, :16, :]
cursor_alpha = cursor_image[:, :, 3:] / 255.0
cursor_image = cursor_image[:, :, :3]
while True:
task = tasks_queue.get()
if task is None:
break
trajectory_id, video_path, json_path = task
video = cv2.VideoCapture(video_path)
# Note: In some recordings, the game seems to start
# with attack always down from the beginning, which
# is stuck down until player actually presses attack
attack_is_stuck = False
# Scrollwheel is allowed way to change items, but this is
# not captured by the recorder.
# Work around this by keeping track of selected hotbar item
# and updating "hotbar.#" actions when hotbar selection changes.
last_hotbar = 0
with open(json_path) as json_file:
json_lines = json_file.readlines()
json_data = "[" + ",".join(json_lines) + "]"
json_data = json.loads(json_data)
for i in range(len(json_data)):
if quit_workers_event.is_set():
break
step_data = json_data[i]
if i == 0:
# Check if attack will be stuck down
if step_data["mouse"]["newButtons"] == [0]:
attack_is_stuck = True
elif attack_is_stuck:
# Check if we press attack down, then it might not be stuck
if 0 in step_data["mouse"]["newButtons"]:
attack_is_stuck = False
# If still stuck, remove the action
if attack_is_stuck:
step_data["mouse"]["buttons"] = [button for button in step_data["mouse"]["buttons"] if button != 0]
action, is_null_action = json_action_to_env_action(step_data)
# Update hotbar selection
current_hotbar = step_data["hotbar"]
if current_hotbar != last_hotbar:
action["hotbar.{}".format(current_hotbar + 1)] = 1
last_hotbar = current_hotbar
# Read frame even if this is null so we progress forward
ret, frame = video.read()
if ret:
# Skip null actions as done in the VPT paper
# NOTE: in VPT paper, this was checked _after_ transforming into agent's action-space.
                # We do this here as well to reduce the amount of data sent over.
if is_null_action:
continue
if step_data["isGuiOpen"]:
camera_scaling_factor = frame.shape[0] / MINEREC_ORIGINAL_HEIGHT_PX
cursor_x = int(step_data["mouse"]["x"] * camera_scaling_factor)
cursor_y = int(step_data["mouse"]["y"] * camera_scaling_factor)
composite_images_with_alpha(frame, cursor_image, cursor_alpha, cursor_x, cursor_y)
cv2.cvtColor(frame, code=cv2.COLOR_BGR2RGB, dst=frame)
frame = np.asarray(np.clip(frame, 0, 255), dtype=np.uint8)
frame = resize_image(frame, AGENT_RESOLUTION)
output_queue.put((trajectory_id, frame, action), timeout=QUEUE_TIMEOUT)
else:
print(f"Could not read frame from video {video_path}")
video.release()
# Signal that this task is done
# Yes we are using "None"s to tell when worker is done
# and when individual work-items are done...
output_queue.put((trajectory_id, None, None), timeout=QUEUE_TIMEOUT)
if quit_workers_event.is_set():
break
# Tell that we ended
output_queue.put(None)
class DataLoader:
"""
Generator class for loading batches from a dataset
This only returns a single step at a time per worker; no sub-sequences.
Idea is that you keep track of the model's hidden state and feed that in,
along with one sample at a time.
+ Simpler loader code
+ Supports lower end hardware
- Not very efficient (could be faster)
- No support for sub-sequences
- Loads up individual files as trajectory files (i.e. if a trajectory is split into multiple files,
this code will load it up as a separate item).
"""
def __init__(self, dataset_dir, n_workers=8, batch_size=8, n_epochs=1, max_queue_size=8):
        assert n_workers >= batch_size, "Number of workers must be greater than or equal to the batch size"
self.dataset_dir = dataset_dir
self.n_workers = n_workers
self.n_epochs = n_epochs
self.batch_size = batch_size
self.max_queue_size = max_queue_size
unique_ids = glob.glob(os.path.join(dataset_dir, "*.mp4"))
unique_ids = list(set([os.path.basename(x).split(".")[0] for x in unique_ids]))
self.unique_ids = unique_ids
# Create tuples of (video_path, json_path) for each unique_id
demonstration_tuples = []
for unique_id in unique_ids:
video_path = os.path.abspath(os.path.join(dataset_dir, unique_id + ".mp4"))
json_path = os.path.abspath(os.path.join(dataset_dir, unique_id + ".jsonl"))
demonstration_tuples.append((video_path, json_path))
        assert n_workers <= len(demonstration_tuples), f"n_workers should be less than or equal to the number of demonstrations ({len(demonstration_tuples)})"
# Repeat dataset for n_epochs times, shuffling the order for
# each epoch
self.demonstration_tuples = []
for i in range(n_epochs):
random.shuffle(demonstration_tuples)
self.demonstration_tuples += demonstration_tuples
self.task_queue = Queue()
self.n_steps_processed = 0
for trajectory_id, task in enumerate(self.demonstration_tuples):
self.task_queue.put((trajectory_id, *task))
for _ in range(n_workers):
self.task_queue.put(None)
self.output_queues = [Queue(maxsize=max_queue_size) for _ in range(n_workers)]
self.quit_workers_event = Event()
self.processes = [
Process(
target=data_loader_worker,
args=(
self.task_queue,
output_queue,
self.quit_workers_event,
),
daemon=True
)
for output_queue in self.output_queues
]
for process in self.processes:
process.start()
def __iter__(self):
return self
def __next__(self):
batch_frames = []
batch_actions = []
batch_episode_id = []
for i in range(self.batch_size):
workitem = self.output_queues[self.n_steps_processed % self.n_workers].get(timeout=QUEUE_TIMEOUT)
if workitem is None:
                # Stop iteration when the first worker runs out of work to do.
                # Yes, this has a chance of cutting out a lot of the remaining work,
                # but it keeps batches diverse, instead of ending with degenerate
                # batches where a single worker supplies every sample.
raise StopIteration()
trajectory_id, frame, action = workitem
batch_frames.append(frame)
batch_actions.append(action)
batch_episode_id.append(trajectory_id)
self.n_steps_processed += 1
return batch_frames, batch_actions, batch_episode_id
def __del__(self):
self.quit_workers_event.set()
for process in self.processes:
process.terminate()
process.join()
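# Hypothetical usage sketch (not part of the original file): "data/MakeWaterfall" is a
# placeholder directory assumed to contain matching <id>.mp4 / <id>.jsonl pairs; the
# n_workers and batch_size values are illustrative only.
if __name__ == "__main__":
    loader = DataLoader(dataset_dir="data/MakeWaterfall", n_workers=2, batch_size=2, n_epochs=1)
    for batch_frames, batch_actions, batch_episode_ids in loader:
        # Each element of the batch is a single (frame, action) step from one trajectory
        print(batch_frames[0].shape, sorted(batch_actions[0].keys()), batch_episode_ids)
        break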
| [] |
2024-01-10 | minerllabs/basalt-2022-behavioural-cloning-baseline | openai_vpt~lib~impala_cnn.py | import math
from copy import deepcopy
from typing import Dict, List, Optional
from torch import nn
from torch.nn import functional as F
from openai_vpt.lib import misc
from openai_vpt.lib import torch_util as tu
from openai_vpt.lib.util import FanInInitReLULayer
class CnnBasicBlock(nn.Module):
"""
Residual basic block, as in ImpalaCNN. Preserves channel number and shape
:param inchan: number of input channels
:param init_scale: weight init scale multiplier
"""
def __init__(
self,
inchan: int,
init_scale: float = 1,
log_scope="",
init_norm_kwargs: Dict = {},
**kwargs,
):
super().__init__()
self.inchan = inchan
s = math.sqrt(init_scale)
self.conv0 = FanInInitReLULayer(
self.inchan,
self.inchan,
kernel_size=3,
padding=1,
init_scale=s,
log_scope=f"{log_scope}/conv0",
**init_norm_kwargs,
)
self.conv1 = FanInInitReLULayer(
self.inchan,
self.inchan,
kernel_size=3,
padding=1,
init_scale=s,
log_scope=f"{log_scope}/conv1",
**init_norm_kwargs,
)
def forward(self, x):
x = x + self.conv1(self.conv0(x))
return x
class CnnDownStack(nn.Module):
"""
Downsampling stack from Impala CNN.
:param inchan: number of input channels
:param nblock: number of residual blocks after downsampling
:param outchan: number of output channels
:param init_scale: weight init scale multiplier
:param pool: if true, downsample with max pool
:param post_pool_groups: if not None, normalize with group norm with this many groups
:param kwargs: remaining kwargs are passed into the blocks and layers
"""
name = "Impala_CnnDownStack"
def __init__(
self,
inchan: int,
nblock: int,
outchan: int,
init_scale: float = 1,
pool: bool = True,
post_pool_groups: Optional[int] = None,
log_scope: str = "",
init_norm_kwargs: Dict = {},
first_conv_norm=False,
**kwargs,
):
super().__init__()
self.inchan = inchan
self.outchan = outchan
self.pool = pool
first_conv_init_kwargs = deepcopy(init_norm_kwargs)
if not first_conv_norm:
first_conv_init_kwargs["group_norm_groups"] = None
first_conv_init_kwargs["batch_norm"] = False
self.firstconv = FanInInitReLULayer(
inchan,
outchan,
kernel_size=3,
padding=1,
log_scope=f"{log_scope}/firstconv",
**first_conv_init_kwargs,
)
self.post_pool_groups = post_pool_groups
if post_pool_groups is not None:
self.n = nn.GroupNorm(post_pool_groups, outchan)
self.blocks = nn.ModuleList(
[
CnnBasicBlock(
outchan,
init_scale=init_scale / math.sqrt(nblock),
log_scope=f"{log_scope}/block{i}",
init_norm_kwargs=init_norm_kwargs,
**kwargs,
)
for i in range(nblock)
]
)
def forward(self, x):
x = self.firstconv(x)
if self.pool:
x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
if self.post_pool_groups is not None:
x = self.n(x)
x = tu.sequential(self.blocks, x, diag_name=self.name)
return x
def output_shape(self, inshape):
c, h, w = inshape
assert c == self.inchan
if self.pool:
return (self.outchan, (h + 1) // 2, (w + 1) // 2)
else:
return (self.outchan, h, w)
class ImpalaCNN(nn.Module):
"""
:param inshape: input image shape (height, width, channels)
    :param chans: list with one entry per residual downsample stack. Each element is the number of
filters per convolution in the stack
:param outsize: output hidden size
:param nblock: number of residual blocks per stack. Each block has 2 convs and a residual
:param init_norm_kwargs: arguments to be passed to convolutional layers. Options can be found
in ypt.model.util:FanInInitReLULayer
    :param dense_init_norm_kwargs: arguments to be passed to the final dense layer. Options can be found
in ypt.model.util:FanInInitReLULayer
:param kwargs: remaining kwargs are passed into the CnnDownStacks
"""
name = "ImpalaCNN"
def __init__(
self,
inshape: List[int],
chans: List[int],
outsize: int,
nblock: int,
init_norm_kwargs: Dict = {},
dense_init_norm_kwargs: Dict = {},
first_conv_norm=False,
**kwargs,
):
super().__init__()
h, w, c = inshape
curshape = (c, h, w)
self.stacks = nn.ModuleList()
for i, outchan in enumerate(chans):
stack = CnnDownStack(
curshape[0],
nblock=nblock,
outchan=outchan,
init_scale=math.sqrt(len(chans)),
log_scope=f"downstack{i}",
init_norm_kwargs=init_norm_kwargs,
first_conv_norm=first_conv_norm if i == 0 else True,
**kwargs,
)
self.stacks.append(stack)
curshape = stack.output_shape(curshape)
self.dense = FanInInitReLULayer(
misc.intprod(curshape),
outsize,
layer_type="linear",
log_scope="imapala_final_dense",
init_scale=1.4,
**dense_init_norm_kwargs,
)
self.outsize = outsize
def forward(self, x):
b, t = x.shape[:-3]
x = x.reshape(b * t, *x.shape[-3:])
x = misc.transpose(x, "bhwc", "bchw")
x = tu.sequential(self.stacks, x, diag_name=self.name)
x = x.reshape(b, t, *x.shape[1:])
x = tu.flatten_image(x)
x = self.dense(x)
return x
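# Illustrative sketch (not part of the original module): build a small ImpalaCNN and run a
# dummy (batch, time, H, W, C) tensor through it. The 128x128 input and the channel sizes
# are assumptions chosen for the example, not the configuration of the released VPT models.
if __name__ == "__main__":
    import torch as th
    net = ImpalaCNN(inshape=[128, 128, 3], chans=[16, 32, 32], outsize=256, nblock=2)
    dummy = th.zeros(2, 4, 128, 128, 3)
    print(net(dummy).shape)  # expected under these assumptions: (2, 4, 256)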
| [] |
2024-01-10 | minerllabs/basalt-2022-behavioural-cloning-baseline | openai_vpt~lib~scaled_mse_head.py | from typing import Dict, Optional
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from openai_vpt.lib.action_head import fan_in_linear
from openai_vpt.lib.normalize_ewma import NormalizeEwma
class ScaledMSEHead(nn.Module):
"""
Linear output layer that scales itself so that targets are always normalized to N(0, 1)
"""
def __init__(
self, input_size: int, output_size: int, norm_type: Optional[str] = "ewma", norm_kwargs: Optional[Dict] = None
):
super().__init__()
self.input_size = input_size
self.output_size = output_size
self.norm_type = norm_type
self.linear = nn.Linear(self.input_size, self.output_size)
norm_kwargs = {} if norm_kwargs is None else norm_kwargs
self.normalizer = NormalizeEwma(output_size, **norm_kwargs)
def reset_parameters(self):
init.orthogonal_(self.linear.weight)
fan_in_linear(self.linear)
self.normalizer.reset_parameters()
def forward(self, input_data):
return self.linear(input_data)
def loss(self, prediction, target):
"""
Calculate the MSE loss between output and a target.
'Prediction' has to be normalized while target is denormalized.
Loss is calculated in a 'normalized' space.
"""
return F.mse_loss(prediction, self.normalizer(target), reduction="mean")
def denormalize(self, input_data):
"""Convert input value from a normalized space into the original one"""
return self.normalizer.denormalize(input_data)
def normalize(self, input_data):
return self.normalizer(input_data)
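# Hypothetical usage sketch (not part of the original module): the head predicts in a
# normalized space, the loss normalizes raw targets before comparing, and denormalize()
# maps predictions back to the original scale. Sizes and values are illustrative.
if __name__ == "__main__":
    import torch
    head = ScaledMSEHead(input_size=8, output_size=2)
    features = torch.randn(4, 8)
    targets = torch.randn(4, 2) * 10.0 + 3.0   # targets in the original (denormalized) scale
    prediction = head(features)                # normalized-space prediction
    loss = head.loss(prediction, targets)      # MSE against normalizer(targets)
    print(loss.item(), head.denormalize(prediction).shape)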
| [] |
2024-01-10 | minerllabs/basalt-2022-behavioural-cloning-baseline | openai_vpt~lib~minecraft_util.py | import functools
import inspect
from typing import Optional, Tuple
import numpy as np
import torch
from openai_vpt.lib.action_head import (CategoricalActionHead, DiagGaussianActionHead,
DictActionHead)
def store_args(method):
"""Stores provided method args as instance attributes."""
argspec = inspect.getfullargspec(method)
defaults = {}
if argspec.defaults is not None:
defaults = dict(zip(argspec.args[-len(argspec.defaults) :], argspec.defaults))
if argspec.kwonlydefaults is not None:
defaults.update(argspec.kwonlydefaults)
arg_names = argspec.args[1:]
@functools.wraps(method)
def wrapper(*positional_args, **keyword_args):
self = positional_args[0]
# Get default arg values
args = defaults.copy()
# Add provided arg values
for name, value in zip(arg_names, positional_args[1:]):
args[name] = value
args.update(keyword_args)
self.__dict__.update(args)
return method(*positional_args, **keyword_args)
return wrapper
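# Illustrative sketch (not part of the original file): store_args copies every constructor
# argument onto the instance, so explicit "self.x = x" assignments become unnecessary.
class _ExampleStoredArgs:
    @store_args
    def __init__(self, camera_maxval=10, camera_binsize=2, mu=5):
        pass  # self.camera_maxval, self.camera_binsize and self.mu are set by the decorator
# e.g. _ExampleStoredArgs(camera_binsize=4).camera_binsize == 4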
def get_norm_entropy_from_cat_head(module, name, masks, logits):
# Note that the mask has already been applied to the logits at this point
entropy = -torch.sum(torch.exp(logits) * logits, dim=-1)
if name in masks:
n = torch.sum(masks[name], dim=-1, dtype=torch.float)
norm_entropy = entropy / torch.log(n)
# When the mask only allows one option the normalized entropy makes no sense
# as it is basically both maximal (the distribution is as uniform as it can be)
# and minimal (there is no variance at all).
        # As such, we ignore them for the purpose of calculating entropy.
zero = torch.zeros_like(norm_entropy)
norm_entropy = torch.where(n.eq(1.0), zero, norm_entropy)
count = n.not_equal(1.0).int()
else:
n = torch.tensor(logits.shape[-1], dtype=torch.float)
norm_entropy = entropy / torch.log(n)
count = torch.ones_like(norm_entropy, dtype=torch.int)
    # entropy is per-entry, still of size self.output_shape[:-1]; we need to reduce over the rest of it.
for _ in module.output_shape[:-1]:
norm_entropy = norm_entropy.sum(dim=-1)
count = count.sum(dim=-1)
return norm_entropy, count
def get_norm_cat_entropy(module, masks, logits, template) -> Tuple[torch.Tensor, torch.Tensor]:
entropy_sum = torch.zeros_like(template, dtype=torch.float)
counts = torch.zeros_like(template, dtype=torch.int)
for k, subhead in module.items():
if isinstance(subhead, DictActionHead):
entropy, count = get_norm_cat_entropy(subhead, masks, logits[k], template)
elif isinstance(subhead, CategoricalActionHead):
entropy, count = get_norm_entropy_from_cat_head(subhead, k, masks, logits[k])
else:
continue
entropy_sum += entropy
counts += count
return entropy_sum, counts
def get_diag_guassian_entropy(module, logits, template) -> Optional[torch.Tensor]:
entropy_sum = torch.zeros_like(template, dtype=torch.float)
count = torch.zeros(1, device=template.device, dtype=torch.int)
for k, subhead in module.items():
if isinstance(subhead, DictActionHead):
entropy_sum += get_diag_guassian_entropy(subhead, logits[k], template)
elif isinstance(subhead, DiagGaussianActionHead):
entropy_sum += module.entropy(logits)
else:
continue
count += 1
return entropy_sum / count
| [] |
2024-01-10 | minerllabs/basalt-2022-behavioural-cloning-baseline | openai_vpt~lib~xf.py | """
Implementation of transformer and reshaping-based sparse transformer
"""
import functools
import math
import torch as th
from torch import nn
from torch.nn import functional as F
from openai_vpt.lib import misc, mlp
from openai_vpt.lib import torch_util as tu
from openai_vpt.lib import util
SENTINEL = 0.1337
def attention(
Q_bte,
K_bTe,
V_bTe,
dtype,
mask=True,
extra_btT=None,
maxlen=None,
check_sentinel=False,
use_muP_factor=False,
):
"""
performs softmax(Q*K)*V operation
t : output (write) time axis, possibly size=1 for just the last timestep
T : input (read) time axis
t < T is OK
'check_sentinel' is used when you want to make it impossible to attend to certain keys.
All keys where every value is equal to the constant SENTINEL will be ignored.
Currently this is only used by StridedAttn.
"""
assert Q_bte.dtype == K_bTe.dtype == dtype, f"{Q_bte.dtype}, {K_bTe.dtype}, {dtype} must all match"
e = Q_bte.shape[2]
if check_sentinel:
invalid = (K_bTe == SENTINEL).int().sum(dim=-1) == e
invalid = misc.reshape(invalid, "b, T", "b, 1, T")
if isinstance(mask, th.Tensor):
bias = (~mask).float() * -1e9
elif mask:
bias = get_attn_bias_cached(Q_bte.shape[1], K_bTe.shape[1], maxlen=maxlen, device=Q_bte.device, dtype=th.float32)
else:
bias = Q_bte.new_zeros((), dtype=th.float32)
if extra_btT is not None:
bias = bias + extra_btT
    # Equivalent to bias + (1 / math.sqrt(e)) * th.einsum("bte,bpe->btp", Q_bte, K_bTe)
# but faster:
logit_btT = th.baddbmm(
bias,
Q_bte.float(),
K_bTe.float().transpose(-1, -2),
alpha=(1 / e) if use_muP_factor else (1 / math.sqrt(e)),
)
if check_sentinel:
logit_btT = logit_btT - 1e9 * invalid.float()
W_btT = th.softmax(logit_btT, dim=2).to(dtype)
if callable(V_bTe):
# This is used by the sharded video model to defer waiting on
# the broadcast of the values until they're needed
V_bTe = V_bTe()
# th.einsum only lets you use lowercase letters, so 'p' for 'past'
# means 'T'
A_bte = th.einsum("btp,bpe->bte", W_btT, V_bTe)
return A_bte
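# Minimal sketch (not part of the original module): attention over a single sequence with the
# head dimension already folded into the batch axis. Passing an explicit boolean mask exercises
# the tensor-mask branch; the shapes below are illustrative.
def _example_attention():
    Q = th.randn(1, 4, 8)                                        # (batch*heads, t, head_dim)
    K = th.randn(1, 4, 8)
    V = th.randn(1, 4, 8)
    causal = th.tril(th.ones(4, 4, dtype=th.bool)).unsqueeze(0)  # (1, t, T) lower-triangular mask
    A_bte = attention(Q, K, V, dtype=th.float32, mask=causal, maxlen=4)
    return A_bte.shape                                           # (1, 4, 8)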
class Attn:
"""
Defines an attention mechanism
All the mechanisms here can be defined by two operations:
1. preprocessing Q,K,V,R[=relative attention query]
to move axes from embedding dimension to
batch dimension, and possibly doing shifts.
2. postprocessing the final result to move axes back to embedding
axis.
"""
def __init__(self, mask, maxlen):
self.mask = mask
self.maxlen = maxlen
def preproc_qkv(self, Q_bte, K_bte, V_bte):
raise NotImplementedError
def preproc_r(self, R_btn):
raise NotImplementedError
def split_heads(x_bte, h):
b, t, e = x_bte.shape
assert e % h == 0, "Embsize must be divisible by number of heads"
q = e // h
x_bthq = x_bte.reshape((b, t, h, q))
x_bhtq = misc.transpose(x_bthq, "bthq", "bhtq")
x_Btq = x_bhtq.reshape((b * h, t, q))
return x_Btq
class All2All(Attn):
def __init__(self, nhead, maxlen, mask=True, head_dim=None):
super().__init__(mask=mask, maxlen=maxlen)
assert (nhead is None) != (head_dim is None), "exactly one of nhead and head_dim must be specified"
self.h = nhead
self.head_dim = head_dim
def preproc_qkv(self, *xs):
q = xs[0].shape[-1]
for x in xs:
assert x.shape[-1] == q, "embedding dimensions do not match"
h = self.h or misc.exact_div(q, self.head_dim)
postproc = functools.partial(self.postproc_a, h=h)
return (postproc, *tuple(split_heads(x, h) for x in xs))
def preproc_r(self, R_btn):
_, ret = self.preproc_qkv(R_btn)
return ret
def postproc_a(self, A_Btq, h):
B, t, q = A_Btq.shape
b = B // h
A_bhtq = A_Btq.reshape((b, h, t, q))
A_bthq = misc.transpose(A_bhtq, "bhtq", "bthq")
A_bte = A_bthq.reshape((b, t, h * q))
return A_bte
def _required_padding(dim, target_div):
if dim % target_div == 0:
return 0
else:
return target_div - dim % target_div
class StridedAttn(Attn):
def __init__(self, nhead, stride, maxlen, mask=True):
super().__init__(mask=mask, maxlen=maxlen)
self.h = nhead
self.stride = stride
def _preproc(self, x, name, Q_t=None, Q_pad=None):
x, undo = misc.reshape_undo(x, "b, t*stride, e", "b, 1, t, stride*e", stride=self.stride)
if name == "Q":
Q_pad = _required_padding(x.shape[2], self.maxlen)
original_t = x.shape[2]
x = F.pad(x, (0, 0, 0, Q_pad), value=SENTINEL)
undo = misc.compose_undo(undo, lambda x: x[:, :, :original_t])
if name == "Q":
Q_t = x.shape[2]
assert Q_t % self.maxlen == 0, f"{Q_t} % {self.maxlen} != 0"
else:
required_len = Q_t + self.maxlen
if x.shape[2] < required_len:
x = F.pad(x, (0, 0, required_len - x.shape[2], 0), value=SENTINEL)
assert x.shape[2] >= required_len
back = x[:, :, -Q_t - self.maxlen : -self.maxlen]
front = x[:, :, -Q_t:]
x = th.cat([back, front], dim=1)
_, _, t, _ = x.shape
assert t == Q_t, f"{t} != {Q_t}"
x, undo = misc.reshape_undo(
x,
"b, pad_shift, t*maxlen, stride*h*q",
"b, pad_shift, t, maxlen, stride, h, q",
maxlen=self.maxlen,
h=self.h,
stride=self.stride,
undo=undo,
)
x, undo = misc.transpose_undo(x, "bptmshq", "bthspmq", undo=undo)
x, undo = misc.reshape_undo(
x,
"b, t, h, stride, pad_shift, maxlen, q",
"b*t*h*stride, pad_shift*maxlen, q",
undo=undo,
)
if name == "Q":
return x, undo, Q_t, Q_pad
else:
return x
def preproc_qkv(self, Q_bte, K_bte, V_bte):
pad = _required_padding(Q_bte.shape[1], self.stride)
if pad:
Q_bte = F.pad(Q_bte, (0, 0, 0, pad), value=SENTINEL)
K_bte = F.pad(K_bte, (0, 0, 0, pad), value=SENTINEL) if K_bte is not None else None
V_bte = F.pad(V_bte, (0, 0, 0, pad), value=SENTINEL) if V_bte is not None else None
undo = lambda x, pad=pad: x[:, :-pad]
else:
undo = None
if K_bte is not None:
pad = _required_padding(K_bte.shape[1], self.stride)
if pad:
K_bte = F.pad(K_bte, (0, 0, pad, 0), value=SENTINEL)
V_bte = F.pad(V_bte, (0, 0, pad, 0), value=SENTINEL)
assert Q_bte.shape[1] % self.stride == 0
assert K_bte is None or K_bte.shape[1] % self.stride == 0
assert V_bte is None or V_bte.shape[1] % self.stride == 0
Q, postproc, Q_t, Q_pad = self._preproc(Q_bte, "Q")
postproc = misc.compose_undo(undo, postproc)
return (
postproc,
Q,
self._preproc(K_bte, "K", Q_t=Q_t, Q_pad=Q_pad) if K_bte is not None else None,
self._preproc(V_bte, "V", Q_t=Q_t, Q_pad=Q_pad) if V_bte is not None else None,
)
def preproc_r(self, R_bte):
_, R, _, _ = self.preproc_qkv(R_bte, None, None)
return R
Q_SCALE = 0.1
K_SCALE = 0.2
V_SCALE = 1.0
PROJ_SCALE = 1.0
MLP0_SCALE = 1.0
MLP1_SCALE = 1.0
R_SCALE = 0.1
B_SCALE = 0.2
class AttentionLayerBase(nn.Module):
def __init__(
self,
*,
attn,
scale,
x_size,
c_size,
qk_size,
v_size,
dtype,
relattn=False,
seqlens=None,
separate=False,
):
super().__init__()
dtype = tu.parse_dtype(dtype)
self.attn = attn
self.x_size = x_size
self.c_size = c_size
s = math.sqrt(scale)
separgs = dict(seqlens=seqlens, separate=separate)
self.q_layer = MultiscaleLinear(x_size, qk_size, name="q", scale=Q_SCALE, dtype=dtype, **separgs)
self.k_layer = MultiscaleLinear(c_size, qk_size, name="k", scale=K_SCALE, bias=False, dtype=dtype, **separgs)
self.v_layer = MultiscaleLinear(c_size, v_size, name="v", scale=V_SCALE * s, bias=False, dtype=dtype, **separgs)
self.proj_layer = MultiscaleLinear(v_size, x_size, name="proj", scale=PROJ_SCALE * s, dtype=dtype, **separgs)
self.relattn = relattn
maxlen = attn.maxlen
assert maxlen > 0 or not attn.mask
if self.relattn:
nbasis = 10
self.r_layer = tu.NormedLinear(x_size, nbasis * attn.h, scale=R_SCALE, dtype=dtype)
self.b_nd = nn.Parameter(th.randn(nbasis, maxlen) * B_SCALE)
self.maxlen = maxlen
self.dtype = dtype
def relattn_logits(self, X_bte, T):
R_btn = self.r_layer(X_bte).float()
R_btn = self.attn.preproc_r(R_btn)
t = R_btn.shape[1]
D_ntT = util.bandify(self.b_nd, t, T)
extra_btT = th.einsum("btn,ntp->btp", R_btn, D_ntT)
return extra_btT
def quick_gelu(x):
return x * th.sigmoid(1.702 * x)
def act(actname, x):
if actname == "relu":
return F.relu(x)
elif actname == "gelu":
return quick_gelu(x)
elif actname == "none":
return x
else:
raise NotImplementedError(actname)
class SelfAttentionLayer(AttentionLayerBase):
"""
Residual attention layer that takes a single tensor x and has it attend to itself
Has the form
output = x + f(x)
"""
def __init__(
self,
x_size,
attn,
scale,
dtype="float32",
norm="layer",
cache_keep_len=None,
relattn=False,
log_scope="sa",
use_muP_factor=False,
**kwargs,
):
super().__init__(
x_size=x_size,
c_size=x_size,
qk_size=x_size,
v_size=x_size,
attn=attn,
scale=scale,
relattn=relattn,
dtype=dtype,
**kwargs,
)
self.ln_x = util.get_norm(norm, x_size, dtype=dtype)
if cache_keep_len is None:
if hasattr(attn, "cache_keep_len"):
cache_keep_len = attn.cache_keep_len
else:
if isinstance(attn, StridedAttn):
stride = attn.stride
else:
stride = 1
cache_keep_len = stride * attn.maxlen
self.cache_keep_len = cache_keep_len
self.log_scope = log_scope
self.use_muP_factor = use_muP_factor
def residual(self, X_bte, state):
X_bte = self.ln_x(X_bte)
Q_bte = self.q_layer(X_bte)
K_bte = self.k_layer(X_bte)
V_bte = self.v_layer(X_bte)
if state:
state, K_bte, V_bte = self.update_state(state, K_bte, V_bte)
postproc_closure, Q_bte, K_bte, V_bte = self.attn.preproc_qkv(Q_bte, K_bte, V_bte)
extra_btT = self.relattn_logits(X_bte, K_bte.shape[1]) if self.relattn else None
A_bte = attention(
Q_bte,
K_bte,
V_bte,
mask=self.attn.mask,
extra_btT=extra_btT,
maxlen=self.maxlen,
dtype=self.dtype,
check_sentinel=isinstance(self.attn, StridedAttn),
use_muP_factor=self.use_muP_factor,
)
A_bte = postproc_closure(A_bte)
Aproj_bte = self.proj_layer(A_bte)
return Aproj_bte, state
def forward(self, X_bte, state):
R_bte, state = self.residual(X_bte, state)
return X_bte + R_bte, state
def stateless_forward(self, X_bte):
out_bte, _state = self.forward(X_bte, None)
return out_bte
def update_state(self, state, K_bte, V_bte):
def append(prev, new):
"""
Given `prev` keys from cache, and `new` keys,
returns (cache, full), where
- cache goes into the output state, length chosen so that on the
next timestep, there are enough cached timesteps to get the full
            context of length self.maxlen.
- full is used for the current forward pass, with length chosen so
that the first timestep new[:, 0] gets to see a context of
self.maxlen.
"""
tprev = prev.shape[1]
startfull = max(tprev - self.cache_keep_len, 0)
full = th.cat([prev[:, startfull:], new], dim=1)
outstate = full[:, max(full.shape[1] - (self.cache_keep_len), 0) :]
# To see that the preceding slicing is correct, consider the case
# that maxlen==1. Then `full` only consists of `new`, and
# `outstate` is empty
return outstate, full
instate_K, instate_V = state
outstate_K, K_bte = append(instate_K, K_bte)
outstate_V, V_bte = append(instate_V, V_bte)
assert outstate_K.shape[-2] <= self.cache_keep_len
return (outstate_K, outstate_V), K_bte, V_bte
def initial_state(self, batchsize, initial_T=0):
return (
tu.zeros((batchsize, initial_T, self.x_size), dtype=self.dtype),
tu.zeros((batchsize, initial_T, self.x_size), dtype=self.dtype),
)
def empty_state(self):
return None
class PointwiseLayer(nn.Module):
"""
Residual MLP applied at each timestep
"""
def __init__(self, x_size, scale, dtype, norm, actname="relu", mlp_ratio=2):
super().__init__()
s = math.sqrt(scale)
self.ln = util.get_norm(norm, x_size, dtype=dtype)
self.mlp = mlp.MLP(
insize=x_size,
nhidlayer=1,
outsize=x_size,
hidsize=int(x_size * mlp_ratio),
hidactiv=functools.partial(act, actname),
dtype=dtype,
)
self.mlp.layers[0].weight.data *= MLP0_SCALE * s
self.mlp.layers[1].weight.data *= MLP1_SCALE * s
def residual(self, x):
x = self.ln(x)
x = self.mlp(x)
return x
def forward(self, x):
return x + self.residual(x)
def _is_separate(sep, name):
if isinstance(sep, bool):
return sep
assert isinstance(sep, set)
if name in sep:
sep.remove(name)
return True
else:
return False
def make_maybe_multiscale(make_fn, *args, seqlens, separate, name, **kwargs):
"""
This function either creates one instance of a module or creates
a separate instance of the module for each resolution of the image,
determined by the `separate` parameter. We create separate modules
if `separate` is True or if `separate` is a set containing `name`.
"""
if _is_separate(separate, name):
modules = [make_fn(*args, **kwargs) for _ in seqlens]
return SplitCallJoin(modules, seqlens)
else:
return make_fn(*args, **kwargs)
class SplitCallJoin(nn.Module):
def __init__(self, mods, seqlens):
super().__init__()
self.mods = nn.ModuleList(mods)
self.seqlens = seqlens
def forward(self, x):
tl = sum(self.seqlens)
x, undo = misc.reshape_undo(x, "..., z*tl, e", "..., z, tl, e", tl=tl)
x = list(th.split(x, self.seqlens, dim=-2))
new_x = []
for x, mod in misc.safezip(x, self.mods):
x, this_undo = misc.reshape_undo(x, "..., z, l, e", "..., z*l, e")
x = mod(x)
x = this_undo(x)
new_x.append(x)
x = th.cat(new_x, dim=-2)
x = undo(x)
return x
MultiscaleLinear = functools.partial(make_maybe_multiscale, tu.NormedLinear)
MultiscalePointwise = functools.partial(make_maybe_multiscale, PointwiseLayer)
| [] |
2024-01-10 | minerllabs/basalt-2022-behavioural-cloning-baseline | openai_vpt~lib~actions.py | import attr
import minerl.herobraine.hero.mc as mc
import numpy as np
from openai_vpt.lib.minecraft_util import store_args
class Buttons:
ATTACK = "attack"
BACK = "back"
FORWARD = "forward"
JUMP = "jump"
LEFT = "left"
RIGHT = "right"
SNEAK = "sneak"
SPRINT = "sprint"
USE = "use"
DROP = "drop"
INVENTORY = "inventory"
ALL = [
ATTACK,
BACK,
FORWARD,
JUMP,
LEFT,
RIGHT,
SNEAK,
SPRINT,
USE,
DROP,
INVENTORY,
] + [f"hotbar.{i}" for i in range(1, 10)]
class SyntheticButtons:
# Composite / scripted actions
CHANNEL_ATTACK = "channel-attack"
ALL = [CHANNEL_ATTACK]
class QuantizationScheme:
LINEAR = "linear"
MU_LAW = "mu_law"
@attr.s(auto_attribs=True)
class CameraQuantizer:
"""
A camera quantizer that discretizes and undiscretizes a continuous camera input with y (pitch) and x (yaw) components.
Parameters:
- camera_binsize: The size of the bins used for quantization. In case of mu-law quantization, it corresponds to the average binsize.
- camera_maxval: The maximum value of the camera action.
- quantization_scheme: The quantization scheme to use. Currently, two quantization schemes are supported:
- Linear quantization (default): Camera actions are split uniformly into discrete bins
- Mu-law quantization: Transforms the camera action using mu-law encoding (https://en.wikipedia.org/wiki/%CE%9C-law_algorithm)
followed by the same quantization scheme used by the linear scheme.
- mu: Mu is the parameter that defines the curvature of the mu-law encoding. Higher values of
mu will result in a sharper transition near zero. Below are some reference values listed
for choosing mu given a constant maxval and a desired max_precision value.
maxval = 10 | max_precision = 0.5 | μ ≈ 2.93826
maxval = 10 | max_precision = 0.4 | μ ≈ 4.80939
maxval = 10 | max_precision = 0.25 | μ ≈ 11.4887
maxval = 20 | max_precision = 0.5 | μ ≈ 2.7
maxval = 20 | max_precision = 0.4 | μ ≈ 4.39768
maxval = 20 | max_precision = 0.25 | μ ≈ 10.3194
maxval = 40 | max_precision = 0.5 | μ ≈ 2.60780
maxval = 40 | max_precision = 0.4 | μ ≈ 4.21554
maxval = 40 | max_precision = 0.25 | μ ≈ 9.81152
"""
camera_maxval: int
camera_binsize: int
quantization_scheme: str = attr.ib(
default=QuantizationScheme.LINEAR,
validator=attr.validators.in_([QuantizationScheme.LINEAR, QuantizationScheme.MU_LAW]),
)
mu: float = attr.ib(default=5)
def discretize(self, xy):
xy = np.clip(xy, -self.camera_maxval, self.camera_maxval)
if self.quantization_scheme == QuantizationScheme.MU_LAW:
xy = xy / self.camera_maxval
v_encode = np.sign(xy) * (np.log(1.0 + self.mu * np.abs(xy)) / np.log(1.0 + self.mu))
v_encode *= self.camera_maxval
xy = v_encode
# Quantize using linear scheme
return np.round((xy + self.camera_maxval) / self.camera_binsize).astype(np.int64)
def undiscretize(self, xy):
xy = xy * self.camera_binsize - self.camera_maxval
if self.quantization_scheme == QuantizationScheme.MU_LAW:
xy = xy / self.camera_maxval
v_decode = np.sign(xy) * (1.0 / self.mu) * ((1.0 + self.mu) ** np.abs(xy) - 1.0)
v_decode *= self.camera_maxval
xy = v_decode
return xy
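# Illustrative round-trip (not part of the original file): with camera_maxval=10 and
# camera_binsize=2, the linear scheme maps the continuous range [-10, 10] onto the integer
# bins 0..10, with bin 5 meaning "no camera movement". Values are illustrative.
def _example_camera_quantizer():
    quantizer = CameraQuantizer(camera_maxval=10, camera_binsize=2)
    bins = quantizer.discretize(np.array([-10.0, 0.0, 4.0, 10.0]))
    recovered = quantizer.undiscretize(bins)
    return bins, recovered  # bins == [0, 5, 7, 10]; recovered == [-10., 0., 4., 10.]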
class ActionTransformer:
"""Transforms actions between internal array and minerl env format."""
@store_args
def __init__(
self,
camera_maxval=10,
camera_binsize=2,
camera_quantization_scheme="linear",
camera_mu=5,
):
self.quantizer = CameraQuantizer(
camera_maxval=camera_maxval,
camera_binsize=camera_binsize,
quantization_scheme=camera_quantization_scheme,
mu=camera_mu,
)
self.camera_zero_bin = self.camera_maxval // self.camera_binsize
def discretize_camera(self, xy):
return self.quantizer.discretize(xy)
def undiscretize_camera(self, pq):
return self.quantizer.undiscretize(pq)
def item_embed_id_to_name(self, item_id):
return mc.MINERL_ITEM_MAP[item_id]
def dict_to_numpy(self, acs):
"""
Env format to policy output format.
"""
act = {
"buttons": np.stack([acs.get(k, 0) for k in Buttons.ALL], axis=-1),
"camera": self.discretize_camera(acs["camera"]),
}
if not self.human_spaces:
act.update(
{
"synthetic_buttons": np.stack([acs[k] for k in SyntheticButtons.ALL], axis=-1),
"place": self.item_embed_name_to_id(acs["place"]),
"equip": self.item_embed_name_to_id(acs["equip"]),
"craft": self.item_embed_name_to_id(acs["craft"]),
}
)
return act
def numpy_to_dict(self, acs):
"""
Numpy policy output to env-compatible format.
"""
assert acs["buttons"].shape[-1] == len(
Buttons.ALL
), f"Mismatched actions: {acs}; expected {len(Buttons.ALL)}:\n( {Buttons.ALL})"
out = {name: acs["buttons"][..., i] for (i, name) in enumerate(Buttons.ALL)}
out["camera"] = self.undiscretize_camera(acs["camera"])
return out
def policy2env(self, acs):
acs = self.numpy_to_dict(acs)
return acs
def env2policy(self, acs):
nbatch = acs["camera"].shape[0]
dummy = np.zeros((nbatch,))
out = {
"camera": self.discretize_camera(acs["camera"]),
"buttons": np.stack([acs.get(k, dummy) for k in Buttons.ALL], axis=-1),
}
return out
| [] |
2024-01-10 | minerllabs/basalt-2022-behavioural-cloning-baseline | openai_vpt~lib~mlp.py | import torch as th
from torch import nn
from openai_vpt.lib import misc
from openai_vpt.lib import torch_util as tu
class MLP(nn.Module):
def __init__(self, insize, nhidlayer, outsize, hidsize, hidactiv, dtype=th.float32):
super().__init__()
self.insize = insize
self.nhidlayer = nhidlayer
self.outsize = outsize
in_sizes = [insize] + [hidsize] * nhidlayer
out_sizes = [hidsize] * nhidlayer + [outsize]
self.layers = nn.ModuleList(
[tu.NormedLinear(insize, outsize, dtype=dtype) for (insize, outsize) in misc.safezip(in_sizes, out_sizes)]
)
self.hidactiv = hidactiv
def forward(self, x):
*hidlayers, finallayer = self.layers
for layer in hidlayers:
x = layer(x)
x = self.hidactiv(x)
x = finallayer(x)
return x
@property
def output_shape(self):
return (self.outsize,)
| [] |
2024-01-10 | shengshengyang/fass-embedding | queryConnectOpenAiLine.py | import faiss
import numpy as np
import pandas as pd
import requests
import streamlit as st
import os
import json
from dotenv import load_dotenv
import time
import openai
# Set the page to wide layout
st.set_page_config(layout="wide")
# Read data from phone.xlsx
data = pd.read_excel('phone1.xlsx')
# Load environment variables from .env, then read the OpenAI API key
load_dotenv()
openai.api_key = os.getenv("OPENAI_KEY")
# Load the Faiss index and title vectors from files
index = faiss.read_index('index.faiss')
title_vectors = np.load('title_vectors.npy')
# Maximum number of tokens for a conversation
max_tokens = 4096
# Set up Streamlit app
st.markdown("<h1 style='text-align: center; color: black;'>大豐智慧課服</h1>", unsafe_allow_html=True)
# Create two columns
col1, col2 = st.columns(2)
# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = []
# Create a placeholder for "GPT生成回覆" section
gpt_placeholder = col2.empty()
query = col2.text_input("請輸入您的問題,我將為您回答")
col2.subheader("GPT生成回覆:")
col1.subheader("最符合您問題的五個答案:")
def summarize_chat_history(chat_history):
if not chat_history:
return ""
# Convert chat history to text
chat_text = " ".join([message["content"] for message in chat_history])
total_words = len(chat_text.split())
if total_words > 100:
# If the total number of words exceeds 100, summarize the chat text using OpenAI's API
response = openai.Completion.create(
engine="gpt-4", # Change this to the correct engine name when GPT-4 is available
prompt=chat_text,
temperature=0.7,
max_tokens=150
)
return response.choices[0].text.strip()
else:
# If the total number of words is less than or equal to 100, return the original chat text
return chat_text
def count_tokens(messages):
return sum([len(message["content"].split()) for message in messages])
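# Rough sanity check (not part of the original script). Note that count_tokens is a whitespace
# word count rather than a true BPE token count, so it will under-estimate the token usage of
# Chinese text, which contains no spaces.
def _example_count_tokens():
    history = [{"role": "user", "content": "hello world"},
               {"role": "assistant", "content": "hi there friend"}]
    return count_tokens(history)  # 5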
# Before calling the openai.ChatCompletion.create method
chat_summary = summarize_chat_history(st.session_state.messages)
if query:
# Display chat messages from history on app rerun
for message in st.session_state.messages:
with col2.chat_message(message["role"]):
st.markdown(message["content"])
with col2.chat_message("user"):
st.markdown(query)
# Add user message to chat history
st.session_state.messages.append({"role": "user", "content": query})
# Use the OpenAI library to make the API call
response = openai.Embedding.create(
model="text-embedding-ada-002",
input=[query]
)
query_vector = np.array(response.data[0]['embedding'])
# Search for nearest neighbors
k = 3 # Number of nearest neighbors to retrieve
distances, indices = index.search(np.array([query_vector]), k)
# Retrieve the matched content
matched_data = data.iloc[indices[0]]
def replace_none_with_na(value):
return '無' if pd.isnull(value) else value
# Create two columns
for i, row in matched_data.iterrows():
row = row.apply(replace_none_with_na)
html = """
<div style="border:1px solid #000; margin:10px; padding:10px;">
<h5>問題: {question}</h5>
<p>解答: {answer}</p>
</div>
""".format(question=row['question'], answer=row['answer'])
col1.markdown(html, unsafe_allow_html=True)
top_results_str = json.dumps(json.loads(matched_data.to_json(orient='records')), ensure_ascii=False)
api_endpoint = "https://api.openai.com/v1/chat/completions"
messages = [{"role": "system", "content": "你是一位大豐環保公司的客服人員,語言使用繁體中文,服務地區包括 基隆、台北、大台北地區、高雄、花蓮、雲林、南投、桃園、嘉義、新竹、台南、宜蘭、台東、苗栗、台中、大台中地區、彰化、屏東 ,"
"客服電話: 0800588717"}] \
+ st.session_state.messages \
+ [
{"role": "assistant", "content": "以下為參考資料: " + top_results_str},
{"role": "user", "content": "請根據提供的question欄位,利用answer 的資料,以及歷史訊息回答問題: " + query
+ ",若資料沒有能夠回答問題請以下列字句回復: 目前尚無相關資料,可留下您的手機號碼,我們將為您服務 "},
]
print(messages)
print('---------------------------------------')
# Count the number of tokens in the messages
num_tokens = count_tokens(messages)
# If the number of tokens exceeds the maximum, remove the oldest messages until it doesn't
while num_tokens > max_tokens:
removed_message = st.session_state.messages.pop(0)
num_tokens -= len(removed_message["content"].split())
# Generate response using ChatGPT API
response = openai.ChatCompletion.create(
model="gpt-4",
messages=messages
)
if 'choices' in response:
# Extract the generated response
generated_response = response.choices[0].message.content
with col2.chat_message("assistant"):
# Display the full response immediately
st.markdown(generated_response)
# Add assistant response to chat history
st.session_state.messages.append({"role": "assistant", "content": generated_response})
else:
print("No response choices found.")
| [
"你是一位大豐環保公司的客服人員,語言使用繁體中文,服務地區包括 基隆、台北、大台北地區、高雄、花蓮、雲林、南投、桃園、嘉義、新竹、台南、宜蘭、台東、苗栗、台中、大台中地區、彰化、屏東 ,客服電話: 0800588717",
"請根據提供的question欄位,利用answer 的資料,以及歷史訊息回答問題: PLACEHOLDER,若資料沒有能夠回答問題請以下列字句回復: 目前尚無相關資料,可留下您的手機號碼,我們將為您服務 ",
"以下為參考資料: PLACEHOLDER",
"[{\"role\": \"system\", \"content\": \"你是一位大豐環保公司的客服人員,語言使用繁體中文,服務地區包括 基隆、台北、大台北地區、高雄、花蓮、雲林、南投、桃園、嘉義、新竹、台南、宜蘭、台東、苗栗、台中、大台中地區、彰化、屏東 ,\"\n \"客服電話: 0800588717\"}] \\\n + st.session_state.messages \\\n + [\n {\"role\": \"assistant\", \"content\": \"以下為參考資料: \" + top_results_str},\n {\"role\": \"user\", \"content\": \"請根據提供的question欄位,利用answer 的資料,以及歷史訊息回答問題: \" + query\n + \",若資料沒有能夠回答問題請以下列字句回復: 目前尚無相關資料,可留下您的手機號碼,我們將為您服務 \"},\n ]"
] |
2024-01-10 | RichelynScott/LangFlow | tests~test_graph.py | from typing import Type, Union
from langflow.graph.edge.base import Edge
from langflow.graph.node.base import Node
import pytest
from langchain.chains.base import Chain
from langchain.llms.fake import FakeListLLM
from langflow.graph import Graph
from langflow.graph.node.types import (
AgentNode,
ChainNode,
FileToolNode,
LLMNode,
PromptNode,
ToolkitNode,
ToolNode,
WrapperNode,
)
from langflow.interface.run import get_result_and_thought
from langflow.utils.payload import get_root_node
# Test cases for the graph module
# now we have three types of graph:
# BASIC_EXAMPLE_PATH, COMPLEX_EXAMPLE_PATH, OPENAPI_EXAMPLE_PATH
def get_node_by_type(graph, node_type: Type[Node]) -> Union[Node, None]:
"""Get a node by type"""
return next((node for node in graph.nodes if isinstance(node, node_type)), None)
def test_graph_structure(basic_graph):
assert isinstance(basic_graph, Graph)
assert len(basic_graph.nodes) > 0
assert len(basic_graph.edges) > 0
for node in basic_graph.nodes:
assert isinstance(node, Node)
for edge in basic_graph.edges:
assert isinstance(edge, Edge)
assert edge.source in basic_graph.nodes
assert edge.target in basic_graph.nodes
def test_circular_dependencies(basic_graph):
assert isinstance(basic_graph, Graph)
def check_circular(node, visited):
visited.add(node)
neighbors = basic_graph.get_nodes_with_target(node)
for neighbor in neighbors:
if neighbor in visited:
return True
if check_circular(neighbor, visited.copy()):
return True
return False
for node in basic_graph.nodes:
assert not check_circular(node, set())
def test_invalid_node_types():
graph_data = {
"nodes": [
{
"id": "1",
"data": {
"node": {
"base_classes": ["BaseClass"],
"template": {
"_type": "InvalidNodeType",
},
},
},
},
],
"edges": [],
}
with pytest.raises(Exception):
Graph(graph_data["nodes"], graph_data["edges"])
def test_get_nodes_with_target(basic_graph):
"""Test getting connected nodes"""
assert isinstance(basic_graph, Graph)
# Get root node
root = get_root_node(basic_graph)
assert root is not None
connected_nodes = basic_graph.get_nodes_with_target(root)
assert connected_nodes is not None
def test_get_node_neighbors_basic(basic_graph):
"""Test getting node neighbors"""
assert isinstance(basic_graph, Graph)
# Get root node
root = get_root_node(basic_graph)
assert root is not None
neighbors = basic_graph.get_node_neighbors(root)
assert neighbors is not None
assert isinstance(neighbors, dict)
# Root Node is an Agent, it requires an LLMChain and tools
    # We need to check if there is a Chain in one of the neighbors'
# data attribute in the type key
assert any(
"ConversationBufferMemory" in neighbor.data["type"]
for neighbor, val in neighbors.items()
if val
)
assert any(
"OpenAI" in neighbor.data["type"] for neighbor, val in neighbors.items() if val
)
def test_get_node_neighbors_complex(complex_graph):
"""Test getting node neighbors"""
assert isinstance(complex_graph, Graph)
# Get root node
root = get_root_node(complex_graph)
assert root is not None
neighbors = complex_graph.get_nodes_with_target(root)
assert neighbors is not None
# Neighbors should be a list of nodes
assert isinstance(neighbors, list)
# Root Node is an Agent, it requires an LLMChain and tools
# We need to check if there is a Chain in the one of the neighbors'
assert any("Chain" in neighbor.data["type"] for neighbor in neighbors)
# assert Tool is in the neighbors
assert any("Tool" in neighbor.data["type"] for neighbor in neighbors)
# Now on to the Chain's neighbors
chain = next(neighbor for neighbor in neighbors if "Chain" in neighbor.data["type"])
chain_neighbors = complex_graph.get_nodes_with_target(chain)
assert chain_neighbors is not None
# Check if there is a LLM in the chain's neighbors
assert any("OpenAI" in neighbor.data["type"] for neighbor in chain_neighbors)
# Chain should have a Prompt as a neighbor
assert any("Prompt" in neighbor.data["type"] for neighbor in chain_neighbors)
# Now on to the Tool's neighbors
tool = next(neighbor for neighbor in neighbors if "Tool" in neighbor.data["type"])
tool_neighbors = complex_graph.get_nodes_with_target(tool)
assert tool_neighbors is not None
# Check if there is an Agent in the tool's neighbors
assert any("Agent" in neighbor.data["type"] for neighbor in tool_neighbors)
# This Agent has a Tool that has a PythonFunction as func
agent = next(
neighbor for neighbor in tool_neighbors if "Agent" in neighbor.data["type"]
)
agent_neighbors = complex_graph.get_nodes_with_target(agent)
assert agent_neighbors is not None
# Check if there is a Tool in the agent's neighbors
assert any("Tool" in neighbor.data["type"] for neighbor in agent_neighbors)
# This Tool has a PythonFunction as func
tool = next(
neighbor for neighbor in agent_neighbors if "Tool" in neighbor.data["type"]
)
tool_neighbors = complex_graph.get_nodes_with_target(tool)
assert tool_neighbors is not None
# Check if there is a PythonFunction in the tool's neighbors
assert any("PythonFunction" in neighbor.data["type"] for neighbor in tool_neighbors)
def test_get_node(basic_graph):
"""Test getting a single node"""
node_id = basic_graph.nodes[0].id
node = basic_graph.get_node(node_id)
assert isinstance(node, Node)
assert node.id == node_id
def test_build_nodes(basic_graph):
"""Test building nodes"""
assert len(basic_graph.nodes) == len(basic_graph._nodes)
for node in basic_graph.nodes:
assert isinstance(node, Node)
def test_build_edges(basic_graph):
"""Test building edges"""
assert len(basic_graph.edges) == len(basic_graph._edges)
for edge in basic_graph.edges:
assert isinstance(edge, Edge)
assert isinstance(edge.source, Node)
assert isinstance(edge.target, Node)
def test_get_root_node(basic_graph, complex_graph):
"""Test getting root node"""
assert isinstance(basic_graph, Graph)
root = get_root_node(basic_graph)
assert root is not None
assert isinstance(root, Node)
assert root.data["type"] == "TimeTravelGuideChain"
# For complex example, the root node is a ZeroShotAgent too
assert isinstance(complex_graph, Graph)
root = get_root_node(complex_graph)
assert root is not None
assert isinstance(root, Node)
assert root.data["type"] == "ZeroShotAgent"
def test_validate_edges(basic_graph):
"""Test validating edges"""
assert isinstance(basic_graph, Graph)
# all edges should be valid
assert all(edge.valid for edge in basic_graph.edges)
def test_matched_type(basic_graph):
"""Test matched type attribute in Edge"""
assert isinstance(basic_graph, Graph)
# all edges should be valid
assert all(edge.valid for edge in basic_graph.edges)
# all edges should have a matched_type attribute
assert all(hasattr(edge, "matched_type") for edge in basic_graph.edges)
# The matched_type attribute should be in the source_types attr
assert all(edge.matched_type in edge.source_types for edge in basic_graph.edges)
def test_build_params(basic_graph):
"""Test building params"""
assert isinstance(basic_graph, Graph)
# all edges should be valid
assert all(edge.valid for edge in basic_graph.edges)
# all edges should have a matched_type attribute
assert all(hasattr(edge, "matched_type") for edge in basic_graph.edges)
# The matched_type attribute should be in the source_types attr
assert all(edge.matched_type in edge.source_types for edge in basic_graph.edges)
# Get the root node
root = get_root_node(basic_graph)
# Root node is a TimeTravelGuideChain
# which requires an llm and memory
assert isinstance(root.params, dict)
assert "llm" in root.params
assert "memory" in root.params
def test_build(basic_graph, complex_graph, openapi_graph):
"""Test Node's build method"""
assert_agent_was_built(basic_graph)
assert_agent_was_built(complex_graph)
assert_agent_was_built(openapi_graph)
def assert_agent_was_built(graph):
"""Assert that the agent was built"""
assert isinstance(graph, Graph)
# Now we test the build method
# Build the Agent
result = graph.build()
# The agent should be a AgentExecutor
assert isinstance(result, Chain)
def test_agent_node_build(complex_graph):
agent_node = get_node_by_type(complex_graph, AgentNode)
assert agent_node is not None
built_object = agent_node.build()
assert built_object is not None
def test_tool_node_build(complex_graph):
tool_node = get_node_by_type(complex_graph, ToolNode)
assert tool_node is not None
built_object = tool_node.build()
assert built_object is not None
# Add any further assertions specific to the ToolNode's build() method
def test_chain_node_build(complex_graph):
chain_node = get_node_by_type(complex_graph, ChainNode)
assert chain_node is not None
built_object = chain_node.build()
assert built_object is not None
# Add any further assertions specific to the ChainNode's build() method
def test_prompt_node_build(complex_graph):
prompt_node = get_node_by_type(complex_graph, PromptNode)
assert prompt_node is not None
built_object = prompt_node.build()
assert built_object is not None
# Add any further assertions specific to the PromptNode's build() method
def test_llm_node_build(basic_graph):
llm_node = get_node_by_type(basic_graph, LLMNode)
assert llm_node is not None
built_object = llm_node.build()
assert built_object is not None
# Add any further assertions specific to the LLMNode's build() method
def test_toolkit_node_build(openapi_graph):
toolkit_node = get_node_by_type(openapi_graph, ToolkitNode)
assert toolkit_node is not None
built_object = toolkit_node.build()
assert built_object is not None
# Add any further assertions specific to the ToolkitNode's build() method
def test_file_tool_node_build(openapi_graph):
file_tool_node = get_node_by_type(openapi_graph, FileToolNode)
assert file_tool_node is not None
built_object = file_tool_node.build()
assert built_object is not None
# Add any further assertions specific to the FileToolNode's build() method
def test_wrapper_node_build(openapi_graph):
wrapper_node = get_node_by_type(openapi_graph, WrapperNode)
assert wrapper_node is not None
built_object = wrapper_node.build()
assert built_object is not None
# Add any further assertions specific to the WrapperNode's build() method
def test_get_result_and_thought(basic_graph):
"""Test the get_result_and_thought method"""
responses = [
"Final Answer: I am a response",
]
message = "Hello"
# Find the node that is an LLMNode and change the
# _built_object to a FakeListLLM
llm_node = get_node_by_type(basic_graph, LLMNode)
assert llm_node is not None
llm_node._built_object = FakeListLLM(responses=responses)
llm_node._built = True
langchain_object = basic_graph.build()
# assert all nodes are built
assert all(node._built for node in basic_graph.nodes)
# now build again and check if FakeListLLM was used
# Get the result and thought
result, thought = get_result_and_thought(langchain_object, message)
# The result should be a str
assert isinstance(result, str)
# The thought should be a Thought
assert isinstance(thought, str)
| [] |
2024-01-10 | ReneNyffenegger/about-python | libraries~openai~answer.py | #!/usr/bin/python3
import os
import sys
import openai
question = ' '.join(sys.argv[1:])
openai.api_key = os.getenv("OPENAI_API_KEY")
#
# Set model
# (Find a list of models at https://api.openai.com/v1/models)
#
model = 'text-davinci-002'
# 'gpt-3.5-turbo'
# 'gpt-3.5-turbo-0301'
resp = openai.Completion.create(
model = model,
prompt = question + '?',
temperature = 0,
max_tokens = 100,
top_p = 1,
frequency_penalty = 0.0,
presence_penalty = 0.0,
stop =["?"]
)
print(resp["choices"][0]["text"])
| [
"PLACEHOLDER?"
] |
2024-01-10 | robinsingh1/squash | question_generation~interact.py | # # Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import random
import tqdm
from argparse import ArgumentParser
from pprint import pformat
import torch
import torch.nn.functional as F
from pytorch_pretrained_bert import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, GPT2LMHeadModel, GPT2Tokenizer
from train import SPECIAL_TOKENS
from train import build_para_only_input_from_segments, build_qa_only_input_from_segments
from dataloader import get_positional_dataset_from_file
"""
python question-generation/interact.py \
--model_checkpoint question-generation/gpt2_corefs_question_generation \
--model_type gpt2 \
--key $KEY
"""
def top_filtering(logits, top_k=0, top_p=0.0, threshold=-float('Inf'), filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.
top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset
whose total probability mass is greater than or equal to the threshold top_p.
In practice, we select the highest probability tokens whose cumulative probability mass exceeds
the threshold top_p.
threshold: a minimal threshold to keep logits
"""
    assert logits.dim() == 1 # Only works for batch size 1 for now - could update, but it would obfuscate the code a bit
top_k = min(top_k, logits.size(-1))
if top_k > 0:
# Remove all tokens with a probability less than the last token in the top-k tokens
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
# Compute cumulative probabilities of sorted tokens
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probabilities > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# Back to unsorted indices and set them to -infinity
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
indices_to_remove = logits < threshold
logits[indices_to_remove] = filter_value
return logits
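# Illustrative sketch (not part of the original file): nucleus (top-p) filtering keeps the
# smallest set of logits whose cumulative probability exceeds top_p and masks the rest to -inf.
def _example_top_filtering():
    logits = torch.tensor([2.0, 1.0, 0.5, -1.0])
    filtered = top_filtering(logits.clone(), top_p=0.9)
    # The lowest-probability entry is masked, so it receives zero probability after the softmax.
    return F.softmax(filtered, dim=-1)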
def sample_sequence(inst, tokenizer, model, args, para_cache):
special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
inst['original_question'] = inst['question']
inst['question'] = []
# Ignore the paragraph while building the input instance and token type ids
instance, _ = build_qa_only_input_from_segments(inst, tokenizer, with_eos=False)
input_ids = torch.tensor(instance['input_ids'], device=args.device).unsqueeze(0)
token_type_ids = torch.tensor(instance['token_type_ids'], device=args.device).unsqueeze(0)
# Initialize the past using the paragraph cache hidden representations
past = para_cache["hidden_states"]
prev = None
# This will be either <question-general> or <question-specific>, used to create subsequent inputs
token_type = instance['token_type_ids'][-1]
for i in range(args.max_length):
if i != 0:
# In the first step of decoding, we want to look at the entire answer
# In the subsequent steps, we can just cache the hidden representations from previous steps
input_ids = prev.unsqueeze(0)
token_type_ids = torch.tensor([token_type]).unsqueeze(0).to(args.device)
logits, past = model(input_ids, token_type_ids=token_type_ids, past=past)
logits = logits[0, -1, :] / args.temperature
logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
probs = F.softmax(logits, dim=-1)
prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
if i < args.min_length and prev.item() in special_tokens_ids:
while prev.item() in special_tokens_ids:
prev = torch.multinomial(probs, num_samples=1)
if prev.item() in special_tokens_ids:
break
inst['question'].append(prev.item())
return inst
def run():
parser = ArgumentParser()
parser.add_argument("--model_type", type=str, default="gpt", help="gpt or gpt2")
parser.add_argument("--model_checkpoint", type=str, default="", help="Path, url or short name of the model")
parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu",
help="Device (cuda or cpu)")
parser.add_argument("--filename", type=str, default="data/instances_dev.pkl", help="File to use for decoding")
parser.add_argument("--no_sample", action='store_true', help="Set to use greedy decoding instead of sampling")
parser.add_argument("--max_length", type=int, default=50, help="Maximum length of the output utterances")
parser.add_argument("--min_length", type=int, default=1, help="Minimum length of the output utterances")
parser.add_argument("--seed", type=int, default=42, help="Seed")
parser.add_argument("--temperature", type=int, default=0.7, help="Sampling softmax temperature")
parser.add_argument("--top_k", type=int, default=0, help="Filter top-k tokens before sampling (<=0: no filtering)")
parser.add_argument("--top_p", type=float, default=0.9,
help="Nucleus filtering (top-p) before sampling (<=0.0: no filtering)")
# While using SQUASH in the pipeline mode, prefer using the --key flag
parser.add_argument("--key", type=str, default=None,
help="Override the default settings if the key is set, used in pipeline mode")
args = parser.parse_args()
if args.key is not None:
# Override some the filename and top_p default settings if args.key is set
# This is done when the question generation module is being used in the SQUASH pipeline mode
args.filename = "squash/temp/%s/input.pkl" % args.key
with open("squash/temp/%s/metadata.json" % args.key, "r") as f:
metadata = json.loads(f.read())
args.top_p = metadata["settings"]["top_p"]
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__file__)
logger.info(pformat(args))
random.seed(args.seed)
torch.random.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
logger.info("Get pretrained model and tokenizer")
if args.model_type == 'gpt2':
tokenizer = GPT2Tokenizer.from_pretrained(args.model_checkpoint)
model = GPT2LMHeadModel.from_pretrained(args.model_checkpoint)
else:
tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_checkpoint)
model = OpenAIGPTLMHeadModel.from_pretrained(args.model_checkpoint)
model.to(args.device)
model.eval()
data = get_positional_dataset_from_file(tokenizer, args.filename)
final_output_dict = {
"version": "squash-2.0",
"data": [{
"paragraphs": []
}]
}
question_number = 0
para_cache = {
"index": None,
"hidden_states": None
}
for inst in tqdm.tqdm(data):
with torch.no_grad():
para_index = inst["para_index"]
# Questions from the same paragraph all appear together
# We can re-use the paragraph hidden representations for different questions in the same paragraph
if para_index != para_cache["index"]:
# Since we have moved to a new paragraph, generate its cache
para_cache["hidden_states"] = None
# Ignore the answer and question while building the input
instance, _ = build_para_only_input_from_segments(inst, tokenizer)
input_ids = torch.tensor(instance['input_ids'], device=args.device).unsqueeze(0)
token_type_ids = torch.tensor(instance['token_type_ids'], device=args.device).unsqueeze(0)
# Run a forward pass to generate the para caches
_, para_cache["hidden_states"] = model(input_ids, token_type_ids=token_type_ids)
# Sample a question using the paragraph cache
output = sample_sequence(inst, tokenizer, model, args, para_cache)
original_paragraph = tokenizer.decode(output['paragraph'])
generated_question = tokenizer.decode(output['question'], skip_special_tokens=True)
original_answer = tokenizer.decode(output['answer'], skip_special_tokens=True)
para_index = inst['para_index']
para_cache["index"] = inst['para_index']
# verify whether the answer position is correct, since this will be utilized for filtering
original_ans_position = output["answer_position"]
if original_paragraph[output["answer_position"]:output["answer_position"] + len(original_answer)] != original_answer:
# This should never be executed, only used as a last resort
logger.info("Answer mismatch!")
original_ans_position = original_paragraph.index(original_answer)
# Output in a SQUAD-like format with questions clumped together under their parent paragraph
if len(final_output_dict["data"][0]["paragraphs"]) > para_index:
# verify whether the paragraph text is identical
assert original_paragraph == final_output_dict["data"][0]["paragraphs"][para_index]['context']
# append the question answer pair
final_output_dict["data"][0]["paragraphs"][para_index]['qas'].append({
'id': 'question_%d' % question_number,
'question': generated_question,
'answers': [{
'text': original_answer,
'answer_start': original_ans_position,
}],
'class': output['class'],
'algorithm': output['algorithm'],
'is_impossible': False
})
else:
# add a new question to the list of QA pairs
final_output_dict['data'][0]['paragraphs'].append({
'context': original_paragraph,
'qas': [{
'id': 'question_%d' % question_number,
'question': generated_question,
'answers': [{
'text': original_answer,
'answer_start': original_ans_position,
}],
'class': output['class'],
'algorithm': output['algorithm'],
'is_impossible': False
}]
})
question_number += 1
with open("squash/temp/%s/generated_questions.json" % args.key, "w") as f:
f.write(json.dumps(final_output_dict))
if __name__ == "__main__":
run()
| [] |
2024-01-10 | robinsingh1/squash | question_generation~mod_interact.py | # # Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
from dotmap import DotMap
import os
import logging
import random
import tqdm
from argparse import ArgumentParser
from pprint import pformat
import torch
import torch.nn.functional as F
#from pyspark import SparkFiles
from pytorch_pretrained_bert import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, GPT2LMHeadModel, GPT2Tokenizer, GPT2Config
from .train import SPECIAL_TOKENS
from .train import build_para_only_input_from_segments, build_qa_only_input_from_segments
from .dataloader import get_positional_dataset_from_file
"""
python question-generation/interact.py \
--model_checkpoint question-generation/gpt2_corefs_question_generation \
--model_type gpt2 \
--key $KEY
"""
def top_filtering(logits, top_k=0, top_p=0.0, threshold=-float('Inf'), filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.
top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset
whose total probability mass is greater than or equal to the threshold top_p.
In practice, we select the highest probability tokens whose cumulative probability mass exceeds
the threshold top_p.
threshold: a minimal threshold to keep logits
"""
assert logits.dim() == 1 # Only work for batch size 1 for now - could update but it would obfuscate a bit the code
top_k = min(top_k, logits.size(-1))
if top_k > 0:
# Remove all tokens with a probability less than the last token in the top-k tokens
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
# Compute cumulative probabilities of sorted tokens
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probabilities > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# Back to unsorted indices and set them to -infinity
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
indices_to_remove = logits < threshold
logits[indices_to_remove] = filter_value
return logits
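# Illustrative use of top_filtering (not part of the original pipeline) -- a minimal
# sketch assuming a 1-D tensor of logits.  Values outside the top-k / nucleus set are
# replaced by -inf, so softmax gives them zero probability before sampling:
#
#   logits = torch.tensor([2.0, 1.0, 0.5, -1.0])
#   filtered = top_filtering(logits.clone(), top_p=0.9)   # clone(): filtering is in-place
#   probs = F.softmax(filtered, dim=-1)                   # masked tokens get probability 0
#   next_token = torch.multinomial(probs, 1)              # sample from the truncated distribution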
def sample_sequence(inst, tokenizer, model, args, para_cache):
special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
inst['original_question'] = inst['question']
inst['question'] = []
# Ignore the paragraph while building the input instance and token type ids
instance, _ = build_qa_only_input_from_segments(inst, tokenizer, with_eos=False)
input_ids = torch.tensor(instance['input_ids'], device=args.device).unsqueeze(0)
token_type_ids = torch.tensor(instance['token_type_ids'], device=args.device).unsqueeze(0)
# Initialize the past using the paragraph cache hidden representations
past = para_cache["hidden_states"]
prev = None
# This will be either <question-general> or <question-specific>, used to create subsequent inputs
token_type = instance['token_type_ids'][-1]
args.max_length = 50
args.temperature = 0.7
args.top_k = 0
args.no_sample = True
args.min_length = 1
for i in range(args.max_length):
if i != 0:
# In the first step of decoding, we want to look at the entire answer
# In the subsequent steps, we can just cache the hidden representations from previous steps
input_ids = prev.unsqueeze(0)
token_type_ids = torch.tensor([token_type]).unsqueeze(0).to(args.device)
logits, past = model(input_ids, token_type_ids=token_type_ids, past=past)
logits = logits[0, -1, :] / args.temperature
logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
probs = F.softmax(logits, dim=-1)
prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
if i < args.min_length and prev.item() in special_tokens_ids:
while prev.item() in special_tokens_ids:
prev = torch.multinomial(probs, num_samples=1)
if prev.item() in special_tokens_ids:
break
inst['question'].append(prev.item())
return inst
def question_generation(_input):
metadata, output = _input
args = DotMap()
"""
parser = ArgumentParser()
parser.add_argument("--model_type", type=str, default="gpt", help="gpt or gpt2")
parser.add_argument("--model_checkpoint", type=str, default="", help="Path, url or short name of the model")
parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu",
help="Device (cuda or cpu)")
parser.add_argument("--filename", type=str, default="data/instances_dev.pkl", help="File to use for decoding")
parser.add_argument("--no_sample", action='store_true', help="Set to use greedy decoding instead of sampling")
parser.add_argument("--max_length", type=int, default=50, help="Maximum length of the output utterances")
parser.add_argument("--min_length", type=int, default=1, help="Minimum length of the output utterances")
parser.add_argument("--seed", type=int, default=42, help="Seed")
parser.add_argument("--temperature", type=int, default=0.7, help="Sampling softmax temperature")
parser.add_argument("--top_k", type=int, default=0, help="Filter top-k tokens before sampling (<=0: no filtering)")
parser.add_argument("--top_p", type=float, default=0.9,
help="Nucleus filtering (top-p) before sampling (<=0.0: no filtering)")
# While using SQUASH in the pipeline mode, prefer using the --key flag
parser.add_argument("--key", type=str, default=None,
help="Override the default settings if the key is set, used in pipeline mode")
args = parser.parse_args()
"""
"""
if args.key is not None:
# Override some the filename and top_p default settings if args.key is set
# This is done when the question generation module is being used in the SQUASH pipeline mode
args.filename = "squash/temp/%s/input.pkl" % args.key
with open("squash/temp/%s/metadata.json" % args.key, "r") as f:
metadata = json.loads(f.read())
args.top_p = metadata["settings"]["top_p"]
args.filename = "squash/temp/%s/input.pkl" % args.key
with open("squash/temp/%s/metadata.json" % args.key, "r") as f:
metadata = json.loads(f.read())
args.top_p = metadata["settings"]["top_p"]
"""
setattr(args, "top_p", metadata["settings"]["top_p"])
args.top_p = metadata["settings"]["top_p"]
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__file__)
logger.info(pformat(args))
args.seed = 42
random.seed(args.seed)
torch.random.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
logger.info("Get pretrained model and tokenizer")
# NEW BLOCK
"""
model_checkpoint = "question_generation/gpt2_corefs_question_generation"
model_checkpoint = "/home/gpt2_corefs_question_generation"
model_type = "gpt2"
#model_checkpoint = "https://storage.cloud.google.com/ds-playground/squash/gpt2_qa.tar.gz"
SAVED_MODEL_DIR = "gpt2_corefs_question_generation"
dir_path = os.path.dirname(os.path.realpath(__file__))
model_checkpoint = os.path.join(dir_path, SAVED_MODEL_DIR)
"""
model_checkpoint = "question_generation/gpt2_corefs_question_generation"
tokenizer = GPT2Tokenizer.from_pretrained(model_checkpoint)
model = GPT2LMHeadModel.from_pretrained(model_checkpoint)
""" OLD BLOCK
if args.model_type == 'gpt2':
tokenizer = GPT2Tokenizer.from_pretrained(args.model_checkpoint)
model = GPT2LMHeadModel.from_pretrained(args.model_checkpoint)
else:
tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_checkpoint)
model = OpenAIGPTLMHeadModel.from_pretrained(args.model_checkpoint)
"""
""" SPARK
output_config_file = "/content/squash-generation/question_generation/gpt2_corefs_question_generation/config.json"
output_model_file = "/content/squash-generation/question_generation/gpt2_corefs_question_generation/pytorch_model.bin"
output_vocab_file = "/content/squash-generation/question_generation/gpt2_corefs_question_generation/vocab.json"
merges_file = "/content/squash-generation/question_generation/gpt2_corefs_question_generation/merges.txt"
output_config_file = SparkFiles.get("config.json")
output_model_file = SparkFiles.get("pytorch_model.bin")
output_vocab_file = SparkFiles.get("vocab.json")
merges_file = SparkFiles.get("merges.txt")
config = GPT2Config.from_json_file(output_config_file)
model = GPT2LMHeadModel(config)
state_dict = torch.load(output_model_file, map_location=torch.device('cpu'))
model.load_state_dict(state_dict)
tokenizer = GPT2Tokenizer(output_vocab_file, merges_file=merges_file)
model.to("cpu")
model.eval()
args.device = "cpu"
"""
args.device = "cpu"
model.to(args.device)
model.eval()
#return {"break":"point"}
#data = get_positional_dataset_from_file(tokenizer, args.filename)
data = get_positional_dataset_from_file(tokenizer, output)
final_output_dict = {
"version": "squash-2.0",
"data": [{
"paragraphs": []
}]
}
question_number = 0
para_cache = {
"index": None,
"hidden_states": None
}
for inst in tqdm.tqdm(data):
with torch.no_grad():
para_index = inst["para_index"]
# Questions from the same paragraph all appear together
# We can re-use the paragraph hidden representations for different questions in the same paragraph
if para_index != para_cache["index"]:
# Since we have moved to a new paragraph, generate its cache
para_cache["hidden_states"] = None
# Ignore the answer and question while building the input
instance, _ = build_para_only_input_from_segments(inst, tokenizer)
input_ids = torch.tensor(instance['input_ids'], device=args.device).unsqueeze(0)
token_type_ids = torch.tensor(instance['token_type_ids'], device=args.device).unsqueeze(0)
# Run a forward pass to generate the para caches
_, para_cache["hidden_states"] = model(input_ids, token_type_ids=token_type_ids)
# Sample a question using the paragraph cache
output = sample_sequence(inst, tokenizer, model, args, para_cache)
original_paragraph = tokenizer.decode(output['paragraph'])
generated_question = tokenizer.decode(output['question'], skip_special_tokens=True)
original_answer = tokenizer.decode(output['answer'], skip_special_tokens=True)
para_index = inst['para_index']
para_cache["index"] = inst['para_index']
# verify whether the answer position is correct, since this will be utilized for filtering
original_ans_position = output["answer_position"]
if original_paragraph[output["answer_position"]:output["answer_position"] + len(original_answer)] != original_answer:
# This should never be executed, only used as a last resort
logger.info("Answer mismatch!")
original_ans_position = original_paragraph.index(original_answer)
# Output in a SQUAD-like format with questions clumped together under their parent paragraph
if len(final_output_dict["data"][0]["paragraphs"]) > para_index:
# verify whether the paragraph text is identical
assert original_paragraph == final_output_dict["data"][0]["paragraphs"][para_index]['context']
# append the question answer pair
final_output_dict["data"][0]["paragraphs"][para_index]['qas'].append({
'id': 'question_%d' % question_number,
'question': generated_question,
'answers': [{
'text': original_answer,
'answer_start': original_ans_position,
}],
'class': output['class'],
'algorithm': output['algorithm'],
'is_impossible': False
})
else:
# add a new question to the list of QA pairs
final_output_dict['data'][0]['paragraphs'].append({
'context': original_paragraph,
'qas': [{
'id': 'question_%d' % question_number,
'question': generated_question,
'answers': [{
'text': original_answer,
'answer_start': original_ans_position,
}],
'class': output['class'],
'algorithm': output['algorithm'],
'is_impossible': False
}]
})
question_number += 1
#with open("squash/temp/%s/generated_questions.json" % args.key, "w") as f:
# f.write(json.dumps(final_output_dict))
return final_output_dict
if __name__ == "__main__":
question_generation()
| [] |
2024-01-10 | robinsingh1/squash | question_generation~spark_mod_interact.py | # # Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
from dotmap import DotMap
import os
import logging
import random
import tqdm
from argparse import ArgumentParser
from pprint import pformat
import torch
import torch.nn.functional as F
from pyspark import SparkFiles  # required below for SparkFiles.get()
from pytorch_pretrained_bert import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, GPT2LMHeadModel, GPT2Tokenizer, GPT2Config
from .train import SPECIAL_TOKENS
from .train import build_para_only_input_from_segments, build_qa_only_input_from_segments
from .dataloader import get_positional_dataset_from_file
"""
python question-generation/interact.py \
--model_checkpoint question-generation/gpt2_corefs_question_generation \
--model_type gpt2 \
--key $KEY
"""
def top_filtering(logits, top_k=0, top_p=0.0, threshold=-float('Inf'), filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.
top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset
whose total probability mass is greater than or equal to the threshold top_p.
In practice, we select the highest probability tokens whose cumulative probability mass exceeds
the threshold top_p.
threshold: a minimal threshold to keep logits
"""
assert logits.dim() == 1 # Only work for batch size 1 for now - could update but it would obfuscate a bit the code
top_k = min(top_k, logits.size(-1))
if top_k > 0:
# Remove all tokens with a probability less than the last token in the top-k tokens
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
# Compute cumulative probabilities of sorted tokens
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probabilities > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# Back to unsorted indices and set them to -infinity
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
indices_to_remove = logits < threshold
logits[indices_to_remove] = filter_value
return logits
def sample_sequence(inst, tokenizer, model, args, para_cache):
special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
inst['original_question'] = inst['question']
inst['question'] = []
# Ignore the paragraph while building the input instance and token type ids
instance, _ = build_qa_only_input_from_segments(inst, tokenizer, with_eos=False)
input_ids = torch.tensor(instance['input_ids'], device=args.device).unsqueeze(0)
token_type_ids = torch.tensor(instance['token_type_ids'], device=args.device).unsqueeze(0)
# Initialize the past using the paragraph cache hidden representations
past = para_cache["hidden_states"]
prev = None
# This will be either <question-general> or <question-specific>, used to create subsequent inputs
token_type = instance['token_type_ids'][-1]
args.max_length = 50
args.temperature = 0.7
args.top_k = 0
args.no_sample = True
args.min_length = 1
for i in range(args.max_length):
if i != 0:
# In the first step of decoding, we want to look at the entire answer
# In the subsequent steps, we can just cache the hidden representations from previous steps
input_ids = prev.unsqueeze(0)
token_type_ids = torch.tensor([token_type]).unsqueeze(0).to(args.device)
logits, past = model(input_ids, token_type_ids=token_type_ids, past=past)
logits = logits[0, -1, :] / args.temperature
logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
probs = F.softmax(logits, dim=-1)
prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
if i < args.min_length and prev.item() in special_tokens_ids:
while prev.item() in special_tokens_ids:
prev = torch.multinomial(probs, num_samples=1)
if prev.item() in special_tokens_ids:
break
inst['question'].append(prev.item())
return inst
def question_generation(_input):
metadata, output = _input
args = DotMap()
"""
parser = ArgumentParser()
parser.add_argument("--model_type", type=str, default="gpt", help="gpt or gpt2")
parser.add_argument("--model_checkpoint", type=str, default="", help="Path, url or short name of the model")
parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu",
help="Device (cuda or cpu)")
parser.add_argument("--filename", type=str, default="data/instances_dev.pkl", help="File to use for decoding")
parser.add_argument("--no_sample", action='store_true', help="Set to use greedy decoding instead of sampling")
parser.add_argument("--max_length", type=int, default=50, help="Maximum length of the output utterances")
parser.add_argument("--min_length", type=int, default=1, help="Minimum length of the output utterances")
parser.add_argument("--seed", type=int, default=42, help="Seed")
parser.add_argument("--temperature", type=int, default=0.7, help="Sampling softmax temperature")
parser.add_argument("--top_k", type=int, default=0, help="Filter top-k tokens before sampling (<=0: no filtering)")
parser.add_argument("--top_p", type=float, default=0.9,
help="Nucleus filtering (top-p) before sampling (<=0.0: no filtering)")
# While using SQUASH in the pipeline mode, prefer using the --key flag
parser.add_argument("--key", type=str, default=None,
help="Override the default settings if the key is set, used in pipeline mode")
args = parser.parse_args()
"""
"""
if args.key is not None:
# Override some the filename and top_p default settings if args.key is set
# This is done when the question generation module is being used in the SQUASH pipeline mode
args.filename = "squash/temp/%s/input.pkl" % args.key
with open("squash/temp/%s/metadata.json" % args.key, "r") as f:
metadata = json.loads(f.read())
args.top_p = metadata["settings"]["top_p"]
args.filename = "squash/temp/%s/input.pkl" % args.key
with open("squash/temp/%s/metadata.json" % args.key, "r") as f:
metadata = json.loads(f.read())
args.top_p = metadata["settings"]["top_p"]
"""
setattr(args, "top_p", metadata["settings"]["top_p"])
args.top_p = metadata["settings"]["top_p"]
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__file__)
logger.info(pformat(args))
args.seed = 42
random.seed(args.seed)
torch.random.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
logger.info("Get pretrained model and tokenizer")
# NEW BLOCK
model_checkpoint = "question_generation/gpt2_corefs_question_generation"
model_checkpoint = "/home/gpt2_corefs_question_generation"
model_type = "gpt2"
#model_checkpoint = "https://storage.cloud.google.com/ds-playground/squash/gpt2_qa.tar.gz"
SAVED_MODEL_DIR = "gpt2_corefs_question_generation"
dir_path = os.path.dirname(os.path.realpath(__file__))
model_checkpoint = os.path.join(dir_path, SAVED_MODEL_DIR)
model_checkpoint = "question_generation/gpt2_corefs_question_generation"
tokenizer = GPT2Tokenizer.from_pretrained(model_checkpoint)
model = GPT2LMHeadModel.from_pretrained(model_checkpoint)
""" OLD BLOCK
if args.model_type == 'gpt2':
tokenizer = GPT2Tokenizer.from_pretrained(args.model_checkpoint)
model = GPT2LMHeadModel.from_pretrained(args.model_checkpoint)
else:
tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_checkpoint)
model = OpenAIGPTLMHeadModel.from_pretrained(args.model_checkpoint)
"""
output_config_file = "/content/squash-generation/question_generation/gpt2_corefs_question_generation/config.json"
output_model_file = "/content/squash-generation/question_generation/gpt2_corefs_question_generation/pytorch_model.bin"
output_vocab_file = "/content/squash-generation/question_generation/gpt2_corefs_question_generation/vocab.json"
merges_file = "/content/squash-generation/question_generation/gpt2_corefs_question_generation/merges.txt"
output_config_file = SparkFiles.get("config.json")
output_model_file = SparkFiles.get("pytorch_model.bin")
output_vocab_file = SparkFiles.get("vocab.json")
merges_file = SparkFiles.get("merges.txt")
config = GPT2Config.from_json_file(output_config_file)
model = GPT2LMHeadModel(config)
state_dict = torch.load(output_model_file, map_location=torch.device('cpu'))
model.load_state_dict(state_dict)
tokenizer = GPT2Tokenizer(output_vocab_file, merges_file=merges_file)
model.to("cpu")
model.eval()
args.device = "cpu"
args.device = "cpu"
model.to(args.device)
model.eval()
return {"break":"point"}
#data = get_positional_dataset_from_file(tokenizer, args.filename)
data = get_positional_dataset_from_file(tokenizer, output)
final_output_dict = {
"version": "squash-2.0",
"data": [{
"paragraphs": []
}]
}
question_number = 0
para_cache = {
"index": None,
"hidden_states": None
}
for inst in tqdm.tqdm(data):
with torch.no_grad():
para_index = inst["para_index"]
# Questions from the same paragraph all appear together
# We can re-use the paragraph hidden representations for different questions in the same paragraph
if para_index != para_cache["index"]:
# Since we have moved to a new paragraph, generate its cache
para_cache["hidden_states"] = None
# Ignore the answer and question while building the input
instance, _ = build_para_only_input_from_segments(inst, tokenizer)
input_ids = torch.tensor(instance['input_ids'], device=args.device).unsqueeze(0)
token_type_ids = torch.tensor(instance['token_type_ids'], device=args.device).unsqueeze(0)
# Run a forward pass to generate the para caches
_, para_cache["hidden_states"] = model(input_ids, token_type_ids=token_type_ids)
# Sample a question using the paragraph cache
output = sample_sequence(inst, tokenizer, model, args, para_cache)
original_paragraph = tokenizer.decode(output['paragraph'])
generated_question = tokenizer.decode(output['question'], skip_special_tokens=True)
original_answer = tokenizer.decode(output['answer'], skip_special_tokens=True)
para_index = inst['para_index']
para_cache["index"] = inst['para_index']
# verify whether the answer position is correct, since this will be utilized for filtering
original_ans_position = output["answer_position"]
if original_paragraph[output["answer_position"]:output["answer_position"] + len(original_answer)] != original_answer:
# This should never be executed, only used as a last resort
logger.info("Answer mismatch!")
original_ans_position = original_paragraph.index(original_answer)
# Output in a SQUAD-like format with questions clumped together under their parent paragraph
if len(final_output_dict["data"][0]["paragraphs"]) > para_index:
# verify whether the paragraph text is identical
assert original_paragraph == final_output_dict["data"][0]["paragraphs"][para_index]['context']
# append the question answer pair
final_output_dict["data"][0]["paragraphs"][para_index]['qas'].append({
'id': 'question_%d' % question_number,
'question': generated_question,
'answers': [{
'text': original_answer,
'answer_start': original_ans_position,
}],
'class': output['class'],
'algorithm': output['algorithm'],
'is_impossible': False
})
else:
# add a new question to the list of QA pairs
final_output_dict['data'][0]['paragraphs'].append({
'context': original_paragraph,
'qas': [{
'id': 'question_%d' % question_number,
'question': generated_question,
'answers': [{
'text': original_answer,
'answer_start': original_ans_position,
}],
'class': output['class'],
'algorithm': output['algorithm'],
'is_impossible': False
}]
})
question_number += 1
#with open("squash/temp/%s/generated_questions.json" % args.key, "w") as f:
# f.write(json.dumps(final_output_dict))
return final_output_dict
if __name__ == "__main__":
question_generation()
| [] |
2024-01-10 | longfei8533/Chat-PubMed | pubmed_app.py | import streamlit as st
import openai
import json
import pubmed
from Utils import Functions, chat
from prompt import Prompt
# https://blog.streamlit.io/how-to-build-a-llama-2-chatbot/
st.set_page_config(page_title="Chat PubMed", page_icon=":robot:")
st.header("Chat PubMed")
with st.sidebar:
st.title("Chat PubMed")
openai.api_base = st.text_input(
"Enter OpenAI API base URL:", value="https://api.chatanywhere.cn/v1"
)
openai.api_key = st.text_input("Enter OpenAI API token:", type="password")
if openai.api_key:
st.success("Credentials Saved.")
else:
st.warning("Enter your OpenAI API token above.")
    tools_use_pub = st.checkbox("Enable PubMed article retrieval", value=True)
if tools_use_pub:
functions = Functions().functions_list
else:
functions = None
if "query_history" not in st.session_state:
st.session_state["query_history"] = []
st.session_state["user_input"] = []
st.session_state["assistant_response"] = []
st.session_state.query_history.append(
{
"role": "system",
"content": Prompt().system_prompt,
}
)
else:
for i in range(len(st.session_state.assistant_response)):
with st.chat_message("user"):
st.write(st.session_state.user_input[i])
with st.chat_message("assistant"):
st.write(st.session_state.assistant_response[i])
def get_text():
input_text = st.chat_input(placeholder="Your message")
return input_text
user_input = get_text()
if user_input:
st.session_state.user_input.append(user_input)
st.session_state.query_history.append(
{
"role": "user",
"content": user_input,
}
)
with st.chat_message("user"):
st.write(user_input)
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
response_message = chat(
messages=st.session_state["query_history"],
functions=functions,
model="gpt-4-0613",
)
response_message = response_message.choices[0].message
if response_message.get("function_call"):
function_name = response_message["function_call"]["name"]
if function_name == "search_pubmed":
function_args = json.loads(
response_message["function_call"]["arguments"]
)
function_response = pubmed.PubMedAPIWrapper(
query=function_args.get("query")
).run()
st.session_state.query_history.append(response_message)
st.session_state.query_history.append(
{
"role": "function",
"name": function_name,
"content": function_response,
}
)
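                # Second round-trip (note): the function result is sent back to the model
                # so it can compose the final natural-language answer from the PubMed output.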
response_message = chat(
messages=st.session_state["query_history"],
functions=Functions().functions_list,
model="gpt-4-0613",
)
response_message = response_message.choices[0].message
st.session_state.query_history.append(response_message)
st.session_state.assistant_response.append(response_message["content"])
st.write(response_message["content"])
| [] |
2024-01-10 | CaltechExperimentalGravity/RLzoo | rlzoo~common~distributions.py | """Definition of parametrized distributions. Adapted from openai/baselines"""
import copy
from functools import wraps
import numpy as np
import tensorflow as tf
from gym import spaces
def expand_dims(func):
@wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
result = tf.expand_dims(result, axis=-1)
return result
return wrapper
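# Note: expand_dims appends a trailing axis to the wrapped method's result, so the
# per-sample scalars returned by neglogp/kl/entropy below come back with shape (..., 1).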
class Distribution(object):
"""A particular probability distribution"""
def set_param(self, *args, **kwargs):
raise NotImplementedError
def sample(self, *args, **kwargs):
"""Sampling from distribution. Allow explore parameters."""
raise NotImplementedError
def logp(self, x):
"""Calculate log probability of a sample."""
return -self.neglogp(x)
def neglogp(self, x):
"""Calculate negative log probability of a sample."""
raise NotImplementedError
def kl(self, *parameters):
"""Calculate Kullback–Leibler divergence"""
raise NotImplementedError
def entropy(self):
"""Calculate the entropy of distribution."""
raise NotImplementedError
class Categorical(Distribution):
"""Creates a categorical distribution"""
def __init__(self, ndim, logits=None):
"""
Args:
ndim (int): total number of actions
logits (tensor): logits variables
"""
self._ndim = ndim
self._logits = logits
self.param = self._logits
@property
def ndim(self):
return copy.copy(self._ndim)
def set_param(self, logits):
"""
Args:
logits (tensor): logits variables to set
"""
self._logits = logits
self.param = self._logits
def get_param(self):
return copy.deepcopy(self._logits)
def sample(self):
""" Sample actions from distribution, using the Gumbel-Softmax trick """
u = np.array(np.random.uniform(0, 1, size=np.shape(self._logits)), dtype=np.float32)
res = tf.argmax(self._logits - tf.math.log(-tf.math.log(u)), axis=-1)
return res
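    # Gumbel-max trick: for u ~ Uniform(0, 1), -log(-log(u)) is a standard Gumbel sample,
    # and argmax_i(logits_i + gumbel_i) is distributed as Categorical(softmax(logits)),
    # which is exactly the perturbation applied above.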
def greedy_sample(self):
""" Get actions greedily """
_probs = tf.nn.softmax(self._logits)
return tf.argmax(_probs, axis=-1)
def logp(self, x):
return -self.neglogp(x)
@expand_dims
def neglogp(self, x):
x = np.array(x)
if np.any(x % 1):
raise ValueError('Input float actions in discrete action space')
x = tf.convert_to_tensor(x, tf.int32)
x = tf.one_hot(x, self._ndim, axis=-1)
return tf.nn.softmax_cross_entropy_with_logits(x, self._logits)
@expand_dims
def kl(self, logits):
"""
Args:
logits (tensor): logits variables of another distribution
"""
a0 = self._logits - tf.reduce_max(self._logits, axis=-1, keepdims=True)
a1 = logits - tf.reduce_max(logits, axis=-1, keepdims=True)
ea0 = tf.exp(a0)
ea1 = tf.exp(a1)
z0 = tf.reduce_sum(ea0, axis=-1, keepdims=True)
z1 = tf.reduce_sum(ea1, axis=-1, keepdims=True)
p0 = ea0 / z0
return tf.reduce_sum(
p0 * (a0 - tf.math.log(z0) - a1 + tf.math.log(z1)), axis=-1)
@expand_dims
def entropy(self):
a0 = self._logits - tf.reduce_max(self._logits, axis=-1, keepdims=True)
ea0 = tf.exp(a0)
z0 = tf.reduce_sum(ea0, axis=-1, keepdims=True)
p0 = ea0 / z0
return tf.reduce_sum(p0 * (tf.math.log(z0) - a0), axis=-1)
class DiagGaussian(Distribution):
"""Creates a diagonal Gaussian distribution """
def __init__(self, ndim, mean_logstd=None):
"""
Args:
            ndim (int): the dimension of actions
mean_logstd (tensor): mean and logstd stacked on the last axis
"""
self._ndim = ndim
self.mean = None
self.logstd = None
self.std = None
self.action_mean = None
self.action_scale = None
self.param = self.mean, self.logstd
if mean_logstd is not None:
self.set_param(mean_logstd)
@property
def ndim(self):
return copy.copy(self._ndim)
def set_param(self, mean_logstd):
"""
Args:
mean_logstd (tensor): mean and log std
"""
self.mean, self.logstd = mean_logstd
self.std = tf.math.exp(self.logstd)
self.param = self.mean, self.logstd
def get_param(self):
""" Get parameters """
return copy.deepcopy(self.mean), copy.deepcopy(self.logstd)
def sample(self):
""" Get actions in deterministic or stochastic manner """
return self.mean, self.std * np.random.normal(0, 1, np.shape(self.mean))
def greedy_sample(self):
""" Get actions greedily/deterministically """
return self.mean
def logp(self, x):
return -self.neglogp(x)
@expand_dims
def neglogp(self, x):
# here we reverse the action normalization to make the computation of negative log probability correct
x = (x - self.action_mean)/self.action_scale
return 0.5 * tf.reduce_sum(tf.square((x - self.mean) / self.std), axis=-1) \
+ 0.5 * np.log(2.0 * np.pi) * float(self._ndim) + tf.reduce_sum(self.logstd, axis=-1)
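    # The expression above is the standard diagonal-Gaussian negative log-likelihood:
    # sum_i [ 0.5 * ((x_i - mu_i) / sigma_i)^2 + log(sigma_i) ] + 0.5 * ndim * log(2*pi).
    # Note that action_mean / action_scale default to None and are expected to be set
    # externally before neglogp is called on normalized actions.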
@expand_dims
def kl(self, mean_logstd):
"""
Args:
mean_logstd (tensor): mean and logstd of another distribution
"""
mean, logstd = mean_logstd
return tf.reduce_sum(
logstd - self.logstd +
(tf.square(self.std) + tf.square(self.mean - mean))
/ (2.0 * tf.square(tf.math.exp(logstd))) - 0.5, axis=-1)
@expand_dims
def entropy(self):
return tf.reduce_sum(
self.logstd + 0.5 * np.log(2.0 * np.pi * np.e), axis=-1)
def make_dist(ac_space):
"""Get distribution based on action space
:param ac_space: gym.spaces.Space
"""
if isinstance(ac_space, spaces.Discrete):
return Categorical(ac_space.n)
elif isinstance(ac_space, spaces.Box):
assert len(ac_space.shape) == 1
return DiagGaussian(ac_space.shape[0])
else:
raise NotImplementedError
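# Minimal usage sketch (illustrative, with hypothetical environment values):
#
#   import gym
#   dist = make_dist(gym.make('CartPole-v1').action_space)   # -> Categorical(ndim=2)
#   dist.set_param(tf.zeros((1, 2)))                         # uniform logits
#   action = dist.sample()                                    # tensor of sampled action indices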
| [] |
2024-01-10 | BuSSoLoTTi/untitled-detective-game | backend~helpers~gpt_helper.py | import json
import openai
import os
import tiktoken.core
from tiktoken import get_encoding
from helpers.db_helper import DBHelper
class GPTHelper:
def __init__(self):
openai.api_key = 'API_KEY'
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def _ocultar_informacoes_sensiveis(self, data):
if type(data) is str:
data = json.loads(data)
        # Hide sensitive NPC information
for npc in data.get("npcs", []):
campos_sensiveis_npc = [
"historia",
"relacionamentos",
"motivacoes",
"honestidade",
"habilidadesEspeciais",
"opinioes",
"localizacaoDuranteCrime",
"culpado"
]
for campo in campos_sensiveis_npc:
npc.pop(campo, None)
        # Remove the entire solution section
data.pop("solucao", None)
return data
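    # Illustrative effect (hypothetical data): given
    #   {"npcs": [{"nome": "Ana", "culpado": True, ...}], "solucao": {...}}
    # the returned dict keeps "nome" but drops "culpado", "historia", "motivacoes", etc.
    # and removes the whole "solucao" entry, so case data shown to the player cannot
    # leak the answer.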
def gerar_resumo(self, caso_json):
system_prompt = f"""
Voce é um policial que esta ajudando o detetive no caso
voce ira receber um Json com os dados do caso e deverar criar um resumo do caso
voce nao deve passar nenuma infomaçao que nao esteja no json
faça um resumo do caso como se fosse um relatorio para o seu chefe
utilize paragrafos organizar
Envie em formato de texto
"""
json_suprimido = self._ocultar_informacoes_sensiveis(caso_json)
messages = [{
"role": "system",
"content": str(system_prompt)
}, {
"role": "user",
"content": str(caso_json)
}]
responses = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
max_tokens=1024,
temperature=0.8,
stream=True,
)
return responses
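    # Note: because stream=True, gerar_resumo returns a streaming generator; the caller
    # must iterate the chunks and read each delta to assemble the summary text.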
def gerar_prompt_suspeito(self, npc_id, caso_json):
with DBHelper() as db:
npc = db.get_json_npc(npc_id)
npc_json = json.loads(npc[0])
prompt = f"""
Voce é um NPC de um jogo de investigação. Nao saia do seu personagem. Caso nescesario voce pode mentir.
você esta sendo investigado por um crime.
voce esta na sala de interrogatorio e o detetive esta te fazendo algumas perguntas.
as informaçoes que voce tem sao:
"""
prompt += json.dumps(npc_json, indent=4)
return prompt
def create_case(self):
print("Creating case...")
prompt = """Crie um caso completo de investigação para um jogo baseado em texto, incluindo NPCs, a situação do crime, pistas, interações e a solução do caso. Por favor, siga o seguinte formato JSON:
{
"caso": "Nome do caso",
"descricao": "Descrição do caso",
"npcs": [
{
"nome": "Nome do NPC",
"historia": "História de fundo",
"relacionamentos": {
"amigos": ["Nome", "Nome"],
"inimigos": ["Nome"]
},
"ocupacao": "Profissão ou papel",
"personalidade": "Descrição breve",
"motivacoes": "Objetivos ou desejos",
"honestidade": "Nível de honestidade",
"habilidadesEspeciais": "Habilidades ou conhecimentos notáveis",
"opinioes": {
"NPC1": "Opinião sobre o NPC1",
"NPC2": "Opinião sobre o NPC2"
},
"localizacaoDuranteCrime": "Localização durante o evento principal",
"culpado": true/false
},
... (outros 4 NPCs )
],
"pistas": [
{
"descricao": "Pista 1",
"origem": "Origem da pista",
"relevancia": "Relevância da pista para o caso"
},
...
],
"eventos": [
{
"descricao": "Evento 1",
"participantes": ["Nome", "Nome"],
"momento": "Momento em relação ao crime"
},
...
],
"localizacoes": [
{
"nome": "Nome do local",
"descricao": "Descrição do local",
"importancia": "Importância do local para o caso"
},
...
],
"solucao": {
"resumo": "Resumo da solução do caso",
"culpado": "Nome do culpado",
"provas": ["Pista relevante 1", "Depoimento relevante", ...]
}
}
"""
content = [
{
"role": "system",
"content": str(prompt)
}
]
print("Sending prompt to GPT-4...")
response = openai.ChatCompletion.create(
model="gpt-4",
messages=content,
max_tokens=3000,
temperature=0.8,
stream=True,
stop=["\n\n"]
)
text = ""
for response in response:
if response.choices[0].finish_reason:
break
text += response.choices[0].delta.content
print("Received response from GPT-4.")
return text
    def _count_tokens(self, text, model_name="gpt-3.5-turbo"):
        # encoding = get_encoding(model_name)
        # token_count = len(encoding.encode(text))
        # return token_count
        # Rough approximation: count whitespace-separated words rather than true BPE tokens.
        return len(text.split(" "))
    def _adjust_conversation_for_tokens(self, messages, new_message, max_tokens=4096):
        """
        Adjusts the message list so that it, together with the new message, does not exceed max_tokens.
        Removes the oldest messages if necessary, but keeps the 'system' message.
        """
        total_tokens = self._count_tokens(new_message['content'])
        adjusted_messages = messages.copy()
        for message in reversed(adjusted_messages[1:]):  # Skip the first message (system)
            total_tokens += self._count_tokens(message['content'])
            if total_tokens > max_tokens:
                # Remove the oldest message (the one right after the 'system' message)
                total_tokens -= self._count_tokens(adjusted_messages.pop(1)['content'])
else:
break
adjusted_messages.append(new_message)
return adjusted_messages
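    # Illustrative behaviour (hypothetical numbers): with max_tokens=10 and a history of
    # [system, user1, assistant1, user2], the oldest non-system messages are dropped
    # first, so the returned list keeps the system prompt, whatever recent turns still
    # fit, and new_message appended at the end.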
def chat(self, historic, message, functions=None):
if functions is None:
functions = []
prompt = self._adjust_conversation_for_tokens(historic, message, max_tokens=512)
print("Sending prompt to GPT-4...")
print(prompt)
responses = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=prompt,
max_tokens=1024,
temperature=1.4,
stream=True,
)
return responses
| [
"Crie um caso completo de investigação para um jogo baseado em texto, incluindo NPCs, a situação do crime, pistas, interações e a solução do caso. Por favor, siga o seguinte formato JSON:\n \n {\n \"caso\": \"Nome do caso\",\n \"descricao\": \"Descrição do caso\",\n \"npcs\": [\n {\n \"nome\": \"Nome do NPC\",\n \"historia\": \"História de fundo\",\n \"relacionamentos\": {\n \"amigos\": [\"Nome\", \"Nome\"],\n \"inimigos\": [\"Nome\"]\n },\n \"ocupacao\": \"Profissão ou papel\",\n \"personalidade\": \"Descrição breve\",\n \"motivacoes\": \"Objetivos ou desejos\",\n \"honestidade\": \"Nível de honestidade\",\n \"habilidadesEspeciais\": \"Habilidades ou conhecimentos notáveis\",\n \"opinioes\": {\n \"NPC1\": \"Opinião sobre o NPC1\",\n \"NPC2\": \"Opinião sobre o NPC2\"\n },\n \"localizacaoDuranteCrime\": \"Localização durante o evento principal\",\n \"culpado\": true/false\n },\n ... (outros 4 NPCs )\n ],\n \"pistas\": [\n {\n \"descricao\": \"Pista 1\",\n \"origem\": \"Origem da pista\",\n \"relevancia\": \"Relevância da pista para o caso\"\n },\n ...\n ],\n \"eventos\": [\n {\n \"descricao\": \"Evento 1\",\n \"participantes\": [\"Nome\", \"Nome\"],\n \"momento\": \"Momento em relação ao crime\"\n },\n ...\n ],\n \"localizacoes\": [\n {\n \"nome\": \"Nome do local\",\n \"descricao\": \"Descrição do local\",\n \"importancia\": \"Importância do local para o caso\"\n },\n ...\n ],\n \"solucao\": {\n \"resumo\": \"Resumo da solução do caso\",\n \"culpado\": \"Nome do culpado\",\n \"provas\": [\"Pista relevante 1\", \"Depoimento relevante\", ...]\n }\n }\n ",
"\n Voce é um policial que esta ajudando o detetive no caso\n voce ira receber um Json com os dados do caso e deverar criar um resumo do caso\n voce nao deve passar nenuma infomaçao que nao esteja no json\n faça um resumo do caso como se fosse um relatorio para o seu chefe\n utilize paragrafos organizar \n Envie em formato de texto\n ",
"\n Voce é um NPC de um jogo de investigação. Nao saia do seu personagem. Caso nescesario voce pode mentir.\n você esta sendo investigado por um crime.\n voce esta na sala de interrogatorio e o detetive esta te fazendo algumas perguntas.\n as informaçoes que voce tem sao:\n \n "
] |
2024-01-10 | kwarkmc/3DChatbot | Prompt~test_Prompt.py | import os
import openai
import sys
import re
from googletrans import Translator
import datetime
translator = Translator()
openai.api_key = ''
prompt2 = f"""
당신의 임무는 사용자의 답변을 받아 그에 알맞은 답변과 행동을 생성하여 제공하는 것입니다.
행동은 애니메이션 캐릭터가 움직이는 동작으로 작성해주세요.
또한 행동을 묘사할 때 얼굴이나 표정에 대한 관련 묘사를 제외하고 주로 팔과 다리의 움직임 중심으로 자세하게 묘사하여 작성해주세요.
행동 예시는 다음과 같습니다.
user: 안녕!
chatgpt: (양손을 들고 좌우로 흔든다.) 안녕!
user: 오늘 날씨 어때?
chatgpt: (오른손을 이마에 올리고 하늘을 본다.) 오늘 날씨는 좋아요.
user: 오늘 날씨 어때?
chatgpt: (런닝머신 위에서 뛰듯이 가벼운 발걸음으로 움직인다.) 오늘은 맑은 날씨라서 실외 활동하기 좋을 것 같아요.
user: 스쿼시 하고싶다.
chatgpt: (왼팔을 들어 스쿼시 라켓을 잡는 모션을 보여준다.)
"""
prompt1 = f"""
당신의 임무는 사용자의 답변을 받아 그에 알맞은 답변과 행동을 생성하여 제공하는 것입니다.
행동은 얼굴은 제외한 팔과 다리를 움직이는 동작으로 자세하게 묘사하여 작성해주세요.
또한 행동은 애니메이션 캐릭터가 움직이는 동작으로 작성해주세요
행동 예시는 첫번째는 다음과 같습니다.
사용자: 안녕!
봇: (양손을 들고 좌우로 흔든다.) 안녕!
"""
# System prompt: the instruction we want the chatbot to follow
prompt = f"""
당신의 임무는 사용자의 답변을 받아 그에 알맞은 답변과 행동을 생성하여 제공하는 것입니다.
행동은 표정을 제외하고 몸을 움직이는 행동으로 작성해주세요.
행동 예시 첫번째는 다음과 같습니다.
user: 안녕!
chatgpt: (팔을 머리 위로 들고 흔들며 인사한다) 안녕!
행동 예시 두번째는 다음과 같습니다.
user: 나 오늘 상 받았어
chatgpt: (앞으로 손뼉을 치며 점프하며 축하한다) 정말 축하해!
행동 예시 세번째는 다음과 같습니다.
user: 스쿼시 하고싶다.
chatgpt: (왼팔을 들어 스쿼시 라켓을 잡는 모션을 보여준다.)
행동은 몸을 움직이는 동작에 중점을 두고 작성해주세요.
움직이는 동작을 정확하게 묘사하여 작성해주세요.
예를 들어 머리 위로 손을 흔들고 싶다면 '손을 머리 위로 올려 흔든다'와 같은 문장을 작성해주세요.
행동은 괄호 안에 작성하고 사용자 답변에 알맞은 답변과 함께 답해주세요.
"""
messages = [{'role':'system', 'content':prompt2}]
# Interactive conversation loop
while True:
message = input('user: ')
if message:
messages.append(
{'role':'user', 'content':message},
)
chat = openai.ChatCompletion.create(
model = 'gpt-3.5-turbo',
presence_penalty = 0.7,
frequency_penalty = 0.7,
max_tokens = 150,
messages = messages
)
reply = chat.choices[0].message.content
print(f'chatgpt: {reply}')
        # Extract only the text inside parentheses (the described action) from the reply
p = re.compile('\(([^)]+)')
action = p.findall(reply)
action = " ".join(action)
action_trans = translator.translate(action, dest='en', src='ko')
action_trans = action_trans.text
print('행동: ', action)
print('Action: ', action_trans)
current_time = datetime.datetime.now()
file_name = current_time.strftime("%Y-%m-%d_%H-%M-%S") + ".txt"
file_path = './motion-diffusion-model/TEXT/' + file_name
        # Save the English-translated action as a timestamped .txt file at that path
with open(file_path, 'w') as f:
f.write(action_trans)
messages.append({'role':'assistant', 'content':reply})
'''
return response.choices[0].message.content
response = get_completion(prompt)
print(response)
''' | [
"\n당신의 임무는 사용자의 답변을 받아 그에 알맞은 답변과 행동을 생성하여 제공하는 것입니다. \n행동은 애니메이션 캐릭터가 움직이는 동작으로 작성해주세요.\n또한 행동을 묘사할 때 얼굴이나 표정에 대한 관련 묘사를 제외하고 주로 팔과 다리의 움직임 중심으로 자세하게 묘사하여 작성해주세요. \n\n\n행동 예시는 다음과 같습니다. \nuser: 안녕!\nchatgpt: (양손을 들고 좌우로 흔든다.) 안녕!\n\nuser: 오늘 날씨 어때?\nchatgpt: (오른손을 이마에 올리고 하늘을 본다.) 오늘 날씨는 좋아요. \n\nuser: 오늘 날씨 어때? \nchatgpt: (런닝머신 위에서 뛰듯이 가벼운 발걸음으로 움직인다.) 오늘은 맑은 날씨라서 실외 활동하기 좋을 것 같아요. \n\nuser: 스쿼시 하고싶다. \nchatgpt: (왼팔을 들어 스쿼시 라켓을 잡는 모션을 보여준다.) \n\n",
"\n당신의 임무는 사용자의 답변을 받아 그에 알맞은 답변과 행동을 생성하여 제공하는 것입니다. \n행동은 얼굴은 제외한 팔과 다리를 움직이는 동작으로 자세하게 묘사하여 작성해주세요. \n또한 행동은 애니메이션 캐릭터가 움직이는 동작으로 작성해주세요\n\n행동 예시는 첫번째는 다음과 같습니다.\n사용자: 안녕!\n봇: (양손을 들고 좌우로 흔든다.) 안녕!\n\n",
"\n당신의 임무는 사용자의 답변을 받아 그에 알맞은 답변과 행동을 생성하여 제공하는 것입니다.\n행동은 표정을 제외하고 몸을 움직이는 행동으로 작성해주세요. \n\n행동 예시 첫번째는 다음과 같습니다. \nuser: 안녕!\nchatgpt: (팔을 머리 위로 들고 흔들며 인사한다) 안녕!\n\n행동 예시 두번째는 다음과 같습니다. \nuser: 나 오늘 상 받았어\nchatgpt: (앞으로 손뼉을 치며 점프하며 축하한다) 정말 축하해! \n\n행동 예시 세번째는 다음과 같습니다. \nuser: 스쿼시 하고싶다. \nchatgpt: (왼팔을 들어 스쿼시 라켓을 잡는 모션을 보여준다.)\n\n행동은 몸을 움직이는 동작에 중점을 두고 작성해주세요.\n움직이는 동작을 정확하게 묘사하여 작성해주세요.\n예를 들어 머리 위로 손을 흔들고 싶다면 '손을 머리 위로 올려 흔든다'와 같은 문장을 작성해주세요.\n행동은 괄호 안에 작성하고 사용자 답변에 알맞은 답변과 함께 답해주세요.\n"
] |
2024-01-10 | kwarkmc/3DChatbot | Prompt~emotion.py | import openai
import sys
import re
openai.api_key = ''
prompt = f"""
당신의 임무는 사용자의 답변을 받아 그에 알맞은 답변과 행동을 생성하여 제공하는 것입니다.
행동은 애니메이션 캐릭터가 움직이는 동작으로 작성해주세요.
또한 행동을 묘사할 때 얼굴이나 표정에 대한 관련 묘사를 제외하고 주로 팔과 다리의 움직임 중심으로 자세하게 묘사하여 작성해주세요.
행동 예시는 다음과 같습니다.
user: 안녕!
chatgpt: (팔을 머리 위로 들고 흔들며 인사한다) 안녕!
user: 오늘 날씨 어때?
chatgpt: (오른손을 이마에 올리고 하늘을 본다.) 오늘 날씨는 좋아요.
user: 오늘 날씨 어때?
chatgpt: (런닝머신 위에서 뛰듯이 가벼운 발걸음으로 움직인다.) 오늘은 맑은 날씨라서 실외 활동하기 좋을 것 같아요.
user: 스쿼시 하고싶다.
chatgpt: (왼팔을 들어 스쿼시 라켓을 잡는 모션을 보여준다.)
그리고 해당 ()속에 있는 행동에서 신체의 움직임을 제외한 얼굴과 표정 묘사가 포함되어 있다면 다음 예시처럼 변환하여 작성해주세요.
예시는 다음과 같습니다.
chatgpt: (격한 표정으로 이마에 손을 얹고 씁쓸하게 미소를 짓는다)
변환된 예시
chatgpt: (이마에 손을 얹는다)
"""
# Prompt used for emotion classification
emotion_pro = f"""
당신의 임무는 chatgpt의 답변을 받아 답변에서 표현하고 있는 감정이 무엇인지 작성하는 것입니다.
감정 유형은 ['슬픔', '행복', '두려움', '놀람', '기쁨', '분노', '중립'] 총 7개입니다.
위 7개의 감정 유형 중에서 하나를 선택해 작성해주세요.
"""
messages = [{'role':'system', 'content':prompt}]
# Interactive conversation loop
while True:
message = input('user: ')
if message:
messages.append(
{'role':'user', 'content':message},
)
chat = openai.ChatCompletion.create(
model = 'gpt-3.5-turbo',
presence_penalty = 0.7,
frequency_penalty = 0.7,
max_tokens = 150,
messages = messages
)
reply = chat.choices[0].message.content
emotion = openai.ChatCompletion.create(
model = 'gpt-3.5-turbo',
temperature = 0.5,
messages = [{'role':'system', 'content':emotion_pro},
{'role':'user', 'content':reply}]
)
emo = emotion.choices[0].message.content
print(f'chatgpt: {reply}')
print(f'emotion: {emo}')
    # Extract only the text inside parentheses from the reply (currently disabled)
#p = re.compile('\(([^)]+)')
#action = p.findall(reply)
#print('행동: ', action[0])
messages.append({'role':'assistant', 'content':reply})
'''
return response.choices[0].message.content
response = get_completion(prompt)
print(response)
''' | [
"\n당신의 임무는 사용자의 답변을 받아 그에 알맞은 답변과 행동을 생성하여 제공하는 것입니다. \n행동은 애니메이션 캐릭터가 움직이는 동작으로 작성해주세요.\n또한 행동을 묘사할 때 얼굴이나 표정에 대한 관련 묘사를 제외하고 주로 팔과 다리의 움직임 중심으로 자세하게 묘사하여 작성해주세요. \n\n행동 예시는 다음과 같습니다. \nuser: 안녕!\nchatgpt: (팔을 머리 위로 들고 흔들며 인사한다) 안녕! \n\nuser: 오늘 날씨 어때?\nchatgpt: (오른손을 이마에 올리고 하늘을 본다.) 오늘 날씨는 좋아요. \n\nuser: 오늘 날씨 어때? \nchatgpt: (런닝머신 위에서 뛰듯이 가벼운 발걸음으로 움직인다.) 오늘은 맑은 날씨라서 실외 활동하기 좋을 것 같아요. \n\nuser: 스쿼시 하고싶다. \nchatgpt: (왼팔을 들어 스쿼시 라켓을 잡는 모션을 보여준다.) \n\n그리고 해당 ()속에 있는 행동에서 신체의 움직임을 제외한 얼굴과 표정 묘사가 포함되어 있다면 다음 예시처럼 변환하여 작성해주세요. \n예시는 다음과 같습니다. \nchatgpt: (격한 표정으로 이마에 손을 얹고 씁쓸하게 미소를 짓는다)\n\n변환된 예시\nchatgpt: (이마에 손을 얹는다)\n\n",
"\n당신의 임무는 chatgpt의 답변을 받아 답변에서 표현하고 있는 감정이 무엇인지 작성하는 것입니다. \n\n감정 유형은 ['슬픔', '행복', '두려움', '놀람', '기쁨', '분노', '중립'] 총 7개입니다. \n위 7개의 감정 유형 중에서 하나를 선택해 작성해주세요.\n"
] |
2024-01-10 | Open-Technology-Foundation/dejavu.ai | dv | #!/usr/bin/python3
"""
DéjàVu GPT Terminal Chatbot and Scripting
Gary Dean [email protected]
https://github.com/Open-Technology-Foundation/dejavu.ai
#: git clone https://github.com/Open-Technology-Foundation/dejavu.ai /tmp/dejavu \
&& /tmp/dejavu/dv.install
"""
# pylint: disable=global-statement
# pylint: disable=wildcard-import
# pylint: disable=line-too-long
# pylint: disable=wrong-import-position
# pylint: disable=invalid-name
# pylint: disable=broad-exception-caught
# pylint: disable=multiple-statements
import os
import datetime
import pytz
import time
import random
import re
import readline
import signal
import subprocess
import sys
import textwrap
import openai
import tiktoken
from colorama import Fore, Style
"""
Script name and directory are derived from the actual basename of this script (argv0).
"""
ScriptName = os.path.realpath(sys.argv.pop(0))
ScriptDir = os.path.dirname(ScriptName)
ScriptName = os.path.basename(ScriptName)
""" If the script direcory is not present in the current PATH, then append it. """
if ScriptDir not in sys.path: sys.path.append(ScriptDir)
from dejavu_std import *
from awesome_prompts import *
""" Raw stdout Output flag; True == no color, default is False. """
RawOutput = False
""" The local user home directory is HOME+/.+ScriptName. It is created unconditionally. """
dvHome = f'{HOME}/.{ScriptName}'
os.makedirs(dvHome, exist_ok=True)
""" If the file dvHome/default.dv does not exist, then use this as a flag
that the directory is empty and copy all example .dv scripts to this directory. """
if not os.path.exists(dvHome + '/default.dv'):
printinfo(f'Welcome to DéjàVu, {USER.title()}.')
if not copy_files_recursive(ScriptDir, dvHome, '*.dv', verbose=True):
printerr('Default files copy error.')
sys.exit()
string = readfile(dvHome + '/default.dv')
string = re.sub(r'\n!USER_NAME \S+', '\n!USER_NAME ' + USER.upper(), string)
string = re.sub(r'\n/USER_NAME \S+', '\n/USER_NAME ' + USER.upper(), string)
writefile(dvHome + '/default.dv', string)
""" Define cannonical repository URLs for dejavu """
REPOSITORY = 'https://github.com/Open-Technology-Foundation/dejavu.ai.git'
REPOSITORY_VERSION = f'https://raw.githubusercontent.com/Open-Technology-Foundation/dejavu.ai/master/dejavu.version?xyz=420{int(random.random()*1000000)}'
""" Dejavu Version Check Flag, default False. When set, this flag will trigger an install/upgrade process from the above repository URL. """
UpdateCheck = False
# Process command line and define ConvFile to use
Version = readfile(f'{ScriptDir}/dejavu.version').strip()
# ----------------------------------------------------------------------------
def find_dv_file(dvfilename: str, **kwargs) -> str:
"""
Finds/Creates fqfn for filenames with extension .dv.
If not mustexist and file does not exist
then defaults to sys.path[0]+'/'+filename.ext
and creates file with contents of /usr/share/dejavu.ai/default.dv.
Return '' if fail.
"""
mustexist = kwargs.get('mustexist', True)
searchpaths = kwargs.get('searchpaths', [dvHome, './', HOME])
dvfilename = find_file(dvfilename, mustexist=mustexist, searchpaths=searchpaths, ext='.dv')
if len(dvfilename) == 0:
return ''
if not os.path.exists(dvfilename):
try:
dvtext = readfile(ScriptDir + '/default.dv')
dvtext = re.sub(r'\n!USER_NAME \S+', '\n!USER_NAME ' + USER.upper(), dvtext)
dvtext = re.sub(r'\n/USER_NAME \S+', '\n/USER_NAME ' + USER.upper(), dvtext)
writefile(dvfilename, dvtext)
except:
printerr('Could not create script ' + dvfilename)
return ''
printinfo('New dv script ' + dvfilename + ' created.')
return dvfilename
ConvFile = ''
cmdTypeAhead = []
argvTypeAhead = []
Instructions = []
cmdEcho = True
Verbose = True
cmdExit = False
AutoSave = False
# ----------------------------------------------------------------------------
def read_dvfile(dvfile: str) -> bool:
""" Read a .dv script file. """
global cmdTypeAhead, argvTypeAhead, Instructions, SYSTEM_NAME
cmdTypeAhead = []; Instructions = []
dvfile = find_dv_file(dvfile, mustexist=True, ext='.dv')
if len(dvfile) == 0:
printerr(f'Script "{dvfile}" does not exist.')
return False
lne = readfile(dvfile)
Lines = lne.split('\n')
lne = ''
while len(Lines) > 0:
line = Lines.pop(0).rstrip()
if not line:
Instructions.append(''); continue
if line[0] == '#':
Instructions.append(line); continue
# handle \ line continuations
while line[-1] == '\\' and len(Lines) > 0:
line = line[0:-1] + Lines.pop(0).rstrip()
lne = line.rstrip('\r\n')
if not lne: lne = ''; continue
if lne[0] == '!' or lne[0] == '/':
alne = lne.split()
if (alne[0] == '/prompt' or alne[0] == '!prompt') and len(alne) > 1:
if alne[1] != '"""':
Instructions.append('/prompt ' + ' '.join(alne[1:]))
lne = ''
continue
promptstr = ''
while len(Lines) > 0:
line = Lines.pop(0).rstrip()
if line == '"""':
Instructions.append('/prompt ' + promptstr)
break
promptstr += line + '\\n'
elif alne[0] == '/instruction' or alne[0] == '!instruction' and len(alne) > 1:
if alne[1] != '"""':
Instructions.append('/instruction ' + ' '.join(alne[1:]))
lne = ''
continue
instr = ''
while len(Lines) > 0:
line = Lines.pop(0).rstrip()
if line == '"""':
Instructions.append('/instruction ' + instr)
break
instr += line + '\\n'
else:
Instructions.append(' '.join(alne))
lne = ''
continue
Instructions.append('/instruction ' + lne)
lne = ''
# end loop processing
lne = lne.rstrip('\r\n')
if lne: Instructions.append('/instruction ' + lne)
cmdTypeAhead = Instructions.copy()
cmdTypeAhead = [x for x in cmdTypeAhead if not x.startswith('#') and x]
cmdTypeAhead.insert(0, '/echo off')
if Verbose:
cmdTypeAhead.append('/status short')
cmdTypeAhead.append('/echo on')
if len(argvTypeAhead) > 0:
for lne in argvTypeAhead: cmdTypeAhead.append(lne)
argvTypeAhead = []
SYSTEM_NAME = os.path.splitext(os.path.basename(dvfile))[0].replace(' ', '_').upper()
return True
# ----------------------------------------------------------------------------
def dvUsage():
""" dv short invocation help """
print('DéjàVu - GPT Terminal vs ' + Version)
print("Usage: " + ScriptName + f""" [-vqlfxuV] [-c cmd] [dvfile]
Where 'dvfile' is a DéjàVu script file.dv.
Defaults to '~/.dv/default.dv'
-l|--list List all DéjàVu scripts in '~/.dv/'.
-a|--autosave on|off
If on, upon exit, append current conversation to current dv script. Default is off.
-c|--cmd 'cmd' Execute 'cmd' on entry to DéjàVu.
-x|--exit Immediately exit DéjàVu after first command has been executed.
-v|--verbose Verbose on. Default is on.
-q|--quiet Verbose off.
-V|--version Print DéjàVu version.
-C|--no-color Do not use color. Default is to use color.
-u|--upgrade Upgrade DéjàVu from git repository.
Git repository is set to:
{REPOSITORY}
--help Full Help manpages.
""")
# argv processing ------------------------------------------------------------
updateDV = False
while len(sys.argv) > 0:
sysargv = sys.argv.pop(0).strip()
if not sysargv: continue
if sysargv in ['-u', '--upgrade', '--update']:
updateDV = True
elif sysargv in ['-V', '--version']:
print(f'{ScriptName} vs {Version}')
sys.exit(0)
elif sysargv in ['-v', '--verbose']:
Verbose = 1; cmdEcho = 1
elif sysargv in ['-q', '--quiet']:
Verbose = 0; cmdEcho = 0
elif sysargv in ['-l', '--list']:
printinfo(f'Dejavu script files in {dvHome}')
for listfile in os.listdir(dvHome):
if listfile.endswith('.dv'):
filesize = os.stat(f'{dvHome}/{listfile}').st_size / 1024
modified_date = datetime.datetime.fromtimestamp(os.stat(dvHome + '/' + listfile).st_mtime).strftime('%Y-%m-%d %H:%M:%S')
printstd(f'{listfile:12s} {filesize:7.2f}KB {modified_date:20s}')
sys.exit(0)
elif sysargv in ['-c', '--cmd']:
if len(sys.argv) > 0:
argvparam = sys.argv.pop(0).strip()
if argvparam[0] == '\\': argvparam = argvparam[1:]
argvTypeAhead.append(argvparam)
else:
printerr('Command was not specified for ' + sysargv)
sys.exit(1)
elif sysargv in ['-a', '--autosave']:
AutoSave = True
elif sysargv in ['-x', '--exit']:
Verbose = False
cmdExit = True
elif sysargv in ['-C', '--no-color']:
useColor = False
RawOutput=True
elif sysargv in ['-h', '-?']:
dvUsage()
sys.exit()
elif sysargv == '--help':
os.execvp('man', ['man', ScriptName])
sys.exit()
# de-aggregate aggregated short options
elif re.match(r'^-[caCfvqVhuxl]', sysargv):
sys.argv = [''] + [f'-{c}' for c in sysargv[1:]] + sys.argv
elif re.match(r'^--', sysargv):
printerr('Invalid option ' + sysargv)
sys.exit(1)
elif sysargv[0:1] == '-':
printerr('Invalid option ' + sysargv)
sys.exit(1)
else:
ConvFile = sysargv
"""
Intercept update instruction from command line
and update this program from the repository.
Exit after finishing.
"""
if updateDV:
tempdir_upgrade = tempname('upgrade', '')
os.makedirs(tempdir_upgrade, exist_ok=True)
os.chdir(tempdir_upgrade)
qqq = '-v' if Verbose else '-q'
subprocess.call(['git', 'clone', qqq, REPOSITORY, tempdir_upgrade])
installcmd = f'{tempdir_upgrade}/{ScriptName}.install'
installargs = [installcmd, qqq, '-a' if qqq == '-q' else '']
try:
os.execvp(installcmd, installargs)
except:
sys.exit(0) # should never get here.
# Conversation script validation and default ---------------------------------
"""
If a Conversation .dv Script has not been specified on the command line,
then ConvFile defaults to the file default.dv in the dvHome directory.
"""
if len(ConvFile) == 0:
ConvFile = dvHome + '/default.dv'
try:
ConvFile = find_dv_file(ConvFile, mustexist=False)
if len(ConvFile) == 0:
printerr(f'DéjàVu script "{ConvFile}" could not be opened.')
sys.exit(1)
except Exception as e:
printerr(f'DéjàVu script "{ConvFile}" could not be created.', str(e))
sys.exit(1)
""" The ConvFile basename. """
ConvFileName = os.path.basename(ConvFile)
# ----------------------------------------------------------------------------
def getOpenAIKeys() -> bool:
""" # Get OpenAI API keys """
try:
openai.api_key = os.environ['OPENAI_API_KEY']
except KeyError:
printerr('Environment variable OPENAI_API_KEY is not defined.')
printinfo('Go to https://openai.com/api for your own API key.',
' $ export OPENAI_API_KEY="your_key"',
'If you set up your openai account as an organization, you will',
'also have to set OPENAI_ORGANIZATION_ID:',
' $ export OPENAI_ORGANIZATION_ID="your_organization"',
'Both these environment variables should be set in your ~/.bashrc',
'file or in /etc/bash.bashrc.')
sys.exit(1)
try:
openai.organization = os.environ['OPENAI_ORGANIZATION_ID']
except KeyError:
openai.organization = ''
return True
getOpenAIKeys()
"""
#!/usr/bin/python3
import os
import sys
class Agent:
SYSTEM_NAME = 'SYSTEM_AGENT'
USER_NAME = 'AGENT_USER'
AI_NAME = 'AI_ASSISTANT'
prompt = ''
engine = 'gpt-3.5-turbo'
token_limit = 4096 if (engine in [ 'text-davinci-003', 'gpt-3.5-turbo', 'gpt-3.5-turbo-0301']) else 2048
temperature = 1.0
top_p = 1.0
response_tokens = int(token_limit/2)
freq_pen = 0.0
pres_pen = 0.0
stop = []
def __init__(self):
pass
def assign(self, **kwargs):
for key, value in kwargs.items():
if hasattr(self, key):
setattr(self, key, value)
else:
print(f'{str(key)} is not a valid attribute of the `Agent` class.', file=sys.stderr)
agent = Agent()
agent.assign(engine='text-davinci-003', temperature=0.8)
print(agent.engine) # Output: text-davinci-003
print(agent.temperature) # Output: 0.8
agent.assign(invalid_key='test')
"""
SYSTEM_NAME = 'SYSTEM_AGENT'
USER_NAME = 'AGENT_USER'
AI_NAME = 'AI_ASSISTANT'
prompt = ''
Prompt = prompt
engine = 'gpt-3.5-turbo'
token_limit = 4000
response_tokens = int(token_limit/2)
temperature = 1.0
top_p = 1.0
freq_pen = 0.0
pres_pen = 0.0
stop = ''
if not read_dvfile(ConvFile):
printerr('Error reading ' + ConvFile)
sys.exit(1)
historyFile = initHistory(ConvFile)
conversation = []
text_block = ''
# ----------------------------------------------------------------------------
def num_tokens_from_string(gptstring: str, encoding_name: str='gpt2') -> int:
"""Returns the number of tokens in a text gpt string."""
encoding = tiktoken.get_encoding(encoding_name)
num_tokens = len(encoding.encode(gptstring))
return num_tokens
# ----------------------------------------------------------------------------
def gpt35_completion(gprompt: str, gconversation: list=[], **kwargs):
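    """ Send the prompt (and, for chat models, the conversation) to the OpenAI API and return the response text. """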
global token_limit, used_tokens
gengine = kwargs.get('engine', 'gpt-3.5-turbo')
gtemperature= float(max(0.0, min(1.0, kwargs.get('temperature', 0.7))))
gtop_p = float(max(0.0, min(1.0, kwargs.get('top_p', 0.9))))
gtokens = int(kwargs.get('tokens', -1))
gfreq_pen = float(max(-2.0, min(2.0, kwargs.get('freq_pen', 0.2))))
gpres_pen = float(max(-2.0, min(2.0, kwargs.get('pres_pen', 0.65))))
gstop = kwargs.get('stop', [])
gtimeout = int(max(2, min(99420, kwargs.get('timeout', -1))))
gprompt = role_tag_replace(gprompt.encode(encoding='ASCII', errors='ignore').decode())
used_tokens = num_tokens_from_string(gprompt)
# If gtokens (aka max_tokens, or response_tokens) is <=0,
# then gtokens is calculated to max amount.
if gtokens <= 0:
gtokens = max(16, (token_limit - 200) - used_tokens)
if (gtokens + used_tokens) > token_limit:
printerr(f'Too many response tokens requested ({gtokens}).', 'Try reducing /tokens, or deleting or summarising some of your conversation.')
return ''
if Verbose and cmdEcho:
printinfo(f'{used_tokens} total tokens in prompt')
printlog(f'gtokens={gtokens}, engine={engine}, conv={len(gconversation)}')
# gprompt = role_tag_replace(gprompt.encode(encoding='ASCII', errors='ignore').decode())
# chats for chatQA models have to be structured differently
if gengine in ['gpt-3.5-turbo', 'gpt-4'] and len(gconversation) > 0:
messages = [{"role": "system", "name": SYSTEM_NAME, "content": gprompt}]
for conv in gconversation:
if conv.startswith('<<AI_NAME>>:'):
role = 'assistant'
name = AI_NAME
conv = conv[12:].lstrip()
elif conv.startswith('<<USER_NAME>>:'):
role = 'user'
name = USER_NAME
conv = conv[14:].lstrip()
elif conv.startswith('<<SYSTEM>>:'):
role = 'system'
name = SYSTEM_NAME
conv = conv[11:].lstrip()
else:
role = 'system'
name = SYSTEM_NAME
messages.append({"role": role, "name": name, "content": role_tag_replace(conv)})
printlog(json.dumps(messages))
try:
response = openai.ChatCompletion.create(
model=gengine,
messages=messages,
temperature=gtemperature,
max_tokens=gtokens,
top_p=gtop_p,
frequency_penalty=gfreq_pen,
presence_penalty=gpres_pen,
stop=gstop,
timeout=gtimeout)
except Exception as gpte:
printerr('GPT experienced an error.', str(gpte))
return ''
for choice in response.choices:
if "text" in choice: return choice.text
# If no response with text is found, return the first response's content (which may be empty)
return response.choices[0].message.content
else:
try:
gpt3_response = openai.Completion.create(
engine=gengine,
prompt=gprompt,
temperature=gtemperature,
max_tokens=gtokens,
top_p=gtop_p,
frequency_penalty=gfreq_pen,
presence_penalty=gpres_pen,
stop=gstop,
timeout=90)
except Exception as gpte:
printerr('GPT experienced an error.', str(gpte))
return ''
return gpt3_response['choices'][0]['text'].strip('\n')
# ---------------------------------------------------------------------------
def cmd_help():
""" dejavu command help """
printinfo('DéjàVu System Commands', style=Style.BRIGHT)
json_data = json.loads(readfile(ScriptDir + '/dejavu-command-help.json'))
rowcount = 1
for cmdhelp in json_data:
if (rowcount + len(cmdhelp)) >= getScreenRows():
rowcount = 0
if is_terminal(sys.stdout):
try:
input(Style.RESET_ALL + 'More...')
print('\r\x1b[2K', end='')
except KeyboardInterrupt:
print('^C', file=sys.stderr)
break
if len(cmdhelp) == 1:
printinfo(cmdhelp[0], style=Style.BRIGHT)
rowcount += 1
continue
printinfo(f' {cmdhelp[0]:17s} {cmdhelp[1]}')
rowcount += 1
for chelp in cmdhelp[2:]:
printinfo(f' %17s {chelp}' % '')
rowcount += 1
# ---------------------------------------------------------------------------
def cmdstatus(showall: bool = False):
""" Print current parameters """
# global Prompt, conversation
global UpdateCheck, token_limit, used_tokens
printinfo(f'DéjàVu GPT Terminal vs {Version} | Enter ! for help.')
def pp(pref: str, suff: str):
printinfo(f'%16s: {suff}' % pref)
pp( 'Agent', (SYSTEM_NAME + (' '+ConvFile.replace(HOME, '~')) if showall else SYSTEM_NAME))
pp( 'User Name', USER_NAME)
pp( 'Assistant Name', AI_NAME)
pp( 'AI Engine', engine)
pp( 'AutoSave', 'On' if AutoSave else 'Off')
if showall:
token_limit = 4000 if (engine in [ 'text-davinci-003', 'gpt-3.5-turbo', 'gpt-3.5-turbo-0301', 'gpt-4']) else 2000
used_tokens = num_tokens_from_string(Prompt + ''.join(conversation))
pp( 'Token Limit', token_limit)
pp( 'Response Tokens', response_tokens)
pp( 'Tokens Used', int(used_tokens))
pp( 'Tokens Left', int(token_limit - used_tokens))
pp( 'Temperature', temperature)
pp( 'Top_p', top_p)
pp( 'Frequency', freq_pen)
pp( 'Presence', pres_pen)
pp( 'Stop', str(stop) if len(stop) else 'None')
pp( 'Command Echo', 'On' if cmdEcho else 'Off')
pp( 'Shell', os.path.basename(SHELL))
pp( 'Editor', os.path.basename(EDITOR))
pp( 'Browser', os.path.basename(BROWSER))
pp( 'Command History', historyFile.replace(HOME, '~'))
if not UpdateCheck:
try:
rstat = requests.get(REPOSITORY_VERSION, timeout=2, headers={'User-Agent': 'Wget/1.21.2', 'Accept': '*/*', 'Accept-Encoding': 'identity', 'Connection': 'Keep-Alive'})
data = str(rstat.text).strip()
if Version < data:
pp('Update', f'Version {data} of DéjàVu is now available.')
pp('', f"Run '{ScriptName} -q --update' to update.")
UpdateCheck = True
except:
pass
def role_tag_replace(text: str) -> str:
"""
Replace role tag with role name.
global SYSTEM_NAME, AI_NAME, USER_NAME
"""
return text.replace('<<AI_NAME>>', AI_NAME).replace('<<USER_NAME>>', USER_NAME).replace('<<SYSTEM_NAME>>', SYSTEM_NAME)
def role_name_replace(text: str) -> str:
"""
Replace role name with role tag.
global SYSTEM_NAME, AI_NAME, USER_NAME
"""
return text.replace(AI_NAME, '<<AI_NAME>>').replace(USER_NAME, '<<USER_NAME>>').replace(SYSTEM_NAME, '<<SYSTEM_NAME>>')
# ---------------------------------------------------------------------------
def PromptReplace(conversationText: str='', AIs: str='<<AI_NAME>>', AIr: str='', USs: str='<<USER_NAME>>', USr: str='', SYs: str='<<SYSTEM_NAME>>', SYr: str='') -> str:
"""
Replace <<>> markers in prompt and conversation
with the appropriate text
"""
# global Prompt, AI_NAME, USER_NAME
if not SYr: SYr = SYSTEM_NAME
if not AIr: AIr = AI_NAME
if not USr: USr = USER_NAME
    newp = Prompt + ('\n' if Prompt and Prompt[-1] != '\n' else '') \
+ conversationText
newp = newp.replace(AIs, AIr, -1).replace(USs, USr, -1).replace(SYs, SYr, -1)
return newp.strip() + '\n'
# ----------------------------------------------------------------------------
def autoSave(dv_filename: str, auto=True):
""" Autosave current chat """
# global conversation, Instructions,
if cmdEcho: printinfo(('Auto' if auto else '') + 'Saving ' + dv_filename, end='')
autofound = False
autosave_file = tempname('autosave', '.dv')
writefile(autosave_file, '', 'w')
for instr in Instructions:
# comments or blank lines, just insert now
if len(instr) == 0 or instr[0] == '#':
writefile(autosave_file, f'{instr}\n', 'a')
continue
# !cmd forgiveness, but standardize on /cmd.
if instr[0] == '!': instr = '/' + instr[1:]
instr_tok = tokenize(instr)
if len(instr_tok) == 0:
writefile(autosave_file, '\n', 'a')
continue
instr_tok[0] = instr_tok[0].lower()
if instr_tok[0].startswith('/temp'):
writefile(autosave_file, f'/temperature {temperature}\n', 'a')
continue
if instr_tok[0].startswith('/engi'):
writefile(autosave_file, f'/engine {engine}\n', 'a')
continue
if instr_tok[0].startswith('/ai_n'):
writefile(autosave_file, f'/ai_name {AI_NAME}\n', 'a')
continue
if instr_tok[0].startswith('/user'):
writefile(autosave_file, f'/user_name {USER_NAME}\n', 'a')
continue
if instr_tok[0].startswith('/top_p'):
writefile(autosave_file, f'/top_p {top_p}\n', 'a')
continue
if instr_tok[0].startswith('/freq'):
writefile(autosave_file, f'/freq_pen {freq_pen}\n', 'a')
continue
if instr_tok[0].startswith('/pres'):
writefile(autosave_file, f'/pres_pen {pres_pen}\n', 'a')
continue
if instr_tok[0].startswith('/auto'):
writefile(autosave_file, f'/autosave {("On" if AutoSave else "Off")}\n', 'a')
autofound = True
continue
# if instr_tok[0].startswith('/echo'):
# writefile(autosave_file, f'/echo {("On" if cmdEcho else "Off")}\n', 'a')
# continue
if instr_tok[0].startswith('/stop'):
writefile(autosave_file, f'/stop {str(stop)}\n', 'a')
continue
if instr_tok[0].startswith('/prom') or instr_tok[0].startswith('!prom'):
instr = '/prompt """\n' \
+ ' '.join(instr_tok[1:]).replace('\\n', '\n') \
+ '\n"""\n'
writefile(autosave_file, instr, 'a')
continue
# ignore /conversation for now in Instructions Section
if instr_tok[0].startswith('/conv') or instr_tok[0].startswith('!conv'):
continue
        # an explicit instruction to gpt
if instr_tok[0].startswith('/inst'):
instr = '/instruction """\n' \
+ ' '.join(instr_tok[1:]).replace('\\n', '\n') \
+ '\n"""\n'
writefile(autosave_file, instr, 'a')
continue
# a generic instruction or other /command
writefile(autosave_file, instr + '\n', 'a')
if not autofound:
writefile(autosave_file, f'/autosave {("On" if AutoSave else "Off")}\n', 'a')
# now write out the complete new conversation
for instr in conversation:
writefile(autosave_file, '/conversation ' + instr.replace('\n', '\\n') + '\n', 'a')
try:
# rename files
if os.path.exists(dv_filename):
os.replace(dv_filename, dv_filename + '~')
# replace current script with new script
os.rename(autosave_file, dv_filename)
except Exception as saveerr:
printerr(f'Error renaming {autosave_file} to {dv_filename}: {str(saveerr)}')
# ----------------------------------------------------------------------------
def orderly_exit():
""" Exit script nicely """
# global AutoSave, Verbose
if AutoSave: autoSave(ConvFile)
if Verbose:
print('\r\x1b[2K' if UseColor() else '', end='')
printinfo(f'Exiting {ScriptName} {ConvFile}')
print(Style.RESET_ALL if UseColor() else '', end='')
sys.exit(0)
# ----------------------------------------------------------------------------
if __name__ == '__main__':
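    # Main interactive loop: read queued or typed input, dispatch /commands, and send chat turns to GPT.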
conversation = []
while True:
if len(cmdTypeAhead) == 0 and cmdExit: sys.exit(0)
getScreenColumns()
used_tokens = num_tokens_from_string(Prompt + ''.join(conversation))
if len(cmdTypeAhead) > 0:
userInput = cmdTypeAhead.pop(0)
            if len(userInput.strip()) == 0 or userInput[0].rstrip() == '#': continue
if cmdEcho and (userInput[0:5] != '/echo' and userInput[0:5] != '!echo'):
printstd(USER_NAME + ':', color=Fore.YELLOW + Style.BRIGHT)
print(userInput)
else:
cmdEcho = True
printstd(f'{len(conversation)+1:d}. {USER_NAME}:', color=Fore.YELLOW + Style.BRIGHT)
try:
userInput = input().lstrip()
except KeyboardInterrupt:
if input_key('Exit Dejavu?') == 'y': orderly_exit()
continue
if len(userInput.strip()) == 0 or userInput[0].rstrip() == '#': continue
# Process /command
if userInput[0] == '!' or userInput[0] == '/':
command = userInput[1:].strip()
tok = tokenize(command)
# help
if len(tok) == 0:
cmd_help()
continue
tok[0] = tok[0].lower()
# /help|/?|/!|//
if tok[0] in ['help', '?', '/']:
signal.signal(signal.SIGINT, signal.SIG_IGN)
try:
subprocess.run(['man', 'dv'], check=False)
except KeyboardInterrupt:
print('^C', file=sys.stderr)
pass
except Exception as e:
printerr('Error running `man`.', e)
signal.signal(signal.SIGINT, signal.SIG_DFL)
continue
# /exit
if tok[0] in ['exit', 'quit']:
orderly_exit()
continue
# /chdir|/cd
if tok[0] in ['cd', 'chdir', 'chdi']:
if len(tok) > 1:
if tok[1] == 'list':
while True:
directories = get_directories()
directories.append('..')
tmp = selectList(directories, 'ChDir ')
if not tmp: break
try: os.chdir(tmp)
except Exception as e:
printerr(f'Could not change directory into "{tmp}"', e)
else:
try: os.chdir(' '.join(tok[1:]))
except Exception as e:
printerr(f'Could not change directory into "{" ".join(tok[1:])}"', e)
if cmdEcho:
printinfo('Current directory is ' + os.getcwd())
continue
# /syntax lang func
if tok[0] in ['synt', 'syntax']:
if len(tok) < 3:
printerr('Requires options')
printinfo('/syntax language cmd|function', 'eg, /syntax php strpos')
continue
cmdTypeAhead = [f'In {tok[1]}, list complete synopsis, syntax, parameters, options, keywords, and usage for "{tok[2]}", giving coded examples: ']
continue
# /echo [on|off]
if tok[0] == 'echo':
if len(tok) > 1:
tok[1] = tok[1].lower()
if tok[1] in ['1', 'on', 'true']:
cmdEcho = True
elif tok[1] in ['0', 'off', 'false']:
cmdEcho = False
else:
printerr('Invalid argument')
else: printinfo('Command Echo is ' + str('On' if cmdEcho else 'Off'))
continue
# /rawoutput [on|off*]
if tok[0] in ['rawo', 'rawoutput']:
if len(tok) > 1:
tok[1] = tok[1].lower()
if tok[1] in ['1', 'on', 'true']:
RawOutput = True
elif tok[1] in ['0', 'off', 'false']:
RawOutput = False
else:
printerr('Invalid argument')
else: print('# Raw Output is ' + str('On' if RawOutput else 'Off'), end='\n')
continue
# /autosave [on|off]
if tok[0] in ['auto', 'autosave']:
if len(tok) > 1:
tok[1] = tok[1].lower()
if tok[1] in ['1', 'on', 'true']:
AutoSave = True
elif tok[1] in ['0', 'off', 'false']:
AutoSave = False
else:
printerr('Invalid argument')
else: printinfo('AutoSave is ' + str('On' if AutoSave else 'Off'))
continue
# /scripts /agents /files
if tok[0] in ['scri', 'scripts', 'file', 'files', 'agen', 'agents']:
script = selectFile(['.', dvHome], '*.dv', 'Select Script')
if not script: continue
try:
subprocess.run(f'{EDITOR} "{script}"', shell=True, executable=SHELL, check=False)
if input_key(f'Run {script}?') != 'y': continue
cmdTypeAhead = [f'/run {script}']
except KeyboardInterrupt:
print('^C', file=sys.stderr)
continue
except Exception as e:
printerr('ERROR in editor.', str(e)); continue
continue
# /edit
if tok[0] == 'edit':
modify_datestamp = os.path.getmtime(ConvFile)
try:
subprocess.run(f'{EDITOR} {ConvFile}', shell=True, executable=SHELL, check=False)
except Exception as e:
printerr('Edit error ' + str(e)); continue
if modify_datestamp != os.path.getmtime(ConvFile):
if input_key(f'Re-Load {ConvFile}?') == 'y':
cmdTypeAhead = ['/run ' + ConvFile]
continue
# /exec||!! [program [args]]
if tok[0] in ['exec', '!']:
try:
if len(tok) == 1:
execstr = SHELL
os.environ['PS1'] = "\rDejaVu:" + os.environ['PS1']
else:
execstr = ' '.join(tok[1:])
subprocess.run(execstr, shell=True, executable=SHELL, check=False)
except KeyboardInterrupt:
print('^C', file=sys.stderr)
continue
except Exception as e:
printerr('Exec error ' + str(e))
continue
# /status [short|long]
if tok[0] in ['stat', 'status']:
longstatus = True
if len(tok) > 1:
if tok[1] == 'short': longstatus = False
cmdstatus(longstatus)
continue
# /username [name]
if tok[0] in ['user', 'user_name']:
if len(tok) > 1:
tmp = re.sub(r'[^a-zA-Z0-9_-]', '', '-'.join(tok[1:])).strip().upper()
if len(tmp) < 4 or len(tmp) > 16:
printerr('Invalid length in user_name "' + tmp + '". Min 4, Max 16.')
continue
USER_NAME = tmp
if cmdEcho: printinfo('USER_NAME is now ' + USER_NAME)
continue
# /ai_name [name]
if tok[0] in ['ai_n', 'ai_name']:
if len(tok) > 1:
tmp = re.sub(r'[^a-zA-Z0-9_-]', '', '-'.join(tok[1:])).strip().upper()
if len(tmp) < 4 or len(tmp) > 16:
printerr('Invalid length in ai_name "' + tmp + '". Min 4, Max 16.')
continue
AI_NAME = tmp
if cmdEcho: printinfo('AI_NAME is now ' + AI_NAME)
continue
# /engine [list|engine|update]
if tok[0] in ['engi', 'engine', 'engines']:
if tok[0] == 'engines' and len(tok) == 1: tok.append('list')
if len(tok) > 1:
if tok[1] == 'list' or tok[1] == 'select':
with open(f'{ScriptDir}/engines.json') as f:
data = json.load(f)
gptengines = []
for item in data['data']:
gptengines.append(item['id'])
gptengines.sort()
tmp = selectList(gptengines, 'Select GPT Engine')
gptengines = None
if len(tmp) > 0:
engine = tmp
elif tok[1] == 'update':
printinfo('Updating GPT engine list...')
tmpfile = tempname('engines.list')
enginesfile = f'{ScriptDir}/engines.json'
try:
subprocess.run(f'openai api engines.list',
stdout=open(tmpfile, 'w'),
shell=True, executable=SHELL, check=False)
os.rename(enginesfile, enginesfile+'~')
os.rename(tmpfile, enginesfile)
printinfo('GPT engine list updated.')
except:
printerr('GPT engine list not updated.')
pass
continue
else:
engine = tok[1]
token_limit = 4000 if (engine in [ 'text-davinci-003', 'gpt-3.5-turbo', 'gpt-3.5-turbo-0301', 'gpt-4']) else 2000
if response_tokens > (token_limit-100):
response_tokens = int(token_limit/2)
if cmdEcho:
printinfo(f'Engine is now {engine}')
printinfo(f'Max Allowed Tokens: {token_limit}, Max Response Tokens: {response_tokens}')
continue
# /temperature [1.0]
if tok[0] in ['temp', 'temperature']:
if len(tok) > 1 and is_num(tok[1]):
temperature = max(0.0, min(1.0, float(tok[1])))
if cmdEcho: printinfo('Temperature is now ' + str(temperature))
continue
# /top_p [1.0]
if tok[0] in ['top_', 'top_p']:
if len(tok) > 1 and is_num(tok[1]):
top_p = max(0.0, min(1.0, float(tok[1])))
if cmdEcho: printinfo('Top_p is now ' + str(top_p))
continue
# /tokens [number]
if tok[0] in ['toke', 'tokens']:
if len(tok) > 1 and is_num(tok[1]):
if int(tok[1]) > 0:
response_tokens = max(16, min(token_limit - used_tokens - 16, int(tok[1])))
else:
response_tokens = -1
if cmdEcho:
printinfo('Max Tokens is now ' + str(response_tokens))
continue
# /freq_pen [0.0]
if tok[0] in ['freq', 'freq_pen']:
if len(tok) > 1 and is_num(tok[1]):
freq_pen = max(-2, min(2, float(tok[1])))
if cmdEcho: printinfo('Frequency Penalty is now ' + str(freq_pen))
continue
# /pres_pen [0.0]
if tok[0] in ['pres', 'pres_pen']:
if len(tok) > 1 and is_num(tok[1]):
pres_pen = max(-2, min(2, float(tok[1])))
if cmdEcho: printinfo('Presence Penalty is now ' + str(pres_pen))
continue
# /spacetime [location]
if tok[0] in ['spac', 'spacetime']:
temp = f"Today's date is {datetime.datetime.today().strftime('%Y-%m-%d')}, and current {USER_NAME} local time is {datetime.datetime.now((time.tzname[0] if time.timezone >= 0 else time.tzname[1])).strftime('%H:%M %Z')}."
if len(tok) > 1:
temp += f" Current {USER_NAME} location is {tok[1:]}."
Prompt = Prompt + '\n\n## SPACETIME\n\n' + temp + '\n'
# Add the response to the conversation list
conversation.append(f'<<AI_NAME>>: I acknowledge: {temp}')
continue
# /system [/append*|/insert|/replace prompt]
if tok[0] in ['syst', 'system']:
tok[0] = '/prompt'
# /prompt [/append*|/insert|/replace prompt]
if tok[0] in ['prom', 'prompt']:
if len(tok) > 1:
if tok[1].startswith('/appe') or tok[1].startswith('/add'):
Prompt = Prompt + '\n' \
+ ' '.join(tok[2:]).replace('\\n', '\n').strip()
elif tok[1].startswith('/inse') or tok[1].startswith('/insert'):
Prompt = ' '.join(tok[2:]).replace('\\n', '\n').strip() \
+ '\n' + Prompt
elif tok[1].startswith('/repl') or tok[1].startswith('/replace'):
Prompt = ' '.join(tok[2:]).replace('\\n', '\n').strip()
else:
Prompt = Prompt + '\n' \
+ ' '.join(tok[1:]).replace('\\n', '\n').strip()
elif cmdEcho: printinfo(Prompt, prefix='')
continue
# /conversation [conversation]
if tok[0] in ['conv', 'conversation']:
if len(tok) > 1:
conversation.append(' '.join(tok[1:]).replace('\\n', '\n'))
else:
for tmp in conversation:
printinfo(tmp, prefix='')
continue
# /list [0-0]
if tok[0] == 'list':
short = False
if len(tok) > 1:
if tok[1][0:4] == 'shor': short = True; tok.pop(1)
elif tok[1][0:4] == 'long': short = False; tok.pop(1)
if len(tok) < 2: tok.append('all')
rnge = int_list(tok[1:], 1, len(conversation), False)
if not rnge: continue
for rindex in rnge:
text = conversation[rindex - 1].replace('<<USER_NAME>>', USER_NAME).replace('<<AI_NAME>>', AI_NAME)
if text[0:len(USER_NAME) + 2] == USER_NAME + ': ':
printstd(f'{rindex:d}. {USER_NAME}:', color=Fore.YELLOW, style=Style.DIM)
text = text[len(USER_NAME) + 2:]
elif text[0:len(AI_NAME) + 2] == AI_NAME + ': ':
printstd(f'{rindex:d}. {AI_NAME}:', color=Fore.GREEN, style=Style.DIM)
text = text[len(AI_NAME) + 2:] + '\n\n'
for _line in text.splitlines():
if short:
printinfo(_line[0:ScreenColumns - 3] + '...', prefix='')
break
printinfo(textwrap.fill(_line, width=ScreenColumns), prefix='')
continue
# /clear [conversation*|prompt|all]
if tok[0] in ['clea', 'clear']:
if len(tok) == 1 or (tok[1] in ['conv', 'conversation']):
conversation = []
if cmdEcho: printinfo('All conversation has been cleared.')
elif tok[1] in ['prom', 'prompt']:
prompt = ''
Prompt = ''
if cmdEcho: printinfo('Prompt has been cleared.')
elif tok[1] == 'all':
prompt = ''
Prompt = ''
conversation = []
if cmdEcho: printinfo('Prompt and conversation have been cleared.')
continue
# /delete 0-0
if tok[0] in ['dele', 'delete']:
if len(tok) < 2:
printerr('Range not specified.')
continue
rnge = int_list(tok[1:], 1, len(conversation), True)
if not rnge: continue
i = int(0)
for rdel in rnge:
del conversation[rdel - 1]
i += 1
if cmdEcho: printinfo(f'{i:d} entries deleted')
continue
# /tldr [range]
if tok[0] in ['tldr', 'tl;dr']:
if len(tok) < 2: tok.append(str(len(conversation)))
rnge = int_list(tok[1:], 1, len(conversation), False)
if not rnge: continue
i = int(0)
for rtldr in rnge:
text = conversation[rtldr - 1]
text = text.replace('<<AI_NAME>>', AI_NAME).replace('<<USER_NAME>>', USER_NAME).replace('<<SYSTEM_NAME>>', SYSTEM_NAME)
try:
tldr_response = gpt35_completion(
text + '\n\nTL;DR: ',
[],
engine='text-davinci-003',
temperature=temperature,
top_p=top_p,
tokens=response_tokens,
freq_pen=freq_pen,
pres_pen=pres_pen,
stop=stop)
if len(tldr_response) == 0: continue
except:
printerr('GPT experienced an error. Possibly overloaded.')
continue
printstd(AI_NAME + ': ', color=Fore.GREEN)
tldr_response = 'TLDR'+ ('' if len(rnge) == 1 else f'[{str(i+1)}]') + ': ' + tldr_response
printinfo(tldr_response, prefix='')
conversation.append(f'<<AI_NAME>>: {tldr_response}')
i += 1
continue
# /save [script]
if tok[0] == 'save':
if len(tok) < 2: filename = ConvFile
else: filename = tok[1].replace('"', '').replace('"', '')
filename = find_dv_file(filename, mustexist=False)
if os.path.exists(filename):
if input_key(f'\n{filename} exists. Overwrite?') != 'y':
continue
autoSave(filename, False)
if cmdEcho: printinfo('Session saved to ' + filename)
continue
# /vars | /locals
if tok[0] in ['vars', 'locals']:
varsearch = tok[1] if len(tok) > 1 else ''
locals_list = [(key, value) for key, value in locals().items() if not callable(value)]
for key, value in locals_list:
value = str(value).replace('\n', '\\n')
if value[0:7] == '<module': continue
if varsearch and key[0:len(varsearch)] != varsearch: continue
print(f"{key}={value}")
print('sys.path=', sys.path)
print()
print('Environment:')
for env in ['USER', 'HOME', 'SHELL', 'EDITOR', 'BROWSER', 'TMPDIR', 'PATH', 'OPENAI_API_KEY', 'OPENAI_ORGANIZATION_ID']:
print(f'{env}={str(os.getenv(env))}')
continue
# /history [range]
if tok[0] in ['hist', 'history']:
if len(tok) > 1:
if is_num(tok[1][0]):
rnge = int_list(tok[1:], 1, readline.get_current_history_length(), False)
if not rnge: continue
for rhis in rnge:
cmdTypeAhead.append(readline.get_history_item(int(rhis)))
else:
printerr(f'Unknown parameter {tok[1]}.')
continue
history_length = readline.get_current_history_length()
prev_item = ''
for i in range(history_length, 0, -1):
item = readline.get_history_item(i)
if len(item) <= 2:
readline.remove_history_item(i - 1)
continue
if item == prev_item:
readline.remove_history_item(i - 1)
continue
prev_item = item
history_length = readline.get_current_history_length()
for i in range(1, history_length + 1):
print(i, readline.get_history_item(i))
continue
# /awesome [update]
if tok[0] in ['awes', 'awesome']:
curdir = os.getcwd()
os.chdir(ScriptDir)
awe_prompt = select_awesome_prompt(tok[0:])
os.chdir(curdir)
if len(awe_prompt) == 0: continue
print(awe_prompt)
userInput = awe_prompt
# /run [script]
elif tok[0] == 'run':
if len(tok) < 2:
script = selectFile(['.', dvHome], '*.dv', 'Select Script to Run')
if not script: continue
else:
script = find_dv_file(' '.join(tok[1:]), mustexist=True)
if len(script) == 0:
printerr(f'dvScript {script} does not exist.')
continue
try:
if AutoSave: autoSave(ConvFile)
readline.write_history_file(historyFile)
read_dvfile(script)
ConvFile = script
historyFile = initHistory(ConvFile)
except Exception:
printerr(f'Run failed for "{script}". Reloading "{ConvFile}".')
read_dvfile(ConvFile)
historyFile = initHistory(ConvFile)
continue
# /instruction [instruction]
elif tok[0] in ['inst', 'instruction']:
if len(tok) > 1:
userInput = ' '.join(tok[1:]).replace('\\n', '\n').strip()
else:
for tmp in Instructions:
if tmp[0:8] == '/prompt ' or tmp[0:8] == '!prompt ':
tmp = '/prompt """\n' + \
tmp[8:].replace('\\n', '\n') + \
'"""'
elif tmp[0:13] == '/instruction ' or tmp[0:13] == '!instruction ':
tmp = '/instruction """\n' + \
tmp[13:].replace('\\n', '\n') + '\n' + \
'"""'
printinfo(tmp, prefix='')
continue
# proceed to gpt
# /import [filename]
elif tok[0] in ['impo', 'import']:
if len(tok) < 2:
tmpfile = tempname('command', '.dv')
writefile(tmpfile, '')
while True:
try:
subprocess.run(f'{EDITOR} {tmpfile}', shell=True, executable=SHELL, check=False)
except Exception as e:
printerr('Import file error.', str(e))
userInput = ''
break
userInput = readfile(tmpfile)
if len(userInput) == 0: break
ynr = input_key(f'Execute instructions in {tmpfile}? Or re-edit?', ['y', 'n', 'r'], ['n'])
if ynr == 'y': break
if ynr == 'r': continue
userInput = ''
break
try:
os.remove(tmpfile)
os.remove(tmpfile + '~')
except FileNotFoundError: pass
if not userInput: continue
print('\n' + userInput.strip())
# proceed to gpt
else:
filename = tok[1]
if not os.path.exists(filename):
printerr(filename + ' does not exist.')
continue
if cmdEcho: printinfo('Importing from text file ' + filename)
userInput = readfile(filename).strip() + '\n'
# proceed to gpt
# /summarise [conv|prompt|*all]
elif tok[0] in ['summ', 'summarize', 'summarise']:
if len(tok) < 2: what = 'conversation'
else: what = tok[1]
if what[0:4] == 'prom' or what == 'prompt':
userInput = PromptReplace() \
+ f'\n\n{USER_NAME}: Write a detailed summary of all the above: '
elif what[0:4] == 'conv' or what == 'conversation':
userInput = text_block \
+ f'\n\n{USER_NAME}: Write a detailed summary of all the above: '
elif what == 'all':
userInput = PromptReplace(text_block) \
+ f'\n\n{USER_NAME}: Write a detailed summary of all the above: '
else:
printerr('Invalid option. Valid options for /summarize are prompt|conv|all.')
continue
# proceed to gpt
# Invalid command
else:
printerr('Invalid command: /' + command)
if Verbose: printinfo("/ or /help for command help.")
continue
# Prepend username to user input
conversation.append(f'<<USER_NAME>>: {userInput}')
# Aggregate the entire conversation
text_block = ('\n'.join(conversation)).strip('\n') \
+ '\n' + AI_NAME + ': '
# Send the entire conversation to GPT-3
try:
response = gpt35_completion(
(PromptReplace(text_block) if engine not in [ 'gpt-3.5-turbo', 'gpt-3.5-turbo-0301', 'gpt-4' ] else PromptReplace()),
conversation,
engine=engine, temperature=temperature, top_p=top_p,
tokens=response_tokens, freq_pen=freq_pen, pres_pen=pres_pen, stop=stop)
if len(response) == 0:
conversation.pop()
continue
except Exception as e:
printerr('GPT experienced an error. Possibly overloaded.', e)
conversation.pop()
continue
# Add the response to the conversation list
conversation.append(f'<<AI_NAME>>: {response}')
if RawOutput:
print(response, end='\n')
else:
# Print the response from GPT-3
printstd(f'{len(conversation):d}. {AI_NAME}:', color=Fore.GREEN)
for _line in response.splitlines():
print(textwrap.fill(_line, width=ScreenColumns))
        # eg, dejavu -x -c 'command'
if len(cmdTypeAhead) == 0 and cmdExit: sys.exit(0)
# end
| [
"PLACEHOLDER\n\n## SPACETIME\n\nPLACEHOLDER\n",
"PLACEHOLDER\nP L A C E H O L D E R",
"A C E H O L D E R\nPLACEHOLDER",
"ignore",
"P L A C E H O L D E R",
"PLACEHOLDER\\n"
] |
2024-01-10 | AkashiCoin/nonebot-plugin-openai | nonebot_plugin_openai~types.py | from io import BytesIO
from pathlib import Path
import httpx
from openai import AsyncOpenAI
from openai.types.chat import (
ChatCompletionMessage,
ChatCompletionMessageParam,
ChatCompletionMessageToolCall,
ChatCompletionSystemMessageParam,
)
from openai.types.image import Image
from typing import (
Any,
Callable,
Coroutine,
Dict,
Generic,
List,
Literal,
Optional,
TypeVar,
Union,
)
from pydantic import BaseModel
class Channel(BaseModel):
api_key: str = ""
base_url: Optional[str] = None
organization: Optional[str] = None
class ToolCallResponse:
"""
    ToolCallResponse wraps the response produced by a tool call.
    Attributes:
        name (str): Name of the tool.
        content_type (Literal["str", "openai_image", "image", "audio"]): Format of the content sent to the user.
        content (Optional[Union[Any, str, Image, bytes, Path]]): Content to send to the user.
        data (Optional[str]): Content returned to OpenAI as the tool-call result.
"""
def __init__(
self,
name: str,
content_type: Literal["str", "openai_image", "image", "audio"],
content: Optional[Union[Any, str, Image, bytes, Path]] = None,
data: Optional[str] = None,
):
self.name = name
self.content_type = content_type
self.content = content
self.data = data
class ToolCallConfig(BaseModel):
name: str
enable: bool = True
class ToolCall:
"""
    ToolCall encapsulates the information needed to invoke a tool.
    Attributes:
        name (str): Name of the tool.
        func (Callable[..., Coroutine[Any, Any, ToolCallResponse]]):
            The tool's function: a coroutine accepting arbitrary arguments and returning a ToolCallResponse.
        func_info (dict): Extra information about the tool function.
        config (ToolCallConfig): Configuration of the tool.
"""
def __init__(
self,
name: str = "",
func: Callable[..., Coroutine[Any, Any, ToolCallResponse]] = None,
func_info: dict = None,
config: ToolCallConfig = ToolCallConfig(name="Unknown"),
):
self.name = name
self.func = func
self.func_info = func_info
self.config = config
class ToolCallRequest:
tool_call: ChatCompletionMessageToolCall
func: Callable[..., Coroutine[Any, Any, ToolCallResponse]]
config: ToolCallConfig
def __init__(
self,
tool_call: ChatCompletionMessageToolCall,
func: Callable[..., Coroutine[Any, Any, ToolCallResponse]],
config: ToolCallConfig,
):
self.tool_call = tool_call
self.func = func
self.config = config
class Preset(BaseModel):
name: str
prompt: str
class Session(BaseModel):
id: str
messages: List[Union[ChatCompletionMessage, ChatCompletionMessageParam]] = []
user: str = ""
preset: Optional[Preset] = None
max_length: int = 8
running: bool = False
def get_messages(self, preset: Preset = None):
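        """Return the optional preset system message followed by the most recent messages, widening the window so it never starts on a dangling tool reply."""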
if self.preset:
preset = self.preset
_preset = []
if preset:
_preset = [
ChatCompletionSystemMessageParam(content=preset.prompt, role="system")
]
split_length = self.max_length
while (
split_length < len(self.messages)
and isinstance(self.messages[-split_length], dict)
and self.messages[-split_length]["role"] == "tool"
):
split_length += 1
return _preset + self.messages[-split_length:]
T = TypeVar("T", bound=ToolCallConfig)
class FuncContext(Generic[T]):
session: Session
http_client: httpx.AsyncClient
openai_client: AsyncOpenAI
config: T
def __init__(
self,
session: Session,
http_client: httpx.AsyncClient,
openai_client: AsyncOpenAI,
config: T,
):
self.session = session
self.http_client = http_client
self.openai_client = openai_client
self.config = config
| [] |
2024-01-10 | AkashiCoin/nonebot-plugin-openai | test~test_func.py | from openai import OpenAI
from httpx import Client
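# Minimal manual check: replay a conversation that already contains a tool call and its tool reply through the chat completions API.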
messages = [
{
"content": "",
"role": "assistant",
"function_call": None,
"tool_calls": [
{
"id": "call_qNz6JXfuqDs3z2RgLPW8Vbhe",
"function": {"arguments": '{"input":"你好"}', "name": "tts"},
"type": "tool",
}
],
},
{
"tool_call_id": "call_qNz6JXfuqDs3z2RgLPW8Vbhe",
"role": "tool",
"name": "tts",
"content": "success to generate audio",
},
]
openai_client = OpenAI(
base_url="https://one.loli.vet/v1",
api_key="sk-",
http_client=Client(follow_redirects=True),
)
print(openai_client.chat.completions.create(messages=messages, model="gpt-3.5-turbo-1106"))
| [
"success to generate audio"
] |
2024-01-10 | AkashiCoin/nonebot-plugin-openai | nonebot_plugin_openai~_openai.py | import asyncio
from io import BytesIO
import json
import random
from typing import List, Literal, Dict, Union
from openai import AsyncOpenAI
from openai.types.chat import (
ChatCompletionMessageToolCall,
ChatCompletionToolMessageParam,
ChatCompletionContentPartImageParam,
ChatCompletionUserMessageParam,
ChatCompletionContentPartTextParam,
ChatCompletionFunctionMessageParam,
ChatCompletionMessage,
ChatCompletion,
)
from openai._exceptions import APIStatusError
from httpx import AsyncClient
from pydantic import BaseModel
from loguru import logger
from .types import (
Channel,
Session,
ToolCall,
ToolCallConfig,
ToolCallResponse,
ToolCallRequest,
FuncContext,
)
from .function import ToolsFunction
class OpenAIClient:
def __init__(
self,
base_url: str,
channels: List[Channel],
tool_func: ToolsFunction,
default_model: str = "gpt-3.5-turbo",
):
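        """Store the channel list, tool functions, and default model, and create the shared async HTTP client."""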
self.channels = channels
self.http_client = AsyncClient(base_url=base_url, follow_redirects=True)
self.tool_func = tool_func
self.default_model = default_model
def init_client(self, channel: Channel):
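        """Create an AsyncOpenAI client from the given channel's credentials, reusing the shared HTTP client."""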
client = AsyncOpenAI(**channel.dict(), http_client=self.http_client)
return client
@property
def client(self):
channel = random.choice(self.channels)
return self.init_client(channel)
async def chat(
self,
session: Session,
prompt: str = "",
model: str = "",
image_url: str = "",
tool_choice: Literal["none", "auto"] = "auto",
) -> List[Union[ToolCallRequest, ChatCompletionMessage]]:
if not model:
model = self.default_model
if prompt:
session.messages.append(
ChatCompletionUserMessageParam(
role="user",
content=(prompt + f"\n") if image_url else prompt,
)
)
results = await self.chat_completions(
session=session, model=model, tool_choice=tool_choice
)
return results
async def chat_completions(
self,
session: Session,
model="gpt-3.5-turbo",
tool_choice: Literal["none", "auto"] = "auto",
) -> List[Union[ToolCallRequest, ChatCompletionMessage]]:
"""
        Generate a chat completion for the given session.
        Args:
        session (Session): The current session object.
        model (str, optional): Name of the model to use; defaults to "gpt-3.5-turbo".
        tool_choice (Literal["none", "auto"], optional): Tool choice mode; defaults to "auto".
        Returns:
        results (list): A list containing the completion results.
"""
        # Check whether the model name contains "vision"
vision = model.count("vision") > 0
messages = session.get_messages()
if vision:
messages = [messages[-1]]
session.messages.pop()
max_retry = 3
for i in range(max_retry):
try:
                # Create the chat completion
chat_completion = await self.client.chat.completions.create(
messages=messages,
model=model,
tool_choice=None if vision or tool_choice == "none" else tool_choice,
tools=None
if vision or tool_choice == "none" # 省 Tokens
else self.tool_func.tools_info(),
user=session.user,
max_tokens=1024 if vision else None,
)
break
except APIStatusError as e:
logger.error(f"请求聊天出错: {e}")
if i == max_retry - 1:
return [
ChatCompletionMessage(
role="assistant",
content=f"请求聊天出错: {e.message}",
)
]
except Exception as e:
logger.error(f"请求聊天出错: {e}")
else:
return [
ChatCompletionMessage(
role="assistant",
content="请求聊天出错",
)
]
return self.make_chat_completion_results(session, chat_completion)
def make_chat_completion_results(
self, session: Session, chat_completion: ChatCompletion
):
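        """Convert a ChatCompletion into a list of messages, tool-call requests, and token usage, appending the assistant messages to the session."""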
logger.info(f"chat_comletion: {chat_completion}")
results = []
choices = chat_completion.choices
for choice in choices:
if choice.message.role == "":
choice.message.role = "assistant"
            # Append the choice's message to the results list
results.append(choice.message)
if choice.message.tool_calls:
                # Clear the message content to avoid odd OpenAI errors
choice.message.content = ""
                # Iterate over each tool call
for tool_call in choice.message.tool_calls:
                    # Append the tool-call request to the results list
results.append(self.make_tool_request(session, tool_call))
            # If the message contains a (legacy) function call
if choice.message.function_call:
                # Clear the message content to avoid odd OpenAI errors
choice.message.content = ""
                # Call the function
task = self.tool_func.call_function(
function_call=choice.message.function_call,
session=session,
)
                # Append the function-call request to the results list
results.append(
ToolCallRequest(
tool_call=choice.message.function_call,
func=task,
                        config=self.tool_func.tool_config[choice.message.function_call.name],
)
)
            # Append the choice's message to the session's message list
session.messages.append(choice.message)
results.append(chat_completion.usage)
return results
def make_tool_request(
self, session: Session, tool_call: ChatCompletionMessageToolCall
):
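        """Build the FuncContext for a tool call and wrap it into a ToolCallRequest with its coroutine and per-tool configuration."""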
config = self.tool_func.tool_config.get(tool_call.function.name)
task = self.tool_func.call_tool(
tool_call=tool_call,
ctx=FuncContext[type(config)](
session=session,
openai_client=self.client,
http_client=self.http_client,
config=config,
),
)
return ToolCallRequest(
tool_call=tool_call,
func=task,
config=config,
)
async def tts(
self,
input: str,
model: Literal["tts-1", "tts-1-hd"] = "tts-1",
voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"] = "shimmer",
speed: float = 1.0,
ctx: FuncContext[ToolCallConfig] = None,
):
"""
Generates audio from the input text. Can produce a method of speaking to be used in a
voice application.
Args:
input: The text to generate audio for. The maximum length is 4096 characters.
model:
One of the available [TTS models](https://platform.openai.com/docs/models/tts):
`tts-1` or `tts-1-hd`
voice: The voice to use when generating the audio. Supported voices are `alloy`,
`echo`, `fable`, `onyx`, `nova`, and `shimmer`.
speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
the default.
"""
logger.info(f"tts: {input} {model} {voice} {speed}")
resp = ToolCallResponse(
name="tts",
content_type="audio",
content=None,
data="success to generate audio, it has been display.",
)
if isinstance(speed, str):
speed = float(speed)
try:
record = await self.client.audio.speech.create(
input=input, model=model, voice=voice, speed=speed
)
except APIStatusError as e:
logger.error(f"TTS: {e}")
resp.data = f"failed to generate audio, {e.message}"
return resp
except Exception as e:
logger.error(f"TTS: {e}")
resp.data = f"failed to generate audio, {e}"
return resp
resp.content = record.content
return resp
async def gen_image(
self,
prompt: str,
model: Literal["dall-e-2", "dall-e-3"] = "dall-e-3",
quality: Literal["standard", "hd"] = "standard",
size: Literal["1024x1024", "1792x1024", "1024x1792"] = "1024x1024",
style: Literal["vivid", "natural"] = "vivid",
ctx: FuncContext[ToolCallConfig] = None,
):
"""
Creates an image given a prompt.
Args:
prompt: A text description of the desired image(s). The maximum length is 1000
characters for `dall-e-2` and 4000 characters for `dall-e-3`.
model: The model to use for image generation.
quality: The quality of the image that will be generated. `hd` creates images with finer
details and greater consistency across the image. This param is only supported
for `dall-e-3`.
size: The size of the generated images. Must be one of `256x256`, `512x512`, or
`1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or
`1024x1792` for `dall-e-3` models.
style: The style of the generated images. Must be one of `vivid` or `natural`. Vivid
causes the model to lean towards generating hyper-real and dramatic images.
Natural causes the model to produce more natural, less hyper-real looking
images. This param is only supported for `dall-e-3`.
"""
logger.info(f"gen_image: {prompt} {model} {quality} {size} {style}")
resp = ToolCallResponse(
name="gen_image",
content_type="openai_image",
content=None,
data="failed to generate image",
)
try:
image_resp = await self.client.images.generate(
prompt=prompt,
n=1,
response_format="url",
model=model,
quality=quality,
size=size,
style=style,
)
except APIStatusError as e:
logger.error(f"DALL-E: {e}")
resp.data = f"failed to generate image, {e.message}"
return resp
except Exception as e:
logger.error(f"DALL-E: {e}")
resp.data = f"failed to generate image, {e}"
return resp
if image_resp.created:
data = image_resp.data[0]
resp.data = f"success generate image and it had been display, here is revised prompt of this image: {data.revised_prompt}"
resp.content = data
return resp
async def vision(
self,
url: str,
text: str = "Analyze this picture",
ctx: FuncContext[ToolCallConfig] = None,
):
"""
This function is designed to analyze an image using a chat completion and return the analysis results.
Args:
text (str): The text to be used as context for the image analysis.
url (str): The URL of the image to be analyzed.
Returns:
ToolCallResponse: The response from the tool call, containing the analysis result.
Raises:
APIStatusError: If there is an error with the API status.
Exception: If there is a general error.
"""
logger.info(f"Vision: {text} {url}")
resp = ToolCallResponse(
name="vision",
content_type="str",
content=None,
data="failed to analyze image",
)
try:
analyze_resp = await self.client.chat.completions.create(
messages=[
ChatCompletionUserMessageParam(
role="user",
content=[
ChatCompletionContentPartTextParam(
text=text,
type="text",
),
ChatCompletionContentPartImageParam(
image_url={
"url": url
},
type="image_url",
),
],
),
],
model="gpt-4-vision-preview",
max_tokens=1024,
)
except APIStatusError as e:
logger.error(f"Vision: {e}")
resp.data = f"failed to analyze image, {e.message}"
return resp
except Exception as e:
logger.error(f"Vision: {e}")
resp.data = f"failed to analyze image, {e}"
return resp
if analyze_resp.created:
data = analyze_resp.choices[0].message
resp.data = data.content
return resp
| [
"请求聊天出错"
] |
2024-01-10 | AkashiCoin/nonebot-plugin-openai | test~test_vision.py | import asyncio
import sys
from typing import Coroutine
sys.path.append("./")
from nonebot_plugin_openai._openai import OpenAIClient
from nonebot_plugin_openai.types import Session, Channel
from nonebot_plugin_openai.utils import function_to_json_schema
client = OpenAIClient(
base_url="https://one.loli.vet/v1",
channels=[
Channel(
base_url="https://one.loli.vet/v1",
api_key="sk-",
)
],
default_model="gpt-3.5-turbo-1106"
)
async def test():
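    # Ad-hoc check: ask for a spoken reply so the chat call should return a TTS tool-call coroutine to await.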
prompt = "说句语音:你好"
image_url = ""
session = Session(id="test", messages=[], user="test")
results = await client.chat(session, prompt=prompt, image_url=image_url)
for result in results:
if isinstance(result, Coroutine):
print(f"[Function] 开始调用 {result.__name__} ...")
results.append(await result)
print(results)
if __name__ == "__main__":
asyncio.get_event_loop().run_until_complete(test())
| [
"说句语音:你好"
] |
2024-01-10 | Nusri7/mcq-generator | src~mcq_generator~MCQGenerator.py | import os
import json
import pandas as pd
from dotenv import load_dotenv
from langchain.llms import Together
from langchain.chains import LLMChain
from langchain.chains import SequentialChain
from langchain.prompts import PromptTemplate
from src.mcq_generator.logger import logging
load_dotenv() # take environment variables from .env.
key = os.getenv("TOGETHER_API_KEY")
llm = Together(
model="NousResearch/Nous-Hermes-2-Yi-34B",
temperature=0.9,
max_tokens=700,
top_k=1,
)
TEMPLATE="""
Text:{text}
You are an expert MCQ maker. Given the above text, it is your job to \
create a quiz of {number} multiple choice questions for {subject} students in {tone} tone.
Ensure to make {number} MCQs and questions are not repeated and check all the questions to be conforming the text as well.
Make sure to format your response to json format like response_json below and use it as a guide. \
I want the output only in json format and not in any other format. like like response_json below.
{response_json}
"""
quiz_generation_prompt = PromptTemplate(
input_variables=["text", "number", "subject", "tone", "response_json"],
template=TEMPLATE
)
quiz_chain=LLMChain(llm=llm, prompt=quiz_generation_prompt, output_key="quiz", verbose=True)
TEMPLATE2="""
You are an expert english grammarian and writer. Given a Multiple Choice Quiz for {subject} students.\
You need to evaluate the complexity of the question and give a complete analysis of the quiz. Only use at max 50 words for complexity analysis.
if the quiz is not at per with the cognitive and analytical abilities of the students,\
update the quiz questions which needs to be changed and change the tone such that it perfectly fits the student abilities
Quiz_MCQs:
{quiz}
Check from an expert English Writer of the above quiz:
"""
quiz_evaluation_prompt=PromptTemplate(input_variables=["subject", "quiz"], template=TEMPLATE2)
review_chain=LLMChain(llm=llm, prompt=quiz_evaluation_prompt, output_key="review", verbose=True)
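# Combine the quiz-generation and review chains so a single call returns both the MCQs and their evaluation.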
generate_evaluate_chain=SequentialChain(chains=[quiz_chain, review_chain], input_variables=["text", "number", "subject", "tone", "response_json"],
output_variables=["quiz", "review"], verbose=True,)
| [
"number",
"response_json",
"\nText:{text}\nYou are an expert MCQ maker. Given the above text, it is your job to create a quiz of {number} multiple choice questions for {subject} students in {tone} tone. \nEnsure to make {number} MCQs and questions are not repeated and check all the questions to be conforming the text as well.\nMake sure to format your response to json format like response_json below and use it as a guide. I want the output only in json format and not in any other format. like like response_json below.\n\n{response_json}\n\n",
"\nYou are an expert english grammarian and writer. Given a Multiple Choice Quiz for {subject} students.You need to evaluate the complexity of the question and give a complete analysis of the quiz. Only use at max 50 words for complexity analysis. \nif the quiz is not at per with the cognitive and analytical abilities of the students,update the quiz questions which needs to be changed and change the tone such that it perfectly fits the student abilities\nQuiz_MCQs:\n{quiz}\n\nCheck from an expert English Writer of the above quiz:\n",
"tone"
] |
2024-01-10 | psui3905/tensorforce | tensorforce~core~optimizers~kfac.py | # Copyright 2018 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#code refractored from openai/baselines (https://github.com/openai/baselines)
import tensorflow as tf
import numpy as np
import re
from tensorforce.core.optimizers.kfac_utils import *
from tensorforce.core.optimizers import Optimizer
from functools import reduce
KFAC_OPS = ['MatMul', 'Conv2D', 'BiasAdd']
class KFAC(Optimizer):
"""
A non-layers implementation of the Kronecker-factored approximate curvature optimizer.
Meant for usage with Tensorforce.
"""
def __init__(
self,
learning_rate=0.01,
momentum=0.9,
clip_kl=0.01,
kfac_update=2,
stats_accum_iter=60,
full_stats_init=False,
cold_iter=100,
cold_lr=None,
async_=False,
async_stats=False,
epsilon=1e-2,
stats_decay=0.95,
blockdiag_bias=False,
channel_fac=False,
factored_damping=False,
approxT2=False,
use_float64=False,
weight_decay_dict={},
max_grad_norm=0.5,
scope='kfac',
summary_labels=()
):
"""
Initializes a KFAC optimizer.
For more information on arguments, see the Kfac Optimization paper https://arxiv.org/pdf/1503.05671.pdf
"""
self.max_grad_norm = max_grad_norm
self._lr = learning_rate
self._momentum = momentum
self._clip_kl = clip_kl
self._channel_fac = channel_fac
self._kfac_update = kfac_update
self._async = async_
self._async_stats = async_stats
self._epsilon = epsilon
self._stats_decay = stats_decay
self._blockdiag_bias = blockdiag_bias
self._approxT2 = approxT2
self._use_float64 = use_float64
self._factored_damping = factored_damping
self._cold_iter = cold_iter
if cold_lr == None:
# good heuristics
self._cold_lr = self._lr# * 3.
else:
self._cold_lr = cold_lr
self._stats_accum_iter = stats_accum_iter
self._weight_decay_dict = weight_decay_dict
self._diag_init_coeff = 0.
self._full_stats_init = full_stats_init
if not self._full_stats_init:
self._stats_accum_iter = self._cold_iter
self.sgd_step = tf.Variable(0, name='KFAC/sgd_step', trainable=False)
self.global_step = tf.Variable(
0, name='KFAC/global_step', trainable=False)
self.cold_step = tf.Variable(0, name='KFAC/cold_step', trainable=False)
self.factor_step = tf.Variable(
0, name='KFAC/factor_step', trainable=False)
self.stats_step = tf.Variable(
0, name='KFAC/stats_step', trainable=False)
self.vFv = tf.Variable(0., name='KFAC/vFv', trainable=False)
self.factors = {}
self.param_vars = []
self.stats = {}
self.stats_eigen = {}
super(KFAC, self).__init__(scope=scope, summary_labels=summary_labels)
def getFactors(self, g, varlist):
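        """Walk the gradient graph to locate the forward (activation) and backward (gradient) factor tensors for each variable, grouping shared factors."""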
graph = tf.get_default_graph()
factorTensors = {}
fpropTensors = []
bpropTensors = []
opTypes = []
fops = []
def searchFactors(gradient, graph):
            # hard coded search strategy
bpropOp = gradient.op
bpropOp_name = bpropOp.name
bTensors = []
fTensors = []
# combining additive gradient, assume they are the same op type and
            # independent
if 'AddN' in bpropOp_name:
factors = []
for g in gradient.op.inputs:
factors.append(searchFactors(g, graph))
op_names = [item['opName'] for item in factors]
# TO-DO: need to check all the attribute of the ops as well
print (gradient.name)
print (op_names)
print (len(np.unique(op_names)))
assert len(np.unique(op_names)) == 1, gradient.name + \
' is shared among different computation OPs'
bTensors = reduce(lambda x, y: x + y,
[item['bpropFactors'] for item in factors])
if len(factors[0]['fpropFactors']) > 0:
fTensors = reduce(
lambda x, y: x + y, [item['fpropFactors'] for item in factors])
fpropOp_name = op_names[0]
fpropOp = factors[0]['op']
else:
fpropOp_name = re.search(
'gradientsSampled(_[0-9]+|)/(.+?)_grad', bpropOp_name).group(2)
fpropOp = graph.get_operation_by_name(fpropOp_name)
if fpropOp.op_def.name in KFAC_OPS:
# Known OPs
###
bTensor = [
i for i in bpropOp.inputs if 'gradientsSampled' in i.name][-1]
bTensorShape = fpropOp.outputs[0].get_shape()
if bTensor.get_shape()[0].value == None:
bTensor.set_shape(bTensorShape)
bTensors.append(bTensor)
###
if fpropOp.op_def.name == 'BiasAdd':
fTensors = []
else:
fTensors.append(
[i for i in fpropOp.inputs if param.op.name not in i.name][0])
fpropOp_name = fpropOp.op_def.name
else:
# unknown OPs, block approximation used
bInputsList = [i for i in bpropOp.inputs[
0].op.inputs if 'gradientsSampled' in i.name if 'Shape' not in i.name]
if len(bInputsList) > 0:
bTensor = bInputsList[0]
bTensorShape = fpropOp.outputs[0].get_shape()
if len(bTensor.get_shape()) > 0 and bTensor.get_shape()[0].value == None:
bTensor.set_shape(bTensorShape)
bTensors.append(bTensor)
fpropOp_name = opTypes.append('UNK-' + fpropOp.op_def.name)
return {'opName': fpropOp_name, 'op': fpropOp, 'fpropFactors': fTensors, 'bpropFactors': bTensors}
for t, param in zip(g, varlist):
factors = searchFactors(t, graph)
factorTensors[param] = factors
########
# check associated weights and bias for homogeneous coordinate representation
        # and check redundant factors
# TO-DO: there may be a bug to detect associate bias and weights for
# forking layer, e.g. in inception models.
for param in varlist:
factorTensors[param]['assnWeights'] = None
factorTensors[param]['assnBias'] = None
for param in varlist:
if factorTensors[param]['opName'] == 'BiasAdd':
factorTensors[param]['assnWeights'] = None
for item in varlist:
if len(factorTensors[item]['bpropFactors']) > 0:
if (set(factorTensors[item]['bpropFactors']) == set(factorTensors[param]['bpropFactors'])) and (len(factorTensors[item]['fpropFactors']) > 0):
factorTensors[param]['assnWeights'] = item
factorTensors[item]['assnBias'] = param
factorTensors[param]['bpropFactors'] = factorTensors[
item]['bpropFactors']
########
########
# concatenate the additive gradients along the batch dimension, i.e.
# assuming independence structure
for key in ['fpropFactors', 'bpropFactors']:
for i, param in enumerate(varlist):
if len(factorTensors[param][key]) > 0:
if (key + '_concat') not in factorTensors[param]:
name_scope = factorTensors[param][key][0].name.split(':')[
0]
with tf.name_scope(name_scope):
factorTensors[param][
key + '_concat'] = tf.concat(factorTensors[param][key], 0)
else:
factorTensors[param][key + '_concat'] = None
for j, param2 in enumerate(varlist[(i + 1):]):
if (len(factorTensors[param][key]) > 0) and (set(factorTensors[param2][key]) == set(factorTensors[param][key])):
factorTensors[param2][key] = factorTensors[param][key]
factorTensors[param2][
key + '_concat'] = factorTensors[param][key + '_concat']
########
self.factors = factorTensors
return factorTensors
def getStats(self, factors, varlist):
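        """Create (on CPU) and cache the covariance statistics variables for each variable's forward and backward factors."""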
if len(self.stats) == 0:
# initialize stats variables on CPU because eigen decomp is
# computed on CPU
with tf.device('/cpu'):
tmpStatsCache = {}
# search for tensor factors and
# use block diag approx for the bias units
for var in varlist:
fpropFactor = factors[var]['fpropFactors_concat']
bpropFactor = factors[var]['bpropFactors_concat']
opType = factors[var]['opName']
if opType == 'Conv2D':
Kh = var.get_shape()[0]
Kw = var.get_shape()[1]
C = fpropFactor.get_shape()[-1]
Oh = bpropFactor.get_shape()[1]
Ow = bpropFactor.get_shape()[2]
if Oh == 1 and Ow == 1 and self._channel_fac:
# factorization along the channels do not support
# homogeneous coordinate
var_assnBias = factors[var]['assnBias']
if var_assnBias:
factors[var]['assnBias'] = None
factors[var_assnBias]['assnWeights'] = None
##
for var in varlist:
fpropFactor = factors[var]['fpropFactors_concat']
bpropFactor = factors[var]['bpropFactors_concat']
opType = factors[var]['opName']
self.stats[var] = {'opName': opType,
'fprop_concat_stats': [],
'bprop_concat_stats': [],
'assnWeights': factors[var]['assnWeights'],
'assnBias': factors[var]['assnBias'],
}
if fpropFactor is not None:
if fpropFactor not in tmpStatsCache:
if opType == 'Conv2D':
Kh = var.get_shape()[0]
Kw = var.get_shape()[1]
C = fpropFactor.get_shape()[-1]
Oh = bpropFactor.get_shape()[1]
Ow = bpropFactor.get_shape()[2]
if Oh == 1 and Ow == 1 and self._channel_fac:
# factorization along the channels
# assume independence between input channels and spatial
# 2K-1 x 2K-1 covariance matrix and C x C covariance matrix
# factorization along the channels do not
# support homogeneous coordinate, assnBias
# is always None
fpropFactor2_size = Kh * Kw
slot_fpropFactor_stats2 = tf.Variable(tf.diag(tf.ones(
[fpropFactor2_size])) * self._diag_init_coeff, name='KFAC_STATS/' + fpropFactor.op.name, trainable=False)
self.stats[var]['fprop_concat_stats'].append(
slot_fpropFactor_stats2)
fpropFactor_size = C
else:
# 2K-1 x 2K-1 x C x C covariance matrix
# assume BHWC
fpropFactor_size = Kh * Kw * C
else:
# D x D covariance matrix
fpropFactor_size = fpropFactor.get_shape()[-1]
# use homogeneous coordinate
if not self._blockdiag_bias and self.stats[var]['assnBias']:
fpropFactor_size += 1
slot_fpropFactor_stats = tf.Variable(tf.diag(tf.ones(
[fpropFactor_size])) * self._diag_init_coeff, name='KFAC_STATS/' + fpropFactor.op.name, trainable=False)
self.stats[var]['fprop_concat_stats'].append(
slot_fpropFactor_stats)
if opType != 'Conv2D':
tmpStatsCache[fpropFactor] = self.stats[
var]['fprop_concat_stats']
else:
self.stats[var][
'fprop_concat_stats'] = tmpStatsCache[fpropFactor]
if bpropFactor is not None:
# no need to collect backward stats for bias vectors if
# using homogeneous coordinates
if not((not self._blockdiag_bias) and self.stats[var]['assnWeights']):
if bpropFactor not in tmpStatsCache:
slot_bpropFactor_stats = tf.Variable(tf.diag(tf.ones([bpropFactor.get_shape(
)[-1]])) * self._diag_init_coeff, name='KFAC_STATS/' + bpropFactor.op.name, trainable=False)
self.stats[var]['bprop_concat_stats'].append(
slot_bpropFactor_stats)
tmpStatsCache[bpropFactor] = self.stats[
var]['bprop_concat_stats']
else:
self.stats[var][
'bprop_concat_stats'] = tmpStatsCache[bpropFactor]
return self.stats
def compute_and_apply_stats(self, loss_sampled, var_list=None):
varlist = var_list
if varlist is None:
varlist = tf.trainable_variables()
stats = self.compute_stats(loss_sampled, var_list=varlist)
return self.apply_stats(stats)
def compute_stats(self, loss_sampled, var_list=None):
varlist = var_list
if varlist is None:
varlist = tf.trainable_variables()
gs = tf.gradients(loss_sampled, varlist, name='gradientsSampled')
self.gs = gs
factors = self.getFactors(gs, varlist)
stats = self.getStats(factors, varlist)
updateOps = []
statsUpdates = {}
statsUpdates_cache = {}
for var in varlist:
opType = factors[var]['opName']
fops = factors[var]['op']
fpropFactor = factors[var]['fpropFactors_concat']
fpropStats_vars = stats[var]['fprop_concat_stats']
bpropFactor = factors[var]['bpropFactors_concat']
bpropStats_vars = stats[var]['bprop_concat_stats']
SVD_factors = {}
for stats_var in fpropStats_vars:
stats_var_dim = int(stats_var.get_shape()[0])
if stats_var not in statsUpdates_cache:
old_fpropFactor = fpropFactor
B = (tf.shape(fpropFactor)[0]) # batch size
if opType == 'Conv2D':
strides = fops.get_attr("strides")
padding = fops.get_attr("padding")
convkernel_size = var.get_shape()[0:3]
KH = int(convkernel_size[0])
KW = int(convkernel_size[1])
C = int(convkernel_size[2])
flatten_size = int(KH * KW * C)
Oh = int(bpropFactor.get_shape()[1])
Ow = int(bpropFactor.get_shape()[2])
if Oh == 1 and Ow == 1 and self._channel_fac:
# factorization along the channels
# assume independence among input channels
# factor = B x 1 x 1 x (KH xKW x C)
# patches = B x Oh x Ow x (KH xKW x C)
if len(SVD_factors) == 0:
# find closest rank-1 approx to the feature map
S, U, V = tf.batch_svd(tf.reshape(
fpropFactor, [-1, KH * KW, C]))
# get rank-1 approx slides
sqrtS1 = tf.expand_dims(tf.sqrt(S[:, 0, 0]), 1)
patches_k = U[:, :, 0] * sqrtS1 # B x KH*KW
full_factor_shape = fpropFactor.get_shape()
patches_k.set_shape(
[full_factor_shape[0], KH * KW])
patches_c = V[:, :, 0] * sqrtS1 # B x C
patches_c.set_shape([full_factor_shape[0], C])
SVD_factors[C] = patches_c
SVD_factors[KH * KW] = patches_k
fpropFactor = SVD_factors[stats_var_dim]
else:
# poor mem usage implementation
patches = tf.extract_image_patches(fpropFactor, ksizes=[1, convkernel_size[
0], convkernel_size[1], 1], strides=strides, rates=[1, 1, 1, 1], padding=padding)
if self._approxT2:
# T^2 terms * 1/T^2, size: B x C
fpropFactor = tf.reduce_mean(patches, [1, 2])
else:
# size: (B x Oh x Ow) x C
fpropFactor = tf.reshape(
patches, [-1, flatten_size]) / Oh / Ow
fpropFactor_size = int(fpropFactor.get_shape()[-1])
if stats_var_dim == (fpropFactor_size + 1) and not self._blockdiag_bias:
if opType == 'Conv2D' and not self._approxT2:
# correct padding for numerical stability (we
# divided out OhxOw from activations for T1 approx)
fpropFactor = tf.concat([fpropFactor, tf.ones(
[tf.shape(fpropFactor)[0], 1]) / Oh / Ow], 1)
else:
# use homogeneous coordinates
fpropFactor = tf.concat(
[fpropFactor, tf.ones([tf.shape(fpropFactor)[0], 1])], 1)
# average over the number of data points in a batch
# divided by B
cov = tf.matmul(fpropFactor, fpropFactor,
transpose_a=True) / tf.cast(B, tf.float32)
updateOps.append(cov)
statsUpdates[stats_var] = cov
if opType != 'Conv2D':
# HACK: for convolution we recompute fprop stats for
# every layer including forking layers
statsUpdates_cache[stats_var] = cov
for stats_var in bpropStats_vars:
stats_var_dim = int(stats_var.get_shape()[0])
if stats_var not in statsUpdates_cache:
old_bpropFactor = bpropFactor
bpropFactor_shape = bpropFactor.get_shape()
B = tf.shape(bpropFactor)[0] # batch size
C = int(bpropFactor_shape[-1]) # num channels
if opType == 'Conv2D' or len(bpropFactor_shape) == 4:
if fpropFactor is not None:
if self._approxT2:
bpropFactor = tf.reduce_sum(
bpropFactor, [1, 2]) # T^2 terms * 1/T^2
else:
bpropFactor = tf.reshape(
bpropFactor, [-1, C]) * Oh * Ow # T * 1/T terms
else:
# just doing block diag approx. spatial independent
# structure does not apply here. summing over
# spatial locations
bpropFactor = tf.reduce_sum(bpropFactor, [1, 2])
                    # assume the sampled loss is averaged. TO-DO: figure out a better
                    # way to handle this
bpropFactor *= tf.to_float(B)
##
cov_b = tf.matmul(
bpropFactor, bpropFactor, transpose_a=True) / tf.to_float(tf.shape(bpropFactor)[0])
updateOps.append(cov_b)
statsUpdates[stats_var] = cov_b
statsUpdates_cache[stats_var] = cov_b
self.statsUpdates = statsUpdates
return statsUpdates
def apply_stats(self, statsUpdates):
""" compute stats and update/apply the new stats to the running average
"""
def updateAccumStats():
if self._full_stats_init:
return tf.cond(tf.greater(self.sgd_step, self._cold_iter), lambda: tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter)), tf.no_op)
else:
return tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter))
def updateRunningAvgStats(statsUpdates, fac_iter=1):
# return tf.cond(tf.greater_equal(self.factor_step,
# tf.convert_to_tensor(fac_iter)), lambda:
# tf.group(*self._apply_stats(stats_list, varlist)), tf.no_op)
return tf.group(*self._apply_stats(statsUpdates))
if self._async_stats:
# asynchronous stats update
update_stats = self._apply_stats(statsUpdates)
queue = tf.FIFOQueue(1, [item.dtype for item in update_stats], shapes=[
item.get_shape() for item in update_stats])
enqueue_op = queue.enqueue(update_stats)
def dequeue_stats_op():
return queue.dequeue()
self.qr_stats = tf.train.QueueRunner(queue, [enqueue_op])
update_stats_op = tf.cond(tf.equal(queue.size(), tf.convert_to_tensor(
0)), tf.no_op, lambda: tf.group(*[dequeue_stats_op(), ]))
else:
# synchronous stats update
update_stats_op = tf.cond(tf.greater_equal(
self.stats_step, self._stats_accum_iter), lambda: updateRunningAvgStats(statsUpdates), updateAccumStats)
self._update_stats_op = update_stats_op
return update_stats_op
def _apply_stats(self, statsUpdates, accumulate=False, accumulateCoeff=0.):
updateOps = []
# obtain the stats var list
for stats_var in statsUpdates:
stats_new = statsUpdates[stats_var]
if accumulate:
# simple superbatch averaging
update_op = tf.assign_add(
stats_var, accumulateCoeff * stats_new, use_locking=True)
else:
# exponential running averaging
update_op = tf.assign(
stats_var, stats_var * self._stats_decay, use_locking=True)
update_op = tf.assign_add(
update_op, (1. - self._stats_decay) * stats_new, use_locking=True)
updateOps.append(update_op)
with tf.control_dependencies(updateOps):
stats_step_op = tf.assign_add(self.stats_step, 1)
return [stats_step_op, ]
def getStatsEigen(self, stats=None):
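        # Lazily create one eigenvalue vector `e` and eigenvector matrix `Q` variable per
        # covariance statistic (placed on the CPU); factors that share a statistic reuse
        # the same variables via tmpEigenCache.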
if len(self.stats_eigen) == 0:
stats_eigen = {}
if stats is None:
stats = self.stats
tmpEigenCache = {}
with tf.device('/cpu:0'):
for var in stats:
for key in ['fprop_concat_stats', 'bprop_concat_stats']:
for stats_var in stats[var][key]:
if stats_var not in tmpEigenCache:
stats_dim = stats_var.get_shape()[1].value
e = tf.Variable(tf.ones(
[stats_dim]), name='KFAC_FAC/' + stats_var.name.split(':')[0] + '/e', trainable=False)
Q = tf.Variable(tf.diag(tf.ones(
[stats_dim])), name='KFAC_FAC/' + stats_var.name.split(':')[0] + '/Q', trainable=False)
stats_eigen[stats_var] = {'e': e, 'Q': Q}
tmpEigenCache[
stats_var] = stats_eigen[stats_var]
else:
stats_eigen[stats_var] = tmpEigenCache[
stats_var]
self.stats_eigen = stats_eigen
return self.stats_eigen
def computeStatsEigen(self):
""" compute the eigen decomp using copied var stats to avoid concurrent read/write from other queue """
# TO-DO: figure out why this op has delays (possibly moving
# eigenvectors around?)
with tf.device('/cpu:0'):
def removeNone(tensor_list):
local_list = []
for item in tensor_list:
if item is not None:
local_list.append(item)
return local_list
def copyStats(var_list):
print("copying stats to buffer tensors before eigen decomp")
redundant_stats = {}
copied_list = []
for item in var_list:
if item is not None:
if item not in redundant_stats:
if self._use_float64:
redundant_stats[item] = tf.cast(
tf.identity(item), tf.float64)
else:
redundant_stats[item] = tf.identity(item)
copied_list.append(redundant_stats[item])
else:
copied_list.append(None)
return copied_list
#stats = [copyStats(self.fStats), copyStats(self.bStats)]
#stats = [self.fStats, self.bStats]
stats_eigen = self.stats_eigen
computedEigen = {}
eigen_reverse_lookup = {}
updateOps = []
# sync copied stats
# with tf.control_dependencies(removeNone(stats[0]) +
# removeNone(stats[1])):
with tf.control_dependencies([]):
for stats_var in stats_eigen:
if stats_var not in computedEigen:
eigens = tf.self_adjoint_eig(stats_var)
e = eigens[0]
Q = eigens[1]
if self._use_float64:
e = tf.cast(e, tf.float32)
Q = tf.cast(Q, tf.float32)
updateOps.append(e)
updateOps.append(Q)
computedEigen[stats_var] = {'e': e, 'Q': Q}
eigen_reverse_lookup[e] = stats_eigen[stats_var]['e']
eigen_reverse_lookup[Q] = stats_eigen[stats_var]['Q']
self.eigen_reverse_lookup = eigen_reverse_lookup
self.eigen_update_list = updateOps
return updateOps
def applyStatsEigen(self, eigen_list):
updateOps = []
print(('updating %d eigenvalue/vectors' % len(eigen_list)))
for i, (tensor, mark) in enumerate(zip(eigen_list, self.eigen_update_list)):
stats_eigen_var = self.eigen_reverse_lookup[mark]
updateOps.append(
tf.assign(stats_eigen_var, tensor, use_locking=True))
with tf.control_dependencies(updateOps):
factor_step_op = tf.assign_add(self.factor_step, 1)
updateOps.append(factor_step_op)
return updateOps
def getKfacPrecondUpdates(self, gradlist, varlist):
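        # For each variable: project the gradient into the Kronecker-factored eigenbasis,
        # divide by the damped products of eigenvalues, project back to Euclidean space,
        # then rescale all updates jointly so the approximate KL change stays below
        # self._clip_kl.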
updatelist = []
vg = 0.
assert len(self.stats) > 0
assert len(self.stats_eigen) > 0
assert len(self.factors) > 0
counter = 0
grad_dict = {var: grad for grad, var in zip(gradlist, varlist)}
for grad, var in zip(gradlist, varlist):
GRAD_RESHAPE = False
GRAD_TRANSPOSE = False
fpropFactoredFishers = self.stats[var]['fprop_concat_stats']
bpropFactoredFishers = self.stats[var]['bprop_concat_stats']
if (len(fpropFactoredFishers) + len(bpropFactoredFishers)) > 0:
counter += 1
GRAD_SHAPE = grad.get_shape()
if len(grad.get_shape()) > 2:
# reshape conv kernel parameters
KW = int(grad.get_shape()[0])
KH = int(grad.get_shape()[1])
C = int(grad.get_shape()[2])
D = int(grad.get_shape()[3])
if len(fpropFactoredFishers) > 1 and self._channel_fac:
# reshape conv kernel parameters into tensor
grad = tf.reshape(grad, [KW * KH, C, D])
else:
# reshape conv kernel parameters into 2D grad
grad = tf.reshape(grad, [-1, D])
GRAD_RESHAPE = True
elif len(grad.get_shape()) == 1:
# reshape bias or 1D parameters
D = int(grad.get_shape()[0])
grad = tf.expand_dims(grad, 0)
GRAD_RESHAPE = True
else:
# 2D parameters
C = int(grad.get_shape()[0])
D = int(grad.get_shape()[1])
if (self.stats[var]['assnBias'] is not None) and not self._blockdiag_bias:
# use homogeneous coordinates only works for 2D grad.
# TO-DO: figure out how to factorize bias grad
# stack bias grad
var_assnBias = self.stats[var]['assnBias']
grad = tf.concat(
[grad, tf.expand_dims(grad_dict[var_assnBias], 0)], 0)
# project gradient to eigen space and reshape the eigenvalues
# for broadcasting
eigVals = []
for idx, stats in enumerate(self.stats[var]['fprop_concat_stats']):
Q = self.stats_eigen[stats]['Q']
e = detectMinVal(self.stats_eigen[stats][
'e'], var, name='act', debug=False)
Q, e = factorReshape(Q, e, grad, facIndx=idx, ftype='act')
eigVals.append(e)
grad = gmatmul(Q, grad, transpose_a=True, reduce_dim=idx)
for idx, stats in enumerate(self.stats[var]['bprop_concat_stats']):
Q = self.stats_eigen[stats]['Q']
e = detectMinVal(self.stats_eigen[stats][
'e'], var, name='grad', debug=False)
Q, e = factorReshape(Q, e, grad, facIndx=idx, ftype='grad')
eigVals.append(e)
grad = gmatmul(grad, Q, transpose_b=False, reduce_dim=idx)
##
#####
# whiten using eigenvalues
weightDecayCoeff = 0.
if var in self._weight_decay_dict:
weightDecayCoeff = self._weight_decay_dict[var]
if self._factored_damping:
coeffs = 1.
num_factors = len(eigVals)
                    # compute the ratio of the trace norms of the left and right
                    # K-FAC matrices, and their generalization
if len(eigVals) == 1:
damping = self._epsilon + weightDecayCoeff
else:
damping = tf.pow(
self._epsilon + weightDecayCoeff, 1. / num_factors)
eigVals_tnorm_avg = [tf.reduce_mean(
tf.abs(e)) for e in eigVals]
for e, e_tnorm in zip(eigVals, eigVals_tnorm_avg):
eig_tnorm_negList = [
item for item in eigVals_tnorm_avg if item != e_tnorm]
if len(eigVals) == 1:
adjustment = 1.
elif len(eigVals) == 2:
adjustment = tf.sqrt(
e_tnorm / eig_tnorm_negList[0])
else:
eig_tnorm_negList_prod = reduce(
lambda x, y: x * y, eig_tnorm_negList)
adjustment = tf.pow(
tf.pow(e_tnorm, num_factors - 1.) / eig_tnorm_negList_prod, 1. / num_factors)
coeffs *= (e + adjustment * damping)
else:
coeffs = 1.
damping = (self._epsilon + weightDecayCoeff)
for e in eigVals:
coeffs *= e
coeffs += damping
#grad = tf.Print(grad, [tf.convert_to_tensor('1'), tf.convert_to_tensor(var.name), grad.get_shape()])
grad /= coeffs
#grad = tf.Print(grad, [tf.convert_to_tensor('2'), tf.convert_to_tensor(var.name), grad.get_shape()])
#####
# project gradient back to euclidean space
for idx, stats in enumerate(self.stats[var]['fprop_concat_stats']):
Q = self.stats_eigen[stats]['Q']
grad = gmatmul(Q, grad, transpose_a=False, reduce_dim=idx)
for idx, stats in enumerate(self.stats[var]['bprop_concat_stats']):
Q = self.stats_eigen[stats]['Q']
grad = gmatmul(grad, Q, transpose_b=True, reduce_dim=idx)
##
#grad = tf.Print(grad, [tf.convert_to_tensor('3'), tf.convert_to_tensor(var.name), grad.get_shape()])
if (self.stats[var]['assnBias'] is not None) and not self._blockdiag_bias:
# use homogeneous coordinates only works for 2D grad.
# TO-DO: figure out how to factorize bias grad
# un-stack bias grad
var_assnBias = self.stats[var]['assnBias']
C_plus_one = int(grad.get_shape()[0])
grad_assnBias = tf.reshape(tf.slice(grad,
begin=[
C_plus_one - 1, 0],
size=[1, -1]), var_assnBias.get_shape())
grad_assnWeights = tf.slice(grad,
begin=[0, 0],
size=[C_plus_one - 1, -1])
grad_dict[var_assnBias] = grad_assnBias
grad = grad_assnWeights
#grad = tf.Print(grad, [tf.convert_to_tensor('4'), tf.convert_to_tensor(var.name), grad.get_shape()])
if GRAD_RESHAPE:
grad = tf.reshape(grad, GRAD_SHAPE)
grad_dict[var] = grad
print(('projecting %d gradient matrices' % counter))
for g, var in zip(gradlist, varlist):
grad = grad_dict[var]
### clipping ###
tf.Print(grad, [tf.sqrt(tf.reduce_sum(tf.pow(grad, 2)))], "Euclidean norm of new grad")
local_vg = tf.reduce_sum(grad * g * (self._lr * self._lr))
vg += local_vg
# rescale everything
scaling = tf.minimum(1., tf.sqrt(self._clip_kl / vg))
with tf.control_dependencies([tf.assign(self.vFv, vg)]):
updatelist = [grad_dict[var] for var in varlist]
for i, item in enumerate(updatelist):
updatelist[i] = scaling * item
return updatelist
def compute_gradients(self, loss, var_list=None):
varlist = var_list
if varlist is None:
varlist = tf.trainable_variables()
g = tf.gradients(loss, varlist)
return [(a, b) for a, b in zip(g, varlist)]
def apply_gradients_kfac(self, grads):
g, varlist = list(zip(*grads))
if len(self.stats_eigen) == 0:
self.getStatsEigen()
qr = None
# launch eigen-decomp on a queue thread
if self._async:
print('Use async eigen decomp')
# get a list of factor loading tensors
factorOps_dummy = self.computeStatsEigen()
# define a queue for the list of factor loading tensors
queue = tf.FIFOQueue(1, [item.dtype for item in factorOps_dummy], shapes=[
item.get_shape() for item in factorOps_dummy])
enqueue_op = tf.cond(tf.logical_and(tf.equal(tf.mod(self.stats_step, self._kfac_update), tf.convert_to_tensor(
0)), tf.greater_equal(self.stats_step, self._stats_accum_iter)), lambda: queue.enqueue(self.computeStatsEigen()), tf.no_op)
def dequeue_op():
return queue.dequeue()
qr = tf.train.QueueRunner(queue, [enqueue_op])
updateOps = []
global_step_op = tf.assign_add(self.global_step, 1)
updateOps.append(global_step_op)
with tf.control_dependencies([global_step_op]):
# compute updates
assert self._update_stats_op != None
updateOps.append(self._update_stats_op)
dependency_list = []
if not self._async:
dependency_list.append(self._update_stats_op)
with tf.control_dependencies(dependency_list):
def no_op_wrapper():
return tf.group(*[tf.assign_add(self.cold_step, 1)])
if not self._async:
# synchronous eigen-decomp updates
updateFactorOps = tf.cond(tf.logical_and(tf.equal(tf.mod(self.stats_step, self._kfac_update),
tf.convert_to_tensor(0)),
tf.greater_equal(self.stats_step, self._stats_accum_iter)), lambda: tf.group(*self.applyStatsEigen(self.computeStatsEigen())), no_op_wrapper)
else:
# asynchronous eigen-decomp updates using queue
updateFactorOps = tf.cond(tf.greater_equal(self.stats_step, self._stats_accum_iter),
lambda: tf.cond(tf.equal(queue.size(), tf.convert_to_tensor(0)),
tf.no_op,
lambda: tf.group(
*self.applyStatsEigen(dequeue_op())),
),
no_op_wrapper)
updateOps.append(updateFactorOps)
with tf.control_dependencies([updateFactorOps]):
def gradOp():
return list(g)
def getKfacGradOp():
return self.getKfacPrecondUpdates(g, varlist)
u = tf.cond(tf.greater(self.factor_step,
tf.convert_to_tensor(0)), getKfacGradOp, gradOp)
optim = tf.train.MomentumOptimizer(
self._lr * (1. - self._momentum), self._momentum)
#optim = tf.train.AdamOptimizer(self._lr, epsilon=0.01)
def optimOp():
def updateOptimOp():
if self._full_stats_init:
return tf.cond(tf.greater(self.factor_step, tf.convert_to_tensor(0)), lambda: optim.apply_gradients(list(zip(u, varlist))), tf.no_op)
else:
return optim.apply_gradients(list(zip(u, varlist)))
if self._full_stats_init:
return tf.cond(tf.greater_equal(self.stats_step, self._stats_accum_iter), updateOptimOp, tf.no_op)
else:
return tf.cond(tf.greater_equal(self.sgd_step, self._cold_iter), updateOptimOp, tf.no_op)
updateOps.append(optimOp())
return tf.group(*updateOps), qr
def apply_gradients(self, grads):
coldOptim = tf.train.MomentumOptimizer(
self._cold_lr, self._momentum)
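        # Warm-up schedule: run plain momentum SGD for the first self._cold_iter steps,
        # then switch to the K-FAC preconditioned update built in apply_gradients_kfac.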
def coldSGDstart():
sgd_grads, sgd_var = zip(*grads)
if self.max_grad_norm != None:
sgd_grads, sgd_grad_norm = tf.clip_by_global_norm(sgd_grads,self.max_grad_norm)
sgd_grads = list(zip(sgd_grads,sgd_var))
sgd_step_op = tf.assign_add(self.sgd_step, 1)
coldOptim_op = coldOptim.apply_gradients(sgd_grads)
return tf.group(*[sgd_step_op, coldOptim_op])
kfacOptim_op, qr = self.apply_gradients_kfac(grads)
def warmKFACstart():
return kfacOptim_op
return tf.cond(tf.greater(self.sgd_step, self._cold_iter), warmKFACstart, coldSGDstart), qr
def minimize_(self, loss, loss_sampled, var_list=None):
grads = self.compute_gradients(loss, var_list=var_list)
update_stats_op = self.compute_and_apply_stats(
loss_sampled, var_list=var_list)
return self.apply_gradients(grads)
def tf_step(self, time, variables, **kwargs):
"""
Creates the TensorFlow operations for performing an optimization step on the given variables, including
actually changing the values of the variables.
Args:
time: Time tensor. Not used for this optimizer.
variables: List of variables to optimize.
**kwargs:
fn_loss : loss function tensor to differentiate.
Returns:
List of delta tensors corresponding to the updates for each optimized variable.
"""
fn_loss = kwargs["fn_loss"]
if variables is None:
            variables = tf.trainable_variables()
return tf.gradients(fn_loss, variables)
def apply_step(self, variables, deltas, loss_sampled):
"""
Applies the given (and already calculated) step deltas to the variable values.
Args:
variables: List of variables.
deltas: List of deltas of same length.
loss_sampled : the sampled loss
Returns:
            The step-applied operation (the grouped K-FAC update op).
"""
        update_stats_op = self.compute_and_apply_stats(
            loss_sampled, var_list=variables)
        grads = [(a, b) for a, b in zip(deltas, variables)]
kfacOptim, _ = self.apply_gradients_kfac(grads)
return kfacOptim
def minimize(self, time, variables, **kwargs):
"""
Performs an optimization step.
Args:
            time: Time tensor. Not used for this optimizer.
variables: List of variables to optimize.
**kwargs:
fn_loss : loss function tensor that is differentiated
sampled_loss : the sampled loss from running the model.
Returns:
The optimization operation.
"""
loss = kwargs["fn_loss"]
sampled_loss = kwargs["sampled_loss"]
min_op, _ = self.minimize_(loss, sampled_loss, var_list=variables)
return min_op
| [] |
2024-01-10 | psui3905/tensorforce | tensorforce~core~optimizers~kfac_utils.py | # Copyright 2018 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#code refractored from openai/baselines
import tensorflow as tf
def gmatmul(a, b, transpose_a=False, transpose_b=False, reduce_dim=None):
assert reduce_dim is not None
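    # Generalized matmul: contracts a 2-D matrix with a single dimension (reduce_dim) of a
    # higher-rank tensor by transposing/reshaping that tensor to 2-D, multiplying, and
    # restoring the original layout afterwards.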
# weird batch matmul
if len(a.get_shape()) == 2 and len(b.get_shape()) > 2:
# reshape reduce_dim to the left most dim in b
b_shape = b.get_shape()
if reduce_dim != 0:
b_dims = list(range(len(b_shape)))
b_dims.remove(reduce_dim)
b_dims.insert(0, reduce_dim)
b = tf.transpose(b, b_dims)
b_t_shape = b.get_shape()
b = tf.reshape(b, [int(b_shape[reduce_dim]), -1])
result = tf.matmul(a, b, transpose_a=transpose_a,
transpose_b=transpose_b)
result = tf.reshape(result, b_t_shape)
if reduce_dim != 0:
b_dims = list(range(len(b_shape)))
b_dims.remove(0)
b_dims.insert(reduce_dim, 0)
result = tf.transpose(result, b_dims)
return result
elif len(a.get_shape()) > 2 and len(b.get_shape()) == 2:
# reshape reduce_dim to the right most dim in a
a_shape = a.get_shape()
outter_dim = len(a_shape) - 1
reduce_dim = len(a_shape) - reduce_dim - 1
if reduce_dim != outter_dim:
a_dims = list(range(len(a_shape)))
a_dims.remove(reduce_dim)
a_dims.insert(outter_dim, reduce_dim)
a = tf.transpose(a, a_dims)
a_t_shape = a.get_shape()
a = tf.reshape(a, [-1, int(a_shape[reduce_dim])])
result = tf.matmul(a, b, transpose_a=transpose_a,
transpose_b=transpose_b)
result = tf.reshape(result, a_t_shape)
if reduce_dim != outter_dim:
a_dims = list(range(len(a_shape)))
a_dims.remove(outter_dim)
a_dims.insert(reduce_dim, outter_dim)
result = tf.transpose(result, a_dims)
return result
elif len(a.get_shape()) == 2 and len(b.get_shape()) == 2:
return tf.matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
assert False, 'something went wrong'
def clipoutNeg(vec, threshold=1e-6):
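    # Zero out entries at or below the threshold (used to clip tiny or negative eigenvalues).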
mask = tf.cast(vec > threshold, tf.float32)
return mask * vec
def detectMinVal(input_mat, var, threshold=1e-6, name='', debug=False):
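    # Clip near-zero/negative eigenvalues; in debug mode, wrap the result in a tf.Print that
    # flags suspicious eigenvalue ratios for the given variable.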
eigen_min = tf.reduce_min(input_mat)
eigen_max = tf.reduce_max(input_mat)
eigen_ratio = eigen_max / eigen_min
input_mat_clipped = clipoutNeg(input_mat, threshold)
if debug:
input_mat_clipped = tf.cond(tf.logical_or(tf.greater(eigen_ratio, 0.), tf.less(eigen_ratio, -500)), lambda: input_mat_clipped, lambda: tf.Print(
input_mat_clipped, [tf.convert_to_tensor('screwed ratio ' + name + ' eigen values!!!'), tf.convert_to_tensor(var.name), eigen_min, eigen_max, eigen_ratio]))
return input_mat_clipped
def factorReshape(Q, e, grad, facIndx=0, ftype='act'):
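    # Reshape the eigenvalue vector e so it broadcasts against the gradient along the factored
    # axis: an input-side axis for 'act' factors, an output-side axis for 'grad' factors.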
grad_shape = grad.get_shape()
if ftype == 'act':
assert e.get_shape()[0] == grad_shape[facIndx]
expanded_shape = [1, ] * len(grad_shape)
expanded_shape[facIndx] = -1
e = tf.reshape(e, expanded_shape)
if ftype == 'grad':
assert e.get_shape()[0] == grad_shape[len(grad_shape) - facIndx - 1]
expanded_shape = [1, ] * len(grad_shape)
expanded_shape[len(grad_shape) - facIndx - 1] = -1
e = tf.reshape(e, expanded_shape)
return Q, e
| [] |
2024-01-10 | bigscience-workshop/Megatron-DeepSpeed | megatron~model~bert_model.py | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT model."""
import torch
from megatron import get_args
from megatron import mpu
from megatron.enums import AttnMaskType
from megatron.model.language_model import parallel_lm_logits
from megatron.model.language_model import get_language_model
from megatron.model.fused_layer_norm import MixedFusedLayerNorm as LayerNorm
from megatron.model.utils import openai_gelu, erf_gelu
from megatron.model.utils import get_linear_layer
from megatron.model.utils import init_method_normal
from megatron.model.utils import scaled_init_method_normal
from .module import MegatronModule
def bert_extended_attention_mask(attention_mask):
# We create a 3D attention mask from a 2D tensor mask.
# [b, 1, s]
attention_mask_b1s = attention_mask.unsqueeze(1)
# [b, s, 1]
attention_mask_bs1 = attention_mask.unsqueeze(2)
# [b, s, s]
attention_mask_bss = attention_mask_b1s * attention_mask_bs1
# [b, 1, s, s]
extended_attention_mask = attention_mask_bss.unsqueeze(1)
# Convert attention mask to binary:
extended_attention_mask = (extended_attention_mask < 0.5)
return extended_attention_mask
def bert_position_ids(token_ids):
# Create position ids
seq_length = token_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long,
device=token_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(token_ids)
return position_ids
class BertLMHead(MegatronModule):
"""Masked LM head for Bert
Arguments:
mpu_vocab_size: model parallel size of vocabulary.
hidden_size: hidden size
init_method: init method for weight initialization
layernorm_epsilon: tolerance for layer norm divisions
parallel_output: whether output logits being distributed or not.
"""
def __init__(self, mpu_vocab_size, hidden_size, init_method,
layernorm_epsilon, parallel_output):
super(BertLMHead, self).__init__()
args = get_args()
self.bias = torch.nn.Parameter(torch.zeros(mpu_vocab_size))
mpu.set_tensor_model_parallel_attributes(self.bias, True, 0, 1)
self.parallel_output = parallel_output
self.dense = get_linear_layer(hidden_size, hidden_size, init_method)
self.layernorm = LayerNorm(hidden_size, eps=layernorm_epsilon)
self.gelu = torch.nn.functional.gelu
if args.openai_gelu:
self.gelu = openai_gelu
elif args.onnx_safe:
self.gelu = erf_gelu
def forward(self, hidden_states, word_embeddings_weight):
hidden_states = self.dense(hidden_states)
hidden_states = self.gelu(hidden_states)
hidden_states = self.layernorm(hidden_states)
output = parallel_lm_logits(hidden_states,
word_embeddings_weight,
self.parallel_output,
bias=self.bias)
return output
def post_language_model_processing(lm_output, pooled_output,
lm_head, binary_head,
lm_labels,
logit_weights,
fp16_lm_cross_entropy):
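    # Compute the masked-LM logits (and optional next-sentence logits); when labels are
    # provided, return the vocab-parallel cross-entropy loss instead of the raw logits.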
# Output.
lm_logits = lm_head(
lm_output, logit_weights)
binary_logits = None
if binary_head is not None:
binary_logits = binary_head(pooled_output)
if lm_labels is None:
return lm_logits, binary_logits
else:
if fp16_lm_cross_entropy:
assert lm_logits.dtype == torch.half
lm_loss = mpu.vocab_parallel_cross_entropy(lm_logits, lm_labels)
else:
lm_loss = mpu.vocab_parallel_cross_entropy(lm_logits.float(),
lm_labels)
return lm_loss, binary_logits
class BertModel(MegatronModule):
"""Bert Language model."""
def __init__(self,
num_tokentypes=2,
add_binary_head=True,
parallel_output=True,
pre_process=True,
post_process=True):
super(BertModel, self).__init__()
args = get_args()
self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy
self.add_binary_head = add_binary_head
self.parallel_output = parallel_output
self.pre_process = pre_process
self.post_process = post_process
init_method = init_method_normal(args.init_method_std)
scaled_init_method = scaled_init_method_normal(args.init_method_std,
args.num_layers)
self.language_model, self._language_model_key = get_language_model(
num_tokentypes=num_tokentypes,
add_pooler=self.add_binary_head,
encoder_attn_mask_type=AttnMaskType.padding,
init_method=init_method,
scaled_init_method=scaled_init_method,
pre_process=self.pre_process,
post_process=self.post_process)
self.initialize_word_embeddings(init_method_normal)
if self.post_process:
self.lm_head = BertLMHead(
self.word_embeddings_weight().size(0),
args.hidden_size, init_method, args.layernorm_epsilon, parallel_output)
self._lm_head_key = 'lm_head'
self.binary_head = None
if self.add_binary_head:
self.binary_head = get_linear_layer(args.hidden_size, 2,
init_method)
self._binary_head_key = 'binary_head'
def set_input_tensor(self, input_tensor):
"""See megatron.model.transformer.set_input_tensor()"""
self.language_model.set_input_tensor(input_tensor)
def forward(self, bert_model_input, attention_mask,
tokentype_ids=None, lm_labels=None):
extended_attention_mask = bert_extended_attention_mask(attention_mask)
input_ids = bert_model_input
position_ids = bert_position_ids(input_ids)
lm_output = self.language_model(
input_ids,
position_ids,
extended_attention_mask,
tokentype_ids=tokentype_ids
)
if self.post_process and self.add_binary_head:
lm_output, pooled_output = lm_output
else:
pooled_output = None
if self.post_process:
return post_language_model_processing(lm_output, pooled_output,
self.lm_head, self.binary_head,
lm_labels,
self.word_embeddings_weight(),
self.fp16_lm_cross_entropy)
else:
return lm_output
def state_dict_for_save_checkpoint(self, destination=None, prefix='',
keep_vars=False):
"""For easy load when model is combined with other heads,
add an extra key."""
state_dict_ = {}
state_dict_[self._language_model_key] \
= self.language_model.state_dict_for_save_checkpoint(
destination, prefix, keep_vars)
if self.post_process:
state_dict_[self._lm_head_key] \
= self.lm_head.state_dict_for_save_checkpoint(
destination, prefix, keep_vars)
if self.post_process and self.add_binary_head:
state_dict_[self._binary_head_key] \
= self.binary_head.state_dict(destination, prefix, keep_vars)
# Save word_embeddings.
if self.post_process and not self.pre_process:
state_dict_[self._word_embeddings_for_head_key] \
= self.word_embeddings.state_dict(destination, prefix, keep_vars)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
self.language_model.load_state_dict(
state_dict[self._language_model_key], strict=strict)
if self.post_process:
self.lm_head.load_state_dict(
state_dict[self._lm_head_key], strict=strict)
if self.post_process and self.add_binary_head:
self.binary_head.load_state_dict(
state_dict[self._binary_head_key], strict=strict)
# Load word_embeddings.
if self.post_process and not self.pre_process:
self.word_embeddings.load_state_dict(
state_dict[self._word_embeddings_for_head_key], strict=strict)
| [] |
2024-01-10 | wmvanvliet/dyconnmap | examples~fc_coherence.py | # -*- coding: utf-8 -*-
# Author: Avraam Marimpis <[email protected]>
import numpy as np
np.set_printoptions(precision=3, linewidth=256)
from dyconnmap.fc import coherence, icoherence
if __name__ == "__main__":
data = np.load(
"/home/makism/Github/dyconnmap/examples/data/eeg_32chans_10secs.npy")
data = data[0:5, :]
csdparams = {'NFFT': 256, 'noverlap': 256 / 2.0}
coh = coherence(data, [1.0, 4.0], 128.0, **csdparams)
icoh = icoherence(data, [1.0, 4.0], 128.0)
print("Coherence: \n", coh)
print("Imagenary Coherence: \n", icoh)
| [] |
2024-01-10 | MaxRoenco/GPT | GPT~Whisper.py | import openai
KEY = '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'
file = open('bg.mp3', 'rb')
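# Note: openai.Audio.translate sends the audio file to the Whisper model and returns an
# English translation of the speech; with response_format='text' the result is a plain string.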
result = openai.Audio.translate(
api_key=KEY,
model='whisper-1',
file=file,
response_format='text'
)
print(result)
| [] |
2024-01-10 | MaxRoenco/GPT | GPT~telegrambot.py | import telebot
import openai
telegram_key = '6782572542:AAEk_oSfzGq6nRjo4PJf95nmVtAlyZef1Ko'
openai.api_key = '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'
bot = telebot.TeleBot(telegram_key)
@bot.message_handler(commands=['start'])
def hello(message):
bot.send_message(message.chat.id, 'Привет, я твой ChatGPT бот. Готов тебе помочь!')
@bot.message_handler(content_types=['text'])
def main(message):
reply = ''
response = openai.Completion.create(
engine='text-davinci-003',
prompt=message.text,
max_tokens=300,
temperature=0.7,
n=1,
stop=None
)
if response and response.choices:
reply = response.choices[0].text.strip()
else:
reply = 'Ой, что-то не так!('
bot.send_message(message.chat.id, reply)
bot.polling(none_stop=True) | [] |
2024-01-10 | sarang0909/custom_chatgpt_chatbot | src~data_collection~data_processor.py | """A core module to process website text data.
"""
import os
import pandas as pd
from urllib.parse import urlparse
import tiktoken
import openai
from openai.embeddings_utils import distances_from_embeddings
import numpy as np
from src.utility.utils import config
from src.utility import constants
from src.utility.nlp_text_cleaner import remove_newlines
from src.data_collection import web_crawler
openai.api_key = config.get(constants.OPENAI_API_KEY)
class DataProcessor:
"""A class having data processing methods
"""
def __init__(self,full_url) -> None:
"""A Dataprocessor constructor
Args:
full_url (str): Full website URL
"""
# Load the cl100k_base tokenizer which is designed to work with the ada-002 model
self.tokenizer = tiktoken.get_encoding("cl100k_base")
self.max_tokens = 500
#self.domain = config.get(constants.DOMAIN)
self.full_url = full_url
self.local_domain = urlparse(self.full_url).netloc
def create_dataset_from_text_files(self):
"""A method to create data file from website text files.
"""
# Create a list to store the text files
texts = []
# Get all the text files in the text directory
for file in os.listdir(config.get(constants.TEXT_DATA_PATH) + self.local_domain + "/"):
# Open the file and read the text
with open(
config.get(constants.TEXT_DATA_PATH) + self.local_domain + "/" + file,
"r",
encoding="UTF-8",
) as f:
text = f.read()
# Omit the first 11 lines and the last 4 lines, then replace -, _, and #update with spaces.
texts.append(
(
file[11:-4]
.replace("-", " ")
.replace("_", " ")
.replace("#update", ""),
text,
)
)
# Create a dataframe from the list of texts
df = pd.DataFrame(texts, columns=["fname", "text"])
# Set the text column to be the raw text with the newlines removed
df["text"] = df.fname + ". " + remove_newlines(df.text)
df.to_csv(config.get(constants.EMBEDDINGS_DATA_PATH) + self.local_domain + "/scraped.csv")
def tokenize_texts(self):
"""A mthod to tokenize text data
Returns:
dataframe (pandas): A dataframe with tokenized text
"""
df = pd.read_csv(config.get(constants.EMBEDDINGS_DATA_PATH) + self.local_domain + "/scraped.csv", index_col=0)
df.columns = ["title", "text"]
# Tokenize the text and save the number of tokens to a new column
df["n_tokens"] = df.text.apply(lambda x: len(self.tokenizer.encode(x)))
# Visualize the distribution of the number of tokens per row using a histogram
# df.n_tokens.hist()
return df
# Function to split the text into chunks of a maximum number of tokens
def split_into_many(self,text):
"""A method to split the text into predefined samll chunks
Args:
text (str): A text data
Returns:
chunks (list): A list of chunks of split text data
"""
# Split the text into sentences
sentences = text.split(". ")
# Get the number of tokens for each sentence
n_tokens = [len(self.tokenizer.encode(" " + sentence)) for sentence in sentences]
chunks = []
tokens_so_far = 0
chunk = []
# Loop through the sentences and tokens joined together in a tuple
for sentence, token in zip(sentences, n_tokens):
# If the number of tokens so far plus the number of tokens in the current sentence is greater
# than the max number of tokens, then add the chunk to the list of chunks and reset
# the chunk and tokens so far
if tokens_so_far + token > self.max_tokens:
chunks.append(". ".join(chunk) + ".")
chunk = []
tokens_so_far = 0
# If the number of tokens in the current sentence is greater than the max number of
# tokens, go to the next sentence
if token > self.max_tokens:
continue
# Otherwise, add the sentence to the chunk and add the number of tokens to the total
chunk.append(sentence)
tokens_so_far += token + 1
return chunks
def create_initial_dataset(self,df):
"""A method to create initial dataframe from text
Args:
df (pandas): A dataframe with text data
Returns:
df (pandas): A dataframe with tokenized and chunk text data
"""
shortened = []
# Loop through the dataframe
for row in df.iterrows():
# If the text is None, go to the next row
if row[1]["text"] is None:
continue
# If the number of tokens is greater than the max number of tokens, split the text into chunks
if row[1]["n_tokens"] > self.max_tokens:
shortened += self.split_into_many(row[1]["text"])
# Otherwise, add the text to the list of shortened texts
else:
shortened.append(row[1]["text"])
################################################################################
### Step 9
################################################################################
df = pd.DataFrame(shortened, columns=["text"])
df["n_tokens"] = df.text.apply(lambda x: len(self.tokenizer.encode(x)))
# df.n_tokens.hist()
return df
def create_embeddings(self,df):
"""A method to create text embeddings of text chunks
Args:
            df (pandas dataframe): A dataframe with tokenized and chunked text data
"""
# Note that you may run into rate limit issues depending on how many files you try to embed
# Please check out our rate limit guide to learn more on how to handle
# this: https://platform.openai.com/docs/guides/rate-limits
df["embeddings"] = df.text.apply(
lambda x: openai.Embedding.create(input=x, engine="text-embedding-ada-002")[
"data"
][0]["embedding"]
)
df.to_csv(config.get(constants.EMBEDDINGS_DATA_PATH) + self.local_domain + "/embeddings.csv")
# df.head()
def get_embeddings(self):
"""A method to retun embeddings dataframe
Returns:
df (pandas dataframe): A dataframe with text embeddings
"""
if not os.path.exists(
config.get(constants.EMBEDDINGS_DATA_PATH)
+ self.local_domain
+ "/"
):
web_crawler.crawl(self.full_url)
self.create_dataset_from_text_files()
dataset = self.tokenize_texts()
dataset = self.create_initial_dataset(dataset)
self.create_embeddings(dataset)
df = pd.read_csv(config.get(constants.EMBEDDINGS_DATA_PATH) + self.local_domain + "/embeddings.csv", index_col=0)
df["embeddings"] = df["embeddings"].apply(eval).apply(np.array)
return df
| [] |
2024-01-10 | Zackperez/python_mvc | Controladores~ventana_tres_Controlador.py | from ventana_principal import *
from Modelos.ventana_tres_Modelo import *
from Vistas.ventana_tres_Vista import *
import json
import openai
class Ventana_Tres_Controller:
def __init__(self, root):
self.model = Ventana_Tres_Model()
self.view = Ventana_Tres_View(root)
self.view.btnEnviar_texto_hacia_IA.config(command=self.chatBot)
def chatBot(self):
try:
openai.api_key =("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")
conversation ="Fui creado por OpenAI. ¿Cómo te puedo ayudar hoy?"
#self.view.historial_de_conversacion.insert(END,conversation)
pregunta_usuario = self.view.txtEntrada_texto_usuario.get()
self.view.historial_de_conversacion.insert(END,"Humano: "+pregunta_usuario)
if pregunta_usuario == "Adios":
print("AI: ¡Adiós!")
                self.view.parent.destroy()
                return
conversation += "\nHuman:" + pregunta_usuario + "\nAI:"
response = openai.Completion.create(
model="davinci",
prompt = conversation,
temperature=0.9,
max_tokens=150,
top_p=1,
frequency_penalty=0.0,
presence_penalty=0.6,
stop=["\n"," Human:", " AI:"]
)
respuesta_ia = response.choices[0].text.strip()
conversation += respuesta_ia
self.view.historial_de_conversacion.insert(END,"IA: "+respuesta_ia)
self.view.txtEntrada_texto_usuario.delete(0,END)
except Exception as e:
print(e)
| [] |
2024-01-10 | Zackperez/python_mvc | nosebro.py | import nlpcloud
import os
import openai
token = "0c763b98f814c4649754c8c6e50425f99969aa72"
client = nlpcloud.Client("python-langdetect", token)
# Returns a json object.
def existe_lenguaje(idioma):
    # check whether the detected language code is one of the supported languages
    if idioma in lista_lenguajes:
        print("idioma detectado: " + idioma)
    else:
        print("El idioma no existe")
lang = client.langdetection("Hard to love")
espanol = "es"
lista_lenguajes = ["es", "de", "ru", "pt", "ko", "ja","en","sl"] #español,aleman,ruso,portugues,coreano,japones,inglés
a = lang.get('languages')
listapy = a[0]
listafinal = listapy.items()
idioma = list(listafinal)[0][0]
existe_lenguaje(idioma)
#for a in range(len(idioma)):
# print(a)
print("Tamaño",len(idioma))
"""
tamano = len(idioma)
existe_lenguaje(idioma)
print(lang)
print(tamano)
"""
"""
# -*- coding: utf-8 -*-
import openai
import json
openai.api_key =("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")
humano_respuestas = []
ia_respuestas = []
def existe_historial():
try:
with open('answer.json') as archivo:
return True
except FileNotFoundError as e:
return False
def devolver_respuestas(humano_respuestas, ia_respuestas):
respuestas = {}
respuestas = {"Humano":humano_respuestas, "IA":ia_respuestas}
return respuestas
def crear_answer_json():
if existe_historial() == True:
nuevos_datos = devolver_respuestas(humano_respuestas, ia_respuestas)
with open("answer.json",encoding="utf-8") as archivo_json:
datos = json.load(archivo_json)
datos.append(nuevos_datos)
with open("answer.json", 'w',encoding="utf-8") as archivo_json:
json.dump(datos, archivo_json, indent=3, ensure_ascii=False)
print("Se han añadido los siguientes datos al archivo " + archivo_json.name+"\n")
print(datos)
else:
with open("answer.json", 'w',encoding="utf-8") as archivo_json:
historial = []
historial.append(devolver_respuestas(humano_respuestas, ia_respuestas))
json.dump(historial, archivo_json, indent=3, ensure_ascii=False)
print(archivo_json.name+" creado exitosamente")
print("Se han añadido los siguientes datos al archivo " + archivo_json.name+"\n")
print(historial)
conversation ="Fui creado por OpenAI. ¿Cómo te puedo ayudar hoy?"
print(conversation)
i = 1
while (i !=0):
question = input("Human: ")
if question == "Adios":
respuestas = devolver_respuestas(humano_respuestas, ia_respuestas)
crear_answer_json()
print("AI: ¡Adiós!")
break
humano_respuestas.append(question)
conversation += "\nHuman:" + question + "\nAI:"
response = openai.Completion.create(
model="davinci",
prompt = conversation,
temperature=0.9,
max_tokens=150,
top_p=1,
frequency_penalty=0.0,
presence_penalty=0.6,
stop=["\n"," Human:", " AI:"]
)
answer = response.choices[0].text.strip()
ia_respuestas.append(response.choices[0].text)
conversation += answer
print("AI: "+ answer)
root = tk.Tk()
app = Controller(root)
root.mainloop()
import tkinter as tk
import tkinter as tk
import json
import nlpcloud
from tkinter import ANCHOR, ttk
import os
import openai
class Model:
def __init__(self):
self.texto_traducir = tk.StringVar()
def get_texto_traducir(self):
return self.texto_traducir
def set_texto_traducir(self, texto_traducir):
self.texto_traducir = texto_traducir
class View(tk.Frame):
def __init__(self, parent):
super().__init__(parent)
self.parent = parent
self.opcion = tk.StringVar()
idiomas = ("Español", "Aleman", "Portugués", "Ruso", "Coreano","Japones")
self.combo_idiomas = ttk.Combobox(self.parent,width=10,textvariable=self.opcion,values=idiomas)
self.combo_idiomas.current(0)
self.combo_idiomas.grid(column=0, row=4)
self.txt_lbl()
self.botones_widget()
self.configurar_ventana()
def txt_lbl(self):
def on_entry_validate(S): return S.isalpha()
vcmd = (root.register(on_entry_validate),'%S')
self.lblTextoTraducir = tk.Label(self.parent,text="texto a traducir: ").grid(row=0, column=0)
self.txtTraducir = tk.Entry(self.parent, validate="key", validatecommand=vcmd)
self.txtTraducir.grid(row=0,column=1,padx=10,pady=10,ipadx=10,ipady=30)
self.lblTextoTraducido = tk.Label(self.parent,text="texto traducido: ").grid(row=0, column=2)
self.lblTextoTraducido = tk.Label(self.parent, text="").grid(row=0, column=3)
self.lblres = tk.Label(self.parent, text="Resultado").grid(row=3, column=0)
def configurar_ventana(self):
self.parent.geometry("480x300")
self.parent.resizable(0, 0)
def botones_widget(self):
self.btnguardar = tk.Button(text="Guardar")
self.btnguardar.grid(row=2, column=0)
self.btnmostrar = tk.Button(text="Mostrar")
self.btnmostrar.grid(row=2, column=1)
def mostrar_resultado(self, message):
self.lblres['text'] = message
def mostrar_error(self,message):
self.lblres['text'] = message
def campo_vacio (self, message):
self.lblres['text'] = message
class Controller:
def __init__(self, root):
self.model = Model()
self.view = View(root)
self.view.btnguardar.config(command=self.guardar_texto)
self.view.btnmostrar.config(command=self.traducir_el_texto)
def guardar_texto(self):
try:
self.model.texto_traducir = self.view.txtTraducir.get()
a = self.view.combo_idiomas.get()
print(a)
except:
self.borrar_campos()
xd = "Verifica que los campos no esten vacío"
print(xd)
self.view.campo_vacio(xd)
def mostrar_texto(self):
texto = self.model.get_texto_traducir()
self.view.lblTextoTraducido['text'] = "Texto traducido", texto
def borrar_campos(self):
try:
self.view.txtTraducir.delete(0, tk.END)
except Exception as a:
print(a)
def muestra_traduccion(self):
texto_traducido = self.model.get_texto_traducir()
self.view.lblres['text'] = texto_traducido
def agregar_datos_generales_json(self, n1, n2, res):
informacion_json_final = []
if self.existe_historial() == True:
nuevos_datos = {"numero1": n1, "numero2": n2, "resultado": res}
with open("historial.json") as archivo_json:
datos = json.load(archivo_json)
datos.append(nuevos_datos)
with open("historial.json", 'w') as archivo_json:
json.dump(datos, archivo_json, indent=3)
print("Se han añadido los siguientes datos al archivo " +archivo_json.name + "\n")
else:
informacion_usuario = {"numero1": n1,"numero2": n2,"resultado": res}
with open("historial.json", 'w') as archivo_json:
informacion_json_final.append(informacion_usuario)
json.dump(informacion_json_final, archivo_json, indent=3)
print(archivo_json.name + " creado exitosamente")
def existe_historial(self):
try:
with open('historial.json') as archivo:
return True
except FileNotFoundError as e:
return False
def combo_seleccion(self):
if self.view.combo_idiomas.get() == "Español":
return "spa_Latn"
if self.view.combo_idiomas.get() == "Aleman":
return "deu_Latn"
if self.view.combo_idiomas.get() == "Portugués":
return "por_Latn"
if self.view.combo_idiomas.get() == "Ruso":
return "rus_Cyrl"
if self.view.combo_idiomas.get() == "Coreano":
return "kor_Hang"
if self.view.combo_idiomas.get() == "Japones":
return "jpn_Jpan"
def traducir_el_texto(self):
idioma = self.combo_seleccion()
client = nlpcloud.Client("nllb-200-3-3b","0c763b98f814c4649754c8c6e50425f99969aa72",gpu=False)
texto_traducido = client.translation(self.model.get_texto_traducir(),source="eng_Latn",target=idioma)
self.view.lblres['text'] = texto_traducido
if __name__ == "__main__":
class Vista:
def __init__(self):
self.ventana1=tk.Tk()
self.ventana1.title("Programa ")
self.ventana1.geometry("500x300")
self.lbln1=tk.Label(self.ventana1, text="Numero 1")
self.lbln1.grid( row=0,column=0)
self.txtn1=tk.Entry()
self.txtn1.grid(row=0,column=1)
self.lbln2=tk.Label(self.ventana1, text="Numero 2")
self.lbln2.grid(row=1,column=0)
self.txtn2=tk.Entry()
self.txtn2.grid(row=1,column=1)
self.lblres=tk.Label(self.ventana1, text="Resultado")
self.lblres.grid(row=3,column=0)
self.btncalcular=tk.Button(text="Calcular",command= self.boton_guardar_clicked)
self.btncalcular.grid(row=2,column=0)
self.control = None
self.ventana1.mainloop()
def mostrar_resultado(self, message):
self.lblres['text'] = message
def set_control(self, control):
self.control = control
def boton_guardar_clicked(self):
if self.control:
self.control.mostrar_resultado(self.lbln1.get())
class Modelo:
def __init__(self,n1):
self.numero1 = n1
def set_numero1(self,n1):
self.numero1 = n1
def get_numero1(self):
return self.numero1
class Controlador:
def __init__(self, modelo, vista):
self.modelo = modelo
self.vista = vista
def muestra_numero (self, n1):
self.modelo.numero1 = n1
self.vista.mostrar_resultado(f'El numero es {n1}')
class App:
def __init__(self):
super().__init__()
modelo = Modelo(2)
vista = Vista()
control = Controlador(modelo,vista)
vista.set_control(control)
if __name__ == '__main__':
ap = App()
"""
openai.api_key = os.getenv("OPENAI_API_KEY")
response = openai.Completion.create(
model="text-davinci-002",
prompt="Translate this into 1. French, 2. Spanish and 3. Japanese:\n\nWhat rooms do you have available?\n\n1. Quels sont les chambres que vous avez disponibles?\n2. ¿Qué habitaciones tienen disponibles?\n3. あなたはどんな部屋を持っていますか?",
temperature=0.3,
max_tokens=100,
top_p=1,
frequency_penalty=0,
presence_penalty=0
) | [
"Translate this into 1. French, 2. Spanish and 3. Japanese:\n\nWhat rooms do you have available?\n\n1. Quels sont les chambres que vous avez disponibles?\n2. ¿Qué habitaciones tienen disponibles?\n3. あなたはどんな部屋を持っていますか?"
] |
2024-01-10 | Zackperez/python_mvc | Controladores~ventana_dos_Controlador.py | from unittest import result
from ventana_principal import *
from Modelos.ventana_dos_Modelo import *
from Vistas.ventana_dos_Vista import *
import tkinter as tk
import nlpcloud
import openai
import json
class Ventana_Dos_Controller:
def __init__(self, root):
self.model = Ventana_dos_Model()
self.view = Ventana_dos_View(root)
self.view.btnEnviar.config(command=self.analizar_sentimiento)
self.view.btnspin.config(command=self.preguntas)
def analizar_sentimiento(self):
client = nlpcloud.Client("distilbert-base-uncased-finetuned-sst-2-english", "1c56cb1a8a4b5cb1079f2f2e0c89321585206468", gpu=False, lang="en")
        # To analyse the sentiment, the user must type into a field so that client.sentiment() can be used
sentimiento_analizar = self.view.txtEntrada.get()
res = client.sentiment(sentimiento_analizar)
        # Dig into the 'scored_labels' entry of the JSON returned in res to extract the analysed sentiment
a = res.get('scored_labels')
listapy = a[0]
listafinal = listapy.items()
sentimiento_analizado = list(listafinal)[0][1]
self.crear_traduccion_json("Analizador de sentimientos","Sentimiento a analizar:", sentimiento_analizar, "Sentimiento analizado:", sentimiento_analizado)
        self.view.lblresultado ['text'] = sentimiento_analizado  # Show the detected sentiment
self.view.txtEntrada.delete(0,END)
def preguntas(self):
openai.api_key = ("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")
        # Store the current value of the spinbox
cantidad_spinbox= self.view.spin_box.get()
        # Save whatever the question field contains
pregunta = self.view.txtEntrada_pregunta.get()
        # Concatenate the question with the number of items to show
pregunta_final= pregunta + cantidad_spinbox
        # The answer comes back inside a JSON object that has to be traversed to get the response
response = openai.Completion.create(
model="text-davinci-002",
prompt=pregunta_final,
temperature=0.5,
max_tokens=150,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
        # Inside the returned JSON, the text lives in the first entry of 'choices', under 'text'
        lista_generada = response.choices[0].text.strip()  # There is a small leading space, so .strip() removes it
        # Insert the result into the txtRespuesta widget
self.view.txtRespuesta.insert(END, lista_generada)
self.view.txtEntrada_pregunta.delete(0,END)
self.crear_traduccion_json("Generador de listas","Lista a generar:", pregunta, "Lista generada:" , lista_generada)
def existe_historial(self):
try:
with open('historial.json') as archivo:
return True
except FileNotFoundError as e:
return False
def devolver_respuestas(self,nombre_servicio, espacio_1, valor_espacio_1, espacio_2, valor_espacio_2):
diccionario_generado = {}
diccionario_generado = {espacio_1 : valor_espacio_1, espacio_2 : valor_espacio_2}
respuesta_final = {nombre_servicio: diccionario_generado}
return respuesta_final
def crear_traduccion_json(self, nombre_servicio, espacio_1, valor_espacio_1, espacio_2, valor_espacio_2):
if self.existe_historial() == True:
nuevos_datos = self.devolver_respuestas(nombre_servicio, espacio_1, valor_espacio_1, espacio_2, valor_espacio_2)
with open("historial.json",encoding="utf-8") as archivo_json:
datos = json.load(archivo_json)
datos.append(nuevos_datos)
with open("historial.json", 'w',encoding="utf-8") as archivo_json:
json.dump(datos, archivo_json, indent=3, ensure_ascii=False)
print("Se han añadido los siguientes datos al archivo " + archivo_json.name+"\n")
else:
with open("historial.json", 'w',encoding="utf-8") as archivo_json:
historial = []
historial.append(self.devolver_respuestas(nombre_servicio, espacio_1, valor_espacio_1, espacio_2, valor_espacio_2))
json.dump(historial, archivo_json, indent=3, ensure_ascii=False)
print(archivo_json.name+" creado exitosamente")
print("Se han añadido los siguientes datos al archivo " + archivo_json.name+"\n") | [
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | Jacoby6000/doomer | doomer~language_models.py | from abc import ABC, abstractmethod
from typing import Any
from os import path
import json
import openai
from transformers import GPT2LMHeadModel, GPT2TokenizerFast
import requests
from doomer.discord_utils import curlify, hundo_to_float
from doomer.settings import SETTINGS_DIR
class LanguageModel(ABC):
def __init__(self, model_name: str, settings: dict) -> None:
self.model_name = model_name
self.settings = settings
if path.exists(SETTINGS_DIR / f"{model_name}.json"):
with open(SETTINGS_DIR / f"{model_name}.json", "r") as infile:
self.settings.update(json.load(infile))
@abstractmethod
def completion_handler(self, prompt: str, **kwargs: Any) -> str:
raise NotImplementedError
class OpenAIGPT3LanguageModel(LanguageModel):
def __init__(self, model_name: str = "openai-gpt3") -> None:
settings = {
"temperature": 100,
"frequency_penalty": 0,
"presence_penalty": 50,
}
super().__init__(model_name, settings)
def completion_handler(self, prompt: str, max_tokens: int, stop: list = None):
completion = openai.Completion.create(
engine="davinci",
prompt=prompt,
max_tokens=max_tokens,
frequency_penalty=hundo_to_float(self.settings["frequency_penalty"]),
temperature=hundo_to_float(self.settings["temperature"]),
presence_penalty=hundo_to_float(self.settings["presence_penalty"]),
stop=stop,
)
completion_text = completion.choices[0].text
return completion_text
class ExafunctionGPTJLanguageModel(LanguageModel):
def __init__(self, api_key: str, model_name: str = "exafunction-gptj") -> None:
settings = {"temperature": 100, "min_tokens": 0}
self.api_url = "https://nlp-server.exafunction.com/text_completion"
self.api_key = api_key
super().__init__(model_name, settings)
def completion_handler(self, prompt: str, max_tokens: int, **kwargs: any):
headers = {
"Authorization": f"Bearer {self.api_key}",
"Content-Type": "application/json",
}
payload = {
"prompt": prompt,
"max_length": max_tokens,
"min_length": self.settings["min_tokens"],
"temperature": hundo_to_float(self.settings["temperature"]),
"remove_input": "true",
}
response = requests.post(self.api_url, json=payload, headers=headers)
completion = response.json()
completion_text = completion["text"]
return completion_text
class AI21JurassicLanguageModel(LanguageModel):
def __init__(
self,
api_key: str,
model_type: str = "j1-jumbo",
model_name: str = "ai21-jurassic",
) -> None:
settings = {
"model_type": model_type,
"temperature": 100,
"top_p": 100,
"max_tokens": 16,
}
self.api_key = api_key
super().__init__(model_name, settings)
@property
def api_url(self) -> str:
return f"https://api.ai21.com/studio/v1/{self.settings['model_type']}/complete"
def completion_handler(
        self, prompt: str, max_tokens: int, stop: list = None, **kwargs: Any
):
headers = {
"Authorization": f"Bearer {self.api_key}",
"Content-Type": "application/json",
}
payload = {
"prompt": prompt,
"maxTokens": max_tokens,
"temperature": hundo_to_float(self.settings["temperature"]),
"topP": hundo_to_float(self.settings["top_p"]),
"stopSequences": stop if stop else [],
}
response = requests.post(self.api_url, json=payload, headers=headers)
completion = response.json()
completion_text = completion["completions"][0]["data"]["text"]
return completion_text
class GPT2TransformersLanguageModel(LanguageModel):
def __init__(self, tokenizer_name: str, model_name: str) -> None:
self.tokenizer_name = tokenizer_name
self.model_name = model_name
self._tokenizer = None
self._model = None
settings = {"temperature": 100, "top_p": 100, "top_k": 0, "max_length": 1024}
super().__init__(model_name, settings)
def update_tokenizer(self, tokenizer_name: str):
return GPT2TokenizerFast.from_pretrained(tokenizer_name)
def update_model(self, model_name: str):
return GPT2LMHeadModel.from_pretrained(model_name)
def initialize(self):
self._tokenizer = self.update_tokenizer(self.tokenizer_name)
self._model = self.update_model(self.model_name)
def completion_handler(self, prompt: str, max_tokens: int = None, **kwargs: Any):
if not self._tokenizer or not self._model:
self.initialize()
if not max_tokens:
            max_tokens = self.settings["max_length"]
inputs = self._tokenizer(prompt, return_tensors="pt")
input_len = len(inputs["input_ids"][0])
full_completion = self._model.generate(
**inputs,
do_sample=True,
max_length=input_len + max_tokens,
top_p=hundo_to_float(self.settings["top_p"]),
top_k=hundo_to_float(self.settings["top_k"]),
)
completion = full_completion[0][input_len:]
completion.resize_(1, len(completion))
completion_text = self._tokenizer.decode(
completion[0], skip_special_tokens=True
)
return completion_text
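# Unlike the API-backed models above, GPT2TransformersLanguageModel samples locally with
# transformers. Rough usage sketch (assumes the "gpt2" weights can be downloaded):
#   model = GPT2TransformersLanguageModel(tokenizer_name="gpt2", model_name="gpt2")
#   text = model.completion_handler("The quick brown fox", max_tokens=20)
# The tokenizer and model are loaded lazily on the first call via initialize().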
| [] |
2024-01-10 | lilyhcn1/ExcelRunAnyCode | %E8%80%81%E9%BB%84%E7%89%9B%E5%B0%8F%E5%B7%A5%E5%85%B7~%E8%84%9A%E6%9C%AC%E6%96%87%E4%BB%B6~py~testpy.py | #一、 引用函数、库等写在最上方
#11111111111111111111111111111111111111111111111111111111111111
import openai
def askChatGPT(messages):
MODEL = "gpt-3.5-turbo"
response = openai.ChatCompletion.create(
model=MODEL,
messages = messages,
temperature=0)
try:
r=response['choices'][0]['message']['content']
except:
r="返回错误,请检查网络!"
return r
def transtrate(msg,flag="英文"):
messages=[
{"role": "system","content":"你是一个翻译机器人,你会帮我把以下的文本翻译为"+flag+":"},
{"role": "user", "content": ""+msg},
]
return askChatGPT(messages)
def chat(msg):
messages=[
{"role": "system","content":"你是一个聊天机器人。"},
{"role": "user", "content": ""+msg},
]
return askChatGPT(messages)
def rolechat(msg,flag="对联"):
messages=[
{"role": "system","content":"你是一个"+flag+"机器人"},
{"role": "user", "content": ""+msg},
]
return askChatGPT(messages)
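# Illustrative example of the messages structure handed to askChatGPT (not part of the
# original script; the strings below are just this file's default values):
#   messages = [
#       {"role": "system", "content": "你是一个对联机器人"},
#       {"role": "user", "content": "天上一只鸟"},
#   ]
#   reply = askChatGPT(messages)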
#11111111111111111111111111111111111111111111111111111111111111
# --------------- Excel input/output glue (by r34.cc) ---------------
import os,base64,time,sys,json,lilyfun # import helper functions from the same directory
prflag = "true" # 是否打印输出,true输出
inarr,outarr={},{}
lilyfun.tj()
# 2. Default input/output header rows used when a run fails
#2222222222222222222222222222222222222222222222222222222222222
# input text fields
inarr["角色名"]="对联"
inarr["聊天文本"]="天上一只鸟"
inarr["chatgptkey"]="GetFromIni"
inarr[""]=""
inarr[""]=""
outarr["AI回复"] = "待返回"
outarr[""] = ""
outarr[""] = ""
outarr[""] = ""
outarr[""] = ""
# file markers
fkeyold="" # which of the input variables marks a file
fkeynew="" # which of the output variables marks a file
#2222222222222222222222222222222222222222222222222222222222222
config=lilyfun.readiniconfig()
inarr=lilyfun.updatearrfromini(inarr,config)
def main(fd2={}):
global inarr,outarr,prflag,fkeyold,fkeynew,mlkey,config
arr2ret,valarr,errarr = {},{},{}
wholepath,f64="",""
errarr=lilyfun.merge(inarr,outarr) #合并字典
    # ---------------- 3. Initialization: read the data ----------------
    # read valarr, i.e. the headers and the values under each header
    # ---------------- [1/4] read fd2 and build the base64 string json64 -----
    try: # 1.1 read fd2, the dict passed in
json64=lilyfun.getfd2(fd2,"json64")
except:
#errarr["执行结果"]="读取fd2错误,请检查!"
errarr = lilyfun.printvalarr(errarr,"读取fd2错误,请检查!",prflag)
return lilyfun.mboutputarr(fd2,prflag,errarr)
lilyfun.titlepr("[1/4] fd2 传入成功!:","传入成功",prflag)
    # ---------------- [2/4] build f64 and decode json64 into jsonarr -----------
    try: # 1.2 decode json64 into jsonarr
jsonarr = lilyfun.json64tojsonarr(json64)
jsoncontentarr =jsonarr["contents"]
jsoncontentarr=lilyfun.updatearrfromini(jsoncontentarr,config)
jsonarr["contents"]=jsoncontentarr
f64=lilyfun.getfd2_f64(fd2,fkeyold,jsonarr)
except:
errarr = lilyfun.printvalarr(errarr,"jsonarr解码错误,请检查!",prflag)
        # fd2: values passed to the function, arr2ret: the result dict, prflag: print flag
return lilyfun.mboutputarr(fd2,prflag,errarr)
lilyfun.titlepr("[2/4] 解码成功 jsonarr:",jsonarr,prflag)
    # ---------------- [3/4] read the incoming values listed in the input section into dict valarr ------
try:
valarr=lilyfun.getvalarr(jsonarr,inarr,outarr,prflag)
except:
errarr = lilyfun.printvalarr(errarr,"[读输入]标题行没有需要的值,请确保标题行存在。",prflag)
return lilyfun.mboutputarr(fd2,prflag,errarr)
lilyfun.titlepr("","获取到的f64的长度为: " + str(len(f64)),prflag)
lilyfun.titlepr("[3/4] 检查输入值成功 valarr:",valarr,prflag)
    # reading additional files may still need to be added here
    # ---------------- [4/4] call the worker function and produce arr2ret and f64 -------------------
    try: # run the function; arr2ret and f64 must be produced at the end
old_filepath=lilyfun.randfile(valarr,fkeyold,"old")
new_filepath=lilyfun.randfile(jsoncontentarr,fkeynew,"new")
old_filepath=lilyfun.writefile64(f64,old_filepath)
    except: # record the result when the function fails
valarr = lilyfun.printvalarr(valarr,"[运行]读写文件错误。",prflag)
return lilyfun.mboutputarr(fd2,prflag,valarr)
    #3333333333333333333333333333333333333333333333333333
    #txt=mainrun(valarr,old_filepath,new_filepath)
    # inarr keys: 角色名 (role name), 聊天文本 (chat text), chatgptkey
    # outarr key: AI回复 (AI reply)
    # file markers: fkeyold fkeynew
    try: # run the function; arr2ret and f64 must be produced at the end
msg=valarr["聊天文本"]
flag=valarr["角色名"]
openai.api_key = valarr["chatgptkey"]
arr2ret["AI回复"]="AI回复测试"
    except Exception as e:# record the result when the call fails
        valarr = lilyfun.printvalarr(valarr,"[运行]调用函数出错,请检查值是否正确。" +"\n"+'错误类型:'+ e.__class__.__name__+"\n"+ '错误明细:'+str(e))
        return lilyfun.mboutputarr(fd2,prflag,valarr)
    #3333333333333333333333333333333333333333333333333333
    try: # post-processing: read back the output file and clean up the temp files
        f64=lilyfun.readfile2f64(new_filepath)# read the new file if one was produced
# newpath = valarr[fkeynew]
# if f64!="" and fkeynew!="" and fd2=={}:
# lilyfun.writefile64(f64,newpath)
lilyfun.safedel(old_filepath)
lilyfun.safedel(new_filepath)
arr2ret["执行结果"]="√"
    except: # record the result when the post-processing fails
valarr = lilyfun.printvalarr(valarr,"[执行函数后]读写、删除文件错误。",prflag)
return lilyfun.mboutputarr(fd2,prflag,valarr)
lilyfun.titlepr("[4/4] 函数执行成功 arr2ret:",arr2ret,prflag)
    # ---------------- 5. Write the file and return the dict -------------------
    try: # write the file
if fd2=={} and fkeynew !="":
excelfolder=lilyfun.safegetkey(jsonarr,"excelpath")
raltiveapth=jsonarr["contents"][fkeynew]
wholepath=lilyfun.getwholepath(raltiveapth,excelfolder)
lilyfun.titlepr("执行、输出成功。","",prflag)
            # this is the key return call; it also writes the file
#print(wholepath)
return lilyfun.mboutputarr(fd2,prflag,arr2ret,f64,wholepath,"key")
    except: # record the result when writing the file fails
valarr = lilyfun.printvalarr(valarr,"写入文件出错,请检查值是否正确。",prflag)
lilyfun.titlepr("最后写入文件出错,请检查值是否正确。","",prflag)
return lilyfun.mboutputarr(fd2,prflag,valarr)
if __name__ == '__main__':
main()
# Contents of fd2:
# "json64": the JSON uploaded with the request
# "f64": base64 of the uploaded file (a single file only)
# "fkeyold": header-row key of the uploaded file (a single file only)
# "fkeynew": header-row key of the returned file (a single file only)
#fd2,prflag="true",arr2ret,f64="",fkeynew="",keyflag="all"
# fd2: values passed in, prflag: print flag, keyflag: whether to output all Excel fields
# arr2ret: result dict, f64: base64 of the returned file, fkeynew: output key
#return lilyfun.mboutputarr(fd2,prflag,errarr)
| [
"你是一个聊天机器人。",
"你是一个PLACEHOLDER机器人",
"你是一个翻译机器人,你会帮我把以下的文本翻译为PLACEHOLDER:"
] |
2024-01-10 | lilyhcn1/ExcelRunAnyCode | %E8%80%81%E9%BB%84%E7%89%9B%E5%B0%8F%E5%B7%A5%E5%85%B7~%E8%84%9A%E6%9C%AC%E6%96%87%E4%BB%B6~py~chatgpt%E8%87%AA%E7%94%B1%E8%81%8A%E5%A4%A9py.py | #一、 引用函数、库等写在最上方
#11111111111111111111111111111111111111111111111111111111111111
import openai
def askChatGPT(messages):
MODEL = "gpt-3.5-turbo"
response = openai.ChatCompletion.create(
model=MODEL,
messages = messages,
temperature=0)
try:
r=response['choices'][0]['message']['content']
except:
r="返回错误,请检查网络!"
return r
def transtrate(msg,flag="英文"):
messages=[
{"role": "system","content":"你是一个翻译机器人,你会帮我把以下的文本翻译为"+flag+":"},
{"role": "user", "content": ""+msg},
]
return askChatGPT(messages)
def chat(msg,flag):
messages=[
{"role": "system","content":flag},
{"role": "user", "content": ""+msg},
]
return askChatGPT(messages)
def rolechat(msg,flag="对联"):
messages=[
{"role": "system","content":"你是一个"+flag},
{"role": "user", "content": ""+msg},
]
return askChatGPT(messages)
#11111111111111111111111111111111111111111111111111111111111111
# ---------------r34.cc制作 excel 的输入输出---------------
import os,base64,time,sys,json,lilyfun # 导入同路径下的函数
prflag = "true" # 是否打印输出,true输出
inarr,outarr={},{}
lilyfun.tj()
# 二、运行出错时,默认的输入、输出的默认标题行
#2222222222222222222222222222222222222222222222222222222222222
#输入文本
inarr["角色描述"]="你是对联机器人。"
inarr["聊天文本"]="天上一只鸟"
inarr["chatgptkey"]="GetFromIni"
inarr[""]=""
inarr[""]=""
outarr["AI回复"] = "待返回"
outarr[""] = ""
outarr[""] = ""
outarr[""] = ""
outarr[""] = ""
#文件标志
fkeyold="" #输入变量中,哪个是文件的标记。
fkeynew="" #输出变量中,哪个是文件的标记。
#2222222222222222222222222222222222222222222222222222222222222
config=lilyfun.readiniconfig()
inarr=lilyfun.updatearrfromini(inarr,config)
def main(fd2={}):
global inarr,outarr,prflag,fkeyold,fkeynew,mlkey,config
arr2ret,valarr,errarr = {},{},{}
wholepath,f64="",""
errarr=lilyfun.merge(inarr,outarr) #合并字典
# ----------------三、初始化读取数据:----------------------
#读valarr,即读标题及标题对应的值
# ----------------[1/4]读取fd2,生成base64字符串 json64 -----
try: #1.1读取fd2,即传入字典
json64=lilyfun.getfd2(fd2,"json64")
except:
#errarr["执行结果"]="读取fd2错误,请检查!"
errarr = lilyfun.printvalarr(errarr,"读取fd2错误,请检查!",prflag)
return lilyfun.mboutputarr(fd2,prflag,errarr)
lilyfun.titlepr("[1/4] fd2 传入成功!:","传入成功",prflag)
# ----------------[2/4] 生成f64,json64解码为jsonarr -----------
try: #1.2 json64解码为jsonarr
jsonarr = lilyfun.json64tojsonarr(json64)
jsoncontentarr =jsonarr["contents"]
jsoncontentarr=lilyfun.updatearrfromini(jsoncontentarr,config)
jsonarr["contents"]=jsoncontentarr
f64=lilyfun.getfd2_f64(fd2,fkeyold,jsonarr)
except:
errarr = lilyfun.printvalarr(errarr,"jsonarr解码错误,请检查!",prflag)
#fd2:函数传过来的值,arr2ret:运行得到的数组,prflag:打印标记
return lilyfun.mboutputarr(fd2,prflag,errarr)
lilyfun.titlepr("[2/4] 解码成功 jsonarr:",jsonarr,prflag)
# ----------------[3/4]按输入区读传过来的值并反馈到字典valarr ------
try:
valarr=lilyfun.getvalarr(jsonarr,inarr,outarr,prflag)
except:
errarr = lilyfun.printvalarr(errarr,"[读输入]标题行没有需要的值,请确保标题行存在。",prflag)
return lilyfun.mboutputarr(fd2,prflag,errarr)
lilyfun.titlepr("","获取到的f64的长度为: " + str(len(f64)),prflag)
lilyfun.titlepr("[3/4] 检查输入值成功 valarr:",valarr,prflag)
#这里可能还要再写读文件的
# ----------------[4/4]调用函数并生成arr2ret及f64 -------------------
try: # 运行函数,最后要生成arr2ret及f64
old_filepath=lilyfun.randfile(valarr,fkeyold,"old")
new_filepath=lilyfun.randfile(jsoncontentarr,fkeynew,"new")
old_filepath=lilyfun.writefile64(f64,old_filepath)
except: # 保存函数出错后的执行结果
valarr = lilyfun.printvalarr(valarr,"[运行]读写文件错误。",prflag)
return lilyfun.mboutputarr(fd2,prflag,valarr)
#3333333333333333333333333333333333333333333333333333
#txt=mainrun(valarr,old_filepath,new_filepath)
#inarr 角色描述 聊天文本 chatgptkey
#inarr AI回复
#fkeyold fkeynew
try: # 运行函数,最后要生成arr2ret及f64
msg=valarr["聊天文本"]
flag=valarr["角色描述"]
openai.api_key = valarr["chatgptkey"]
r=chat(msg,flag)
arr2ret["AI回复"]=r
    except Exception as e:# record the result when the call fails
        valarr = lilyfun.printvalarr(valarr,"[运行]调用函数出错,请检查值是否正确。" +"\n"+'错误类型:'+ e.__class__.__name__+"\n"+ '错误明细:'+str(e))
        return lilyfun.mboutputarr(fd2,prflag,valarr)
#3333333333333333333333333333333333333333333333333333
try: # 运行函数,最后要生成arr2ret及f64
f64=lilyfun.readfile2f64(new_filepath)#有新文件就读取
# newpath = valarr[fkeynew]
# if f64!="" and fkeynew!="" and fd2=={}:
# lilyfun.writefile64(f64,newpath)
lilyfun.safedel(old_filepath)
lilyfun.safedel(new_filepath)
arr2ret["执行结果"]="√"
except: # 保存函数出错后的执行结果
valarr = lilyfun.printvalarr(valarr,"[执行函数后]读写、删除文件错误。",prflag)
return lilyfun.mboutputarr(fd2,prflag,valarr)
lilyfun.titlepr("[4/4] 函数执行成功 arr2ret:",arr2ret,prflag)
# ----------------五、写入文件,并返回字典 -------------------
try: # 写入文件
if fd2=={} and fkeynew !="":
excelfolder=lilyfun.safegetkey(jsonarr,"excelpath")
raltiveapth=jsonarr["contents"][fkeynew]
wholepath=lilyfun.getwholepath(raltiveapth,excelfolder)
lilyfun.titlepr("执行、输出成功。","",prflag)
#这是最关键的返回函数,并写入文件
#print(wholepath)
return lilyfun.mboutputarr(fd2,prflag,arr2ret,f64,wholepath,"key")
except: # 保存函数出错后的执行结果
valarr = lilyfun.printvalarr(valarr,"写入文件出错,请检查值是否正确。",prflag)
lilyfun.titlepr("最后写入文件出错,请检查值是否正确。","",prflag)
return lilyfun.mboutputarr(fd2,prflag,valarr)
if __name__ == '__main__':
main()
#fd2的内容
# "json64": json文件 上传过来的json
# "f64": f64 上传的文件的base64(只能一个文件)
# "fkeyold": fkeyold 上传时的标题行(只能一个文件)
# "fkeynew": fkeynew 返回时的标题行(只能一个文件)
#fd2,prflag="true",arr2ret,f64="",fkeynew="",keyflag="all"
#fd2:传过来的值,prflag:打印标记,keyflag:excel中的是否全部输出
#arr2ret:运行得到的字典,f64:反馈文件的base64,fkeynew:输出值
#return lilyfun.mboutputarr(fd2,prflag,errarr)
| [
"你是一个翻译机器人,你会帮我把以下的文本翻译为PLACEHOLDER:",
"你是一个PLACEHOLDER"
] |
2024-01-10 | higgsfield/Imagination-Augmented-Agents | common~multiprocessing_env.py | #This code is from openai baseline
#https://github.com/openai/baselines/tree/master/baselines/common/vec_env
import numpy as np
from multiprocessing import Process, Pipe
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class VecEnv(object):
"""
An abstract asynchronous, vectorized environment.
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close(self):
"""
Clean up the environments' resources.
"""
pass
def step(self, actions):
self.step_async(actions)
return self.step_wait()
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.nenvs = nenvs
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def __len__(self):
return self.nenvs
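# Illustrative usage (a sketch; gym is assumed to be installed, it is not imported here):
#   import gym
#   env_fns = [lambda: gym.make("CartPole-v1") for _ in range(4)]
#   envs = SubprocVecEnv(env_fns)
#   obs = envs.reset()                                   # one stacked row per worker
#   obs, rewards, dones, infos = envs.step([envs.action_space.sample() for _ in range(4)])
#   envs.close()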
| [] |
2024-01-10 | valkyre123/test | openagent~finetune~LLMFinetune.py | from abc import ABC, abstractmethod
from logging import Logger
import openai
class LLMFinetune(ABC):
def __init__(self, logger: Logger, openai_key: str):
self.logger = logger
openai.api_key = openai_key
@abstractmethod
def transform_data(self, train_csv_file: str, val_csv_file: str , train_output_file: str, val_output_file: str) -> str:
pass
@abstractmethod
def finetune(self, **kwargs):
pass
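# A concrete subclass is expected to fill in both hooks, e.g. (hypothetical sketch):
#   class ChatModelFinetune(LLMFinetune):
#       def transform_data(self, train_csv_file, val_csv_file, train_output_file, val_output_file):
#           ...  # convert the CSVs into the JSONL files expected by the fine-tuning API
#       def finetune(self, **kwargs):
#           ...  # upload the prepared files and launch the fine-tuning job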
| [] |
2024-01-10 | PhillipeJoos/training_pkg | src~duck_gpt~gpt_machine.py | import os
import rospy
from std_msgs.msg import String, Int32 # import the ROS String and Int32 message types
import smach
import smach_ros
from listen_machine import Listen
from avanzar_machine import Avanzar
from bailar_machine import Bailar
from girar_machine import Girar
from chat_machine import Chat
from openai import OpenAI
import typer
from rich import print
from speak import speak
class GPT(smach.State):
def __init__(self):
# Publishers
self.pub_instruccion = rospy.Publisher("/duckiebot/voz/resp", String, queue_size=1)
        # initialize the OpenAI client with the key stored in api_key.txt
with open("../api_key/api_key.txt", "r") as f:
key = f.read().strip()
self.client = OpenAI(api_key=key)
        # state definition: outcomes, input keys and output keys
smach.State.__init__(self,
outcomes=['succeeded', 'aborted', 'avanzar', 'girar', 'bailar', 'chat'],
input_keys=['prompt'],
output_keys=['distance', 'direction', 'time', 'angle', 'response'])
        # system context for ChatGPT, where the instructions are specified
self.context = {"role": "system",
"content": """
Nombre: DuckNorris
Descripción: Eres un vehículo autónomo programado para los habitantes
de una ciudad de patos llamada Duckietown con 100 habitantes. Tu misión es
transportar a los patos de un lugar a otro de la ciudad,
respondiendo a sus preguntas y resolviendo sus dudas. Responde
todas las dudas de manera breve y concisa en español, sin tildes. ¡Buena suerte!
Instrucciones: Toda instrucción recibida debe ser transformada a una de las siguientes
opcines en el formato indicado y responder solamente eso. Si la instrucción no es ninguna de las siguientes
responder según el contexto.
1. Si lo recibido es similar a "avanzar" una cierta distancia
responder "avanzar X cm".
2. Si lo recibido es similar a "girar" en una cierta direccion un cierto angulo responder,
"girar direccion angulo". Si no se especifica un angulo responder "girar direccion 360". Si no
se especifica una direccion responder "girar izquierda angulo". Si el angulo se especifica en
radianes transformarlo a grados.
3. Si lo recibido es similar a "bailar" una cierta cantidad de tiempo
responder "bailar X". Si no se especifica una cantidad, responder "bailar 5".
4. Si lo recibido es similar a "chiste" responder un chiste original
5. Si lo recibido es similar a "adiós" o "apagar" responder "shutdown" y terminar la conversación."""}
self.messages = [self.context]
    # callback executed when the state is entered
    def execute(self, userdata):
        # extract the user's request
        content = userdata.prompt
        # append the user's request to the conversation context
self.messages.append({"role": "user", "content": content})
if not content:
print("🤷♂️ No has dicho nada")
return "aborted"
        # send the conversation to ChatGPT (gpt-4) for processing
        response = self.client.chat.completions.create(model="gpt-4", messages=self.messages)
        # extract ChatGPT's reply
response_content = response.choices[0].message.content
self.messages.append({"role": "assistant", "content": response_content})
        # print ChatGPT's reply for debugging
print(f"[bold green]> [/bold green] [green]{response_content}[/green]")
        # replace special characters with spaces
response_content = response_content.replace("¿", " ")
response_content = response_content.replace("¡", " ")
        # if the reply is "shutdown", end the conversation
if response_content == "shutdown":
speak("Apagando todos los sistemas")
return "succeeded"
        # extract the instruction (first word) from the reply
instruccion = response_content.split()[0]
        # if the instruction is one of the known commands, extract its parameters and move to the matching state
if instruccion == "avanzar":
userdata.distance = response_content.split()[1]
return "avanzar"
elif instruccion == "girar":
userdata.direction = response_content.split()[1]
userdata.angle = response_content.split()[2]
return "girar"
elif instruccion == "bailar":
userdata.time = response_content.split()[1]
return "bailar"
        # otherwise fall through to the Chat state
else:
userdata.response = response_content
return "chat"
# Initialize the ROS node and the main state machine
def getInstance():
    # initialize the ROS node
    rospy.init_node('gpt_machine')
    # initialize the state machine
sm = smach.StateMachine(outcomes=[
'succeeded',
'aborted',
])
    # Add the states to the state machine; note that the action machines never fail,
    # they transition straight back to Listen
with sm:
smach.StateMachine.add('Listen', Listen(),
transitions = {
'succeeded':'GPT',
'failed': 'Listen',
'aborted': 'aborted'
})
smach.StateMachine.add('GPT', GPT(),
transitions = {
'aborted': 'Listen',
'succeeded': 'succeeded',
'avanzar': 'Avanzar',
'girar': 'Girar',
'bailar': 'Bailar',
'chat': 'Chat'
})
smach.StateMachine.add('Avanzar', Avanzar(),
transitions = {
'succeeded':'Listen'
})
smach.StateMachine.add('Girar', Girar(),
transitions = {
'succeeded':'Listen'
})
smach.StateMachine.add('Bailar', Bailar(),
transitions = {
'succeeded':'Listen'
})
smach.StateMachine.add('Chat', Chat(),
transitions = {
'succeeded':'Listen'
})
    # Start the ROS introspection server to view the state machine flow diagram
sis = smach_ros.IntrospectionServer('server_name', sm, '/SM_ROOT')
sis.start()
    # Run the main state machine
sm.execute()
    # Keep the ROS node alive
rospy.spin()
sis.stop()
if __name__ == '__main__':
getInstance()
| [
"\n\t\t\t\t\t\t\t\tNombre: DuckNorris\n\t\t\t\t\t\t\t\tDescripción: Eres un vehículo autónomo programado para los habitantes\n\t\t\t\t\t\t\t\tde una ciudad de patos llamada Duckietown con 100 habitantes. Tu misión es\n\t\t\t\t\t\t\t\ttransportar a los patos de un lugar a otro de la ciudad,\n\t\t\t\t\t\t\t\trespondiendo a sus preguntas y resolviendo sus dudas. Responde\n\t\t\t\t\t\t\t\ttodas las dudas de manera breve y concisa en español, sin tildes. ¡Buena suerte!\n\t\t\t\t\t\t\t\tInstrucciones: Toda instrucción recibida debe ser transformada a una de las siguientes\n\t\t\t\t\t\t\t\topcines en el formato indicado y responder solamente eso. Si la instrucción no es ninguna de las siguientes\n\t\t\t\t\t\t\t\tresponder según el contexto.\n\n\t\t\t\t\t\t\t\t1. Si lo recibido es similar a \"avanzar\" una cierta distancia\n\t\t\t\t\t\t\t\tresponder \"avanzar X cm\".\n\n\t\t\t\t\t\t\t\t2. Si lo recibido es similar a \"girar\" en una cierta direccion un cierto angulo responder,\n\t\t\t\t\t\t\t\t\"girar direccion angulo\". Si no se especifica un angulo responder \"girar direccion 360\". Si no\n\t\t\t\t\t\t\t\tse especifica una direccion responder \"girar izquierda angulo\". Si el angulo se especifica en\n\t\t\t\t\t\t\t\tradianes transformarlo a grados. \n\n\t\t\t\t\t\t\t\t3. Si lo recibido es similar a \"bailar\" una cierta cantidad de tiempo\n\t\t\t\t\t\t\t\tresponder \"bailar X\". Si no se especifica una cantidad, responder \"bailar 5\".\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t4. Si lo recibido es similar a \"chiste\" responder un chiste original\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t5. Si lo recibido es similar a \"adiós\" o \"apagar\" responder \"shutdown\" y terminar la conversación."
] |
2024-01-10 | abhisom2912/bot-service | bot_service~user_facing_apis~utilities~fetch_data.py |
from transformers import GPT2TokenizerFast
import numpy as np
from github import Github
from dotenv import dotenv_values
import time
import pyparsing as pp
import openai
import pandas as pd
import tiktoken
from nltk.tokenize import sent_tokenize
import nltk
import requests
import ssl
from utilities.scrapers.gitbook_scraper import *
from utilities.scrapers.pdf_parse_seq import *
from utilities.scrapers.medium_parser import *
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
pass
else:
ssl._create_default_https_context = _create_unverified_https_context
nltk.download('punkt')
config = dotenv_values("../.env")
openai.api_key = config['OPENAI_API_KEY']
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
COMPLETIONS_MODEL = "text-davinci-003"
EMBEDDING_MODEL = "text-embedding-ada-002"
MAX_SECTION_LEN = 500
SEPARATOR = "\n* "
ENCODING = "cl100k_base"
min_token_limit = 10
EMBEDDING_COST = 0.0004
COMPLETIONS_COST = 0.03
max_len = 1500
encoding = tiktoken.get_encoding(ENCODING)
separator_len = len(encoding.encode(SEPARATOR))
COMPLETIONS_API_PARAMS = {
# We use temperature of 0.0 because it gives the most predictable, factual answer.
"temperature": 0.0,
"max_tokens": 300,
"model": COMPLETIONS_MODEL,
}
# functions to fetch data
def find_all(s, ch):
previous_ind = 0
array = []
length = len(s)
while 1:
try:
ind = s.index(ch)
array.append(ind + previous_ind)
s = s[ind + len(ch):length]
previous_ind = previous_ind + ind + len(ch)
except ValueError:
break
return array
def remove_unwanted_char(s):
code_separator = "```"
index_array = find_all(s, code_separator)
i = 0
if len(index_array) % 2 == 1:
index_array.append(len(s))
while i < len(index_array):
start_index = index_array[i]
i = i+1
end_index = index_array[i]
orig_string = s[start_index:end_index]
replaced_string = orig_string.replace('#', '--')
s = s.replace(orig_string, replaced_string)
i = i+1
return s
def get_needed_hash(s):
s_array = s.split("\n")
i = len(s_array) - 1
req_no_of_hash = 2
while i > 0:
if s_array[i].find("#") != -1:
req_no_of_hash = s_array[i].count('#') + 1
break
i = i - 1
no_hash = 0
hash_string = ''
while no_hash < req_no_of_hash:
hash_string = hash_string + '#'
no_hash = no_hash + 1
return hash_string
def cleanup_data(s):
s = remove_unwanted_char(s)
s = s.replace('<details>', '')
s = s.replace('</details>', '')
s = s.replace('</b></summary>', '')
# hash_string = get_needed_hash(s[0:s.find('<summary><b>')])
hash_string = ''
s = s.replace('<summary><b>', hash_string)
return s
def clean_content(content):
s1 = '<Section'
s2 = '</Section>'
remove_content = find_between(content, s1, s2)
content = content.replace(remove_content,'').replace(s1, '').replace(s2, '')
return content
def read_docs(github_repo, github_directory):
g = Github(config['GITHUB_ACCESS_TOKEN'])
repo = g.get_repo(github_repo)
title_stack = []
contents = repo.get_contents("")
file_content = ''
while contents:
try:
file_content = contents.pop(0)
except Exception:
pass
if file_content.type == "dir":
contents.extend(repo.get_contents(file_content.path))
else:
if (github_directory != '' and file_content.path.find(github_directory) == -1): ## remove orchestrator line later
continue
if file_content.name.endswith('md') or file_content.name.endswith('mdx'):
file_contents = repo.get_contents(file_content.path)
title = pp.AtLineStart(pp.Word("#")) + pp.rest_of_line
sample = file_contents.decoded_content.decode()
sample = cleanup_data(sample)
title_stack.append([0, 'start_of_file']) # level, title, content, path
if sample.split('\n')[0] == '---':
title_stack[-1].append('')
title_stack[-1].append(file_content.path.replace(github_directory, ''))
title_stack.append([1, sample.split('\n')[1].split(':')[1].lstrip()])
sample = sample.split('---')[2]
last_end = 0
for t, start, end in title.scan_string(sample):
# save content since last title in the last item in title_stack
title_stack[-1].append(clean_content(sample[last_end:start].lstrip("\n")))
title_stack[-1].append(file_content.path.replace(github_directory, ''))
# add a new entry to title_stack
marker, title_content = t
level = len(marker)
title_stack.append([level, title_content.lstrip()])
# update last_end to the end of the current match
last_end = end
# add trailing text to the final parsed title
title_stack[-1].append(clean_content(sample[last_end:]))
title_stack[-1].append(file_content.path.replace(github_directory, ''))
return title_stack
def create_data_for_docs(protocol_title, title_stack, doc_link, doc_type):
heads = {}
max_level = 0
nheadings, ncontents, ntitles, nlinks = [], [], [], []
outputs = []
for level, header, content, dir in title_stack:
final_header = header
dir_header = ''
if doc_type == 'pdf':
content_link = doc_link
title = protocol_title + " - whitepaper"
elif doc_type == 'gitbook':
content_link = dir
title = protocol_title + " - whitepaper"
dir_elements = dir.replace('https://', '').split('/')
element_len = 1
while element_len < len(dir_elements) - 1:
dir_header += dir_elements[element_len].replace('-', ' ') + ': '
element_len += 1
elif doc_type == 'medium':
content_link = dir
title = protocol_title + " - articles"
else:
element_len = 1
dir_elements = dir.split('/')
content_link = doc_link + '/' + dir_elements[0]
sub = 1
title = protocol_title + " - " + dir_elements[0]
if dir_elements[len(dir_elements) - sub].find('README') != -1:
sub = sub + 1
while element_len < len(dir_elements) - sub:
dir_header = dir_header + dir_elements[element_len] + ': '
element_len = element_len + 1
element_len = 1
while element_len < len(dir_elements) - sub + 1:
if dir_elements[element_len].find('.md'):
link = dir_elements[element_len].replace('.mdx', '').replace('.md', '')
content_link = content_link + '/' + link
element_len = element_len + 1
if level > 0:
heads[level] = header
if level > max_level:
max_level = level
while max_level > level:
try:
heads.pop(max_level)
except Exception:
pass
max_level = max_level - 1
i = level - 1
while i > 0:
try:
final_header = heads[i] + ': ' + final_header
except Exception:
pass
i = i - 1
final_header = dir_header + final_header
if final_header.find('start_of_file') == -1:
if content.strip() == '':
continue
nheadings.append(final_header.strip())
ncontents.append(content)
ntitles.append(title)
nlinks.append(content_link)
ncontent_ntokens = [
count_tokens(c)
+ 3
+ count_tokens(" ".join(h.split(" ")[1:-1]))
- (1 if len(c) == 0 else 0)
for h, c in zip(nheadings, ncontents)
]
for title, h, c, t, l in zip(ntitles, nheadings, ncontents, ncontent_ntokens, nlinks):
if (t < max_len and t > min_token_limit):
outputs += [(title, h, c, t, l)]
elif (t >= max_len):
outputs += [(title, h, reduce_long(c, max_len), count_tokens(reduce_long(c, max_len)), l)]
return outputs
def final_data_for_openai(outputs):
res = []
res += outputs
df = pd.DataFrame(res, columns=["title", "heading", "content", "tokens", "link"])
df = df[df.tokens>10] # to ensure really small and insignificant data doesn't get indexed
df = df.drop_duplicates(['title','heading'])
df = df.reset_index().drop('index',axis=1) # reset index
df = df.set_index(["title", "heading"])
return df
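# The resulting DataFrame is indexed by (title, heading) and keeps 'content', 'tokens' and
# 'link' columns, e.g. (illustrative values only):
#   ("MyProtocol - docs", "Relayer: Overview") -> content="...", tokens=245, link="https://..."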
def count_tokens(text: str) -> int:
"""count the number of tokens in a string"""
return len(tokenizer.encode(text))
def find_between(s, first, last):
try:
start = s.index(first) + len(first)
end = s.rindex(last, start)
return s[start:end]
except ValueError:
return ""
def reduce_long(
long_text: str, long_text_tokens: bool = False, max_len: int = 590
) -> str:
"""
Reduce a long text to a maximum of `max_len` tokens by potentially cutting at a sentence end
"""
if not long_text_tokens:
long_text_tokens = count_tokens(long_text)
if long_text_tokens > max_len:
sentences = sent_tokenize(long_text.replace("\n", " "))
ntokens = 0
for i, sentence in enumerate(sentences):
ntokens += 1 + count_tokens(sentence)
if ntokens > max_len:
return ". ".join(sentences[:i][:-1]) + "."
return long_text
def get_embedding(text: str, model: str = EMBEDDING_MODEL):
result = openai.Embedding.create(
model=model,
input=text
)
return result["data"][0]["embedding"], result["usage"]["total_tokens"]
def compute_doc_embeddings(df: pd.DataFrame):
"""
Create an embedding for each row in the dataframe using the OpenAI Embeddings API.
Return a dictionary that maps between each embedding vector and the index of the row that it corresponds to.
"""
embedding_dict = {}
total_tokens_used = 0
for idx, r in df.iterrows():
embedding, tokens = get_embedding(r.content)
embedding_dict[idx] = embedding
total_tokens_used = total_tokens_used + tokens
time.sleep(7)
cost_incurred = total_tokens_used * EMBEDDING_COST / 1000
print(cost_incurred)
return embedding_dict, cost_incurred
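# Cost sanity check (illustrative): with EMBEDDING_COST = 0.0004 USD per 1K tokens, embedding
# 50 sections of roughly 300 tokens each (~15,000 tokens) costs about
# 15000 * 0.0004 / 1000 = 0.006 USD.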
def read_from_github(protocol_title, github_link, github_doc_link, github_directory):
github_repo = github_link.partition("github.com/")[2]
print(github_repo)
title_stack = read_docs(github_repo, github_directory)
outputs = create_data_for_docs(protocol_title, title_stack, github_doc_link, 'github')
print(outputs)
df = final_data_for_openai(outputs)
print(df.head)
document_embeddings, cost_incurred = compute_doc_embeddings(df)
print(len(df), " rows in the data.")
return outputs, document_embeddings, cost_incurred
def get_url(url):
response = requests.get(url)
content = response.content.decode("utf8")
return content
def add_data_array(file_path, content):
title_stack = []
title = pp.AtLineStart(pp.Word("#")) + pp.rest_of_line
title_stack.append([0, 'start_of_file'])
if content.split('\n')[0] == '---':
title_stack[-1].append('')
title_stack[-1].append(file_path)
title_stack.append([1, content.split('\n')[1].split(':')[1].lstrip()])
content = content.split('---')[2]
last_end = 0
for t, start, end in title.scan_string(content):
# save content since last title in the last item in title_stack
title_stack[-1].append(content[last_end:start].lstrip("\n"))
title_stack[-1].append(file_path)
# add a new entry to title_stack
marker, title_content = t
level = len(marker)
title_stack.append([level, title_content.lstrip()])
# update last_end to the end of the current match
last_end = end
# add trailing text to the final parsed title
title_stack[-1].append(content[last_end:])
title_stack[-1].append(file_path)
return title_stack
# handling request to get data from Gitbook
def get_data_from_gitbook(gitbook_data_type, gitbook_link, protocol_title):
https_str = "https://"
if gitbook_link[len(gitbook_link)-1] == "/":
gitbook_link = gitbook_link[0 : len(gitbook_link)-1]
inter_str = gitbook_link.replace(https_str, '')
base_url = https_str + (inter_str.split('/', 1)[0] if len(inter_str.split('/', 1)) > 1 else inter_str)
first_url = '/' + inter_str.split('/', 1)[1] if len(inter_str.split('/', 1)) > 1 else ''
title_stack = get_gitbook_data(base_url, first_url, gitbook_data_type)
# title_stack = get_gitbook_data(gitbook_link, '', gitbook_data_type)
outputs = create_data_for_docs(protocol_title, title_stack, '', 'gitbook')
print('Outputs created for gitbook data')
df = final_data_for_openai(outputs)
print(df.head)
document_embeddings, cost_incurred = compute_doc_embeddings(df)
print('Embeddings created, sending data to db...')
return outputs, document_embeddings, cost_incurred
# handling request to get data from a PDF document
def get_pdf_whitepaper_data(document, table_of_contents_pages, whitepaper_link, protocol_title):
content = convert_to_md_format(document, table_of_contents_pages)
title_stack = add_data_array('whitepaper', content)
outputs = create_data_for_docs(protocol_title, title_stack, whitepaper_link, 'pdf')
print('Outputs created for whitepaper data')
df = final_data_for_openai(outputs)
print(df.head)
document_embeddings, cost_incurred = compute_doc_embeddings(df)
print('Embeddings created, sending data to db...')
return outputs, document_embeddings, cost_incurred
# handling request to get data from Medium
def get_data_from_medium(username, valid_articles_duration_days, protocol_title):
title_stack = get_medium_data(username, valid_articles_duration_days)
outputs = create_data_for_docs(protocol_title, title_stack, '', 'medium')
print('Outputs created for gitbook data')
df = final_data_for_openai(outputs)
print(df.head)
document_embeddings, cost_incurred = compute_doc_embeddings(df)
print('Embeddings created, sending data to db...')
return outputs, document_embeddings, cost_incurred
def get_data_for_mod_responses(responses, protocol_title):
outputs = create_data_for_mod_responses(responses, protocol_title)
df = final_data_for_openai(outputs)
document_embeddings, cost_incurred = compute_doc_embeddings(df)
return outputs, document_embeddings, cost_incurred
def create_data_for_mod_responses(responses, protocol_title):
nheadings, ncontents, ntitles, nlinks = [], [], [], []
outputs = []
for response in responses:
nheadings.append(response['question'])
ncontents.append(response['answer'])
ntitles.append(protocol_title + ' - mod responses')
nlinks.append('')
ncontent_ntokens = [
count_tokens(c)
+ 3
+ count_tokens(" ".join(h.split(" ")[1:-1]))
- (1 if len(c) == 0 else 0)
for h, c in zip(nheadings, ncontents)
]
for title, h, c, t, l in zip(ntitles, nheadings, ncontents, ncontent_ntokens, nlinks):
if (t < max_len and t > min_token_limit):
outputs += [(title, h, c, t, l)]
elif (t >= max_len):
outputs += [(title, h, reduce_long(c, max_len), count_tokens(reduce_long(c, max_len)), l)]
return outputs
# Functions to help answer queries
def vector_similarity(x: list[float], y: list[float]) -> float:
"""
Returns the similarity between two vectors.
Because OpenAI Embeddings are normalized to length 1, the cosine similarity is the same as the dot product.
"""
return np.dot(np.array(x), np.array(y))
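# Tiny worked example (illustrative): for unit-length embeddings x = [1, 0] and y = [0.6, 0.8],
# vector_similarity(x, y) = 1*0.6 + 0*0.8 = 0.6, i.e. their cosine similarity.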
def order_document_sections_by_query_similarity(query_embedding: list[float],
contexts: dict[tuple[str, str], np.array]):
"""
Find the query embedding for the supplied query, and compare it against all of the pre-calculated document embeddings
to find the most relevant sections.
Return the list of document sections, sorted by relevance in descending order.
"""
document_similarities = sorted([
(vector_similarity(query_embedding, doc_embedding), doc_index) for doc_index, doc_embedding in contexts.items()
], reverse=True)
return document_similarities
def construct_prompt(question: str, question_embedding: list[float], context_embeddings: dict, df: pd.DataFrame, default_answer: str):
"""
Fetch relevant
"""
most_relevant_document_sections = order_document_sections_by_query_similarity(question_embedding,
context_embeddings)
chosen_sections = []
chosen_sections_len = 0
chosen_sections_indexes_string = []
chosen_sections_indexes = []
for _, section_index in most_relevant_document_sections:
# Add contexts until we run out of space.
document_section = df.loc[section_index]
chosen_sections_len += document_section.tokens + separator_len
if chosen_sections_len > MAX_SECTION_LEN:
break
chosen_sections.append(SEPARATOR + document_section.content.replace("\n", " "))
chosen_sections_indexes_string.append(str(section_index))
chosen_sections_indexes.append(section_index)
# Useful diagnostic information
print("Selected ", len(chosen_sections), " document sections:")
print("\n".join(chosen_sections_indexes_string))
header = """Answer the question as truthfully as possible using the provided context, and if the answer is not contained within the text below,
say """ + default_answer + """\n\nContext:\n"""
return header + "".join(chosen_sections) + "\n\n Q: " + question + "\n A:", chosen_sections_indexes
def answer_query_with_context(
query: str,
question_embedding: list,
df: pd.DataFrame,
document_embeddings: dict[tuple[str, str], np.array],
default_answer: str,
show_prompt: bool = False
):
prompt, chosen_sections_indexes = construct_prompt(
query,
question_embedding,
document_embeddings,
df,
default_answer
)
if show_prompt:
print(prompt)
response = openai.Completion.create(
prompt=prompt,
**COMPLETIONS_API_PARAMS
)
# calculating the cost incurred in using OpenAI to answer the question
answer_cost = response["usage"]["total_tokens"] * COMPLETIONS_COST / 1000
links = []
if len(chosen_sections_indexes) > 0 and df.loc[chosen_sections_indexes[0]]['link'] == '':
return response["choices"][0]["text"].strip(" \n"), answer_cost, links
for section_index in chosen_sections_indexes:
document_section = df.loc[section_index]
link = document_section['link']
if link != '' and not (link in links):
links.append(link)
if len(links) >= 2:
break
return response["choices"][0]["text"].strip(" \n"), answer_cost, links | [] |
2024-01-10 | abhisom2912/bot-service | bot_service~wip~telegram_bot_openai.py | import json
import requests
import time
import urllib
from transformers import GPT2TokenizerFast
import numpy as np
from github import Github
import os
import pyparsing as pp
import openai
import pandas as pd
import tiktoken
from nltk.tokenize import sent_tokenize
from typing import List
from typing import Dict
from typing import Tuple
import nltk
TOKEN = "5922680401:AAEKq1oh0hP1RBky4ymL7lbXzhqnfTCRc3Q"
URL = "https://api.telegram.org/bot{}/".format(TOKEN)
nltk.download('punkt')
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
COMPLETIONS_MODEL = "text-davinci-003"
EMBEDDING_MODEL = "text-embedding-ada-002"
MAX_SECTION_LEN = 500
SEPARATOR = "\n* "
ENCODING = "cl100k_base"
encoding = tiktoken.get_encoding(ENCODING)
separator_len = len(encoding.encode(SEPARATOR))
# f"Context separator contains {separator_len} tokens"
COMPLETIONS_API_PARAMS = {
# We use temperature of 0.0 because it gives the most predictable, factual answer.
"temperature": 0.0,
"max_tokens": 300,
"model": COMPLETIONS_MODEL,
}
def find_all(s, ch):
previous_ind = 0
array = []
length = len(s)
while 1:
try:
ind = s.index(ch)
array.append(ind + previous_ind)
s = s[ind + len(ch):length]
previous_ind = previous_ind + ind + len(ch)
except ValueError:
break
return array
def remove_unwanted_char(s):
code_separator = "```"
index_array = find_all(s, code_separator)
i = 0
while i < len(index_array):
start_index = index_array[i]
i = i+1
end_index = index_array[i]
orig_string = s[start_index:end_index]
replaced_string = orig_string.replace('#', '--')
s = s.replace(orig_string, replaced_string)
i = i+1
return s
def get_needed_hash(s):
s_array = s.split("\n")
i = len(s_array) - 1
req_no_of_hash = 2
while i > 0:
if s_array[i].find("#") != -1:
req_no_of_hash = s_array[i].count('#') + 1
break
i = i - 1
no_hash = 0
hash_string = ''
while no_hash < req_no_of_hash:
hash_string = hash_string + '#'
no_hash = no_hash + 1
return hash_string
def cleanup_data(s):
s = remove_unwanted_char(s)
s = s.replace('<details>', '')
s = s.replace('</details>', '')
s = s.replace('</b></summary>', '')
# hash_string = get_needed_hash(s[0:s.find('<summary><b>')])
hash_string = ''
s = s.replace('<summary><b>', hash_string)
return s
def read_docs() -> []:
g = Github(os.getenv('GITTOKEN'))
repo = g.get_repo('abhisom2912/nft-drop-starter-project')
title_stack=[]
contents = repo.get_contents("")
while contents:
try:
file_content = contents.pop(0)
except Exception:
pass
if file_content.type == "dir":
contents.extend(repo.get_contents(file_content.path))
else:
if file_content.name.endswith('md') or file_content.name.endswith('mdx'):
file_contents = repo.get_contents(file_content.path)
title = pp.AtLineStart(pp.Word("#")) + pp.rest_of_line
sample = file_contents.decoded_content.decode()
sample = cleanup_data(sample)
title_stack.append([0, 'start_of_file'])
if sample.split('\n')[0] == '---':
title_stack[-1].append('')
title_stack[-1].append(file_content.path)
title_stack.append([1, sample.split('\n')[1].split(':')[1].lstrip()])
sample = sample.split('---')[2]
last_end = 0
for t, start, end in title.scan_string(sample):
# save content since last title in the last item in title_stack
title_stack[-1].append(sample[last_end:start].lstrip("\n"))
title_stack[-1].append(file_content.path)
# add a new entry to title_stack
marker, title_content = t
level = len(marker)
title_stack.append([level, title_content.lstrip()])
# update last_end to the end of the current match
last_end = end
# add trailing text to the final parsed title
title_stack[-1].append(sample[last_end:])
title_stack[-1].append(file_content.path)
return title_stack
def create_data_for_docs() -> []:
title_stack = read_docs()
heads = {}
max_level = 0
nheadings, ncontents, ntitles = [], [], []
outputs = []
max_len = 1500
s1 = '<Section'
s2 = '</Section>'
for level, header, content, dir in title_stack:
final_header = header
dir_elements = dir.split('/')
element_len = 1
dir_header = ''
sub = 1
title = 'Router Protocol' + " - " + dir_elements[0]
if dir_elements[len(dir_elements) - sub].find('README') != -1:
sub = sub + 1
while element_len < len(dir_elements) - sub:
dir_header = dir_header + dir_elements[element_len] + ': '
element_len = element_len + 1
if level > 0:
heads[level] = header
if level > max_level:
max_level = level
while max_level > level:
try:
heads.pop(max_level)
except Exception:
pass
max_level = max_level - 1
i = level - 1
while i > 0:
try:
final_header = heads[i] + ': ' + final_header
except Exception:
pass
i=i-1
final_header = dir_header + final_header
if final_header.find('start_of_file') == -1:
remove_content = find_between(content, s1, s2)
content = content.replace(remove_content,'').replace(s1, '').replace(s2, '')
if content.strip() == '':
continue
nheadings.append(final_header.strip())
ncontents.append(content)
ntitles.append(title)
ncontent_ntokens = [
count_tokens(c)
+ 3
+ count_tokens(" ".join(h.split(" ")[1:-1]))
- (1 if len(c) == 0 else 0)
for h, c in zip(nheadings, ncontents)
]
outputs += [(title, h, c, t) if t<max_len
else (title, h, reduce_long(c, max_len), count_tokens(reduce_long(c,max_len)))
for title, h, c, t in zip(ntitles, nheadings, ncontents, ncontent_ntokens)]
return outputs
def final_data_for_openai(outputs):
res = []
res += outputs
df = pd.DataFrame(res, columns=["title", "heading", "content", "tokens"])
df = df[df.tokens>10] # to ensure really small and insignificant data doesn't get indexed
df = df.drop_duplicates(['title','heading'])
df = df.reset_index().drop('index',axis=1) # reset index
return df
def count_tokens(text: str) -> int:
"""count the number of tokens in a string"""
return len(tokenizer.encode(text))
def find_between( s, first, last ):
try:
start = s.index( first ) + len( first )
end = s.rindex( last, start )
return s[start:end]
except ValueError:
return ""
def reduce_long(
long_text: str, long_text_tokens: bool = False, max_len: int = 590
) -> str:
"""
Reduce a long text to a maximum of `max_len` tokens by potentially cutting at a sentence end
"""
if not long_text_tokens:
long_text_tokens = count_tokens(long_text)
if long_text_tokens > max_len:
sentences = sent_tokenize(long_text.replace("\n", " "))
ntokens = 0
for i, sentence in enumerate(sentences):
ntokens += 1 + count_tokens(sentence)
if ntokens > max_len:
return ". ".join(sentences[:i][:-1]) + "."
return long_text
def get_embedding(text: str, model: str=EMBEDDING_MODEL) -> List[float]:
time.sleep(1.5)
result = openai.Embedding.create(
model=model,
input=text
)
return result["data"][0]["embedding"]
def compute_doc_embeddings(df: pd.DataFrame) -> Dict[Tuple[str, str], List[float]]:
"""
Create an embedding for each row in the dataframe using the OpenAI Embeddings API.
Return a dictionary that maps between each embedding vector and the index of the row that it corresponds to.
"""
return {
idx: get_embedding(r.content) for idx, r in df.iterrows()
}
def load_embeddings(fname: str) -> Dict[Tuple[str, str], List[float]]:
"""
Read the document embeddings and their keys from a CSV.
fname is the path to a CSV with exactly these named columns:
"title", "heading", "0", "1", ... up to the length of the embedding vectors.
"""
df = pd.read_csv(fname, header=0)
max_dim = max([int(c) for c in df.columns if c != "title" and c != "heading"])
return {
(r.title, r.heading): [r[str(i)] for i in range(max_dim + 1)] for _, r in df.iterrows()
}
def vector_similarity(x: List[float], y: List[float]) -> float:
"""
Returns the similarity between two vectors.
Because OpenAI Embeddings are normalized to length 1, the cosine similarity is the same as the dot product.
"""
return np.dot(np.array(x), np.array(y))
def order_document_sections_by_query_similarity(query: str, contexts: Dict[Tuple[str, str], np.array]) -> List[Tuple[float, Tuple[str, str]]]:
"""
Find the query embedding for the supplied query, and compare it against all of the pre-calculated document embeddings
to find the most relevant sections.
Return the list of document sections, sorted by relevance in descending order.
"""
query_embedding = get_embedding(query)
document_similarities = sorted([
(vector_similarity(query_embedding, doc_embedding), doc_index) for doc_index, doc_embedding in contexts.items()
], reverse=True)
return document_similarities
def construct_prompt(question: str, context_embeddings: dict, df: pd.DataFrame) -> str:
"""
Fetch relevant parts of the data
"""
most_relevant_document_sections = order_document_sections_by_query_similarity(question, context_embeddings)
chosen_sections = []
chosen_sections_len = 0
chosen_sections_indexes = []
for _, section_index in most_relevant_document_sections:
# Add contexts until we run out of space.
document_section = df.loc[section_index]
chosen_sections_len += document_section.tokens + separator_len
if chosen_sections_len > MAX_SECTION_LEN:
break
chosen_sections.append(SEPARATOR + document_section.content.replace("\n", " "))
chosen_sections_indexes.append(str(section_index))
# Useful diagnostic information
print("Selected {len(chosen_sections)} document sections:")
print("\n".join(chosen_sections_indexes))
header = """Answer the question as truthfully as possible using the provided context, and if the answer is not contained within the text below, say "I don't know the answer to this question."\n\nContext:\n"""
return header + "".join(chosen_sections) + "\n\n Q: " + question + "\n A:"
def answer_query_with_context(
query: str,
df: pd.DataFrame,
document_embeddings: Dict[Tuple[str, str], np.array],
show_prompt: bool = False
) -> str:
prompt = construct_prompt(
query,
document_embeddings,
df
)
if show_prompt:
print(prompt)
response = openai.Completion.create(
prompt=prompt,
**COMPLETIONS_API_PARAMS
)
return response["choices"][0]["text"].strip(" \n")
def get_url(url):
response = requests.get(url)
content = response.content.decode("utf8")
return content
def get_json_from_url(url):
content = get_url(url)
js = json.loads(content)
return js
def get_updates(offset=None):
url = URL + "getUpdates?timeout=100"
if offset:
url += "&offset={}".format(offset)
js = get_json_from_url(url)
return js
def get_last_update_id(updates):
update_ids = []
for update in updates["result"]:
update_ids.append(int(update["update_id"]))
return max(update_ids)
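# Long-polling note (illustrative): main() passes last_update_id + 1 as the offset of the next
# getUpdates call, so each Telegram update is processed exactly once, e.g.
#   updates = get_updates()                      # returns updates 101..103
#   offset = get_last_update_id(updates) + 1     # 104 -> the next call skips 101..103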
def echo_all(updates, df, document_embeddings):
for update in updates["result"]:
try:
text = update["message"]["text"]
chat = update["message"]["chat"]["id"]
send_message(text, chat, df, document_embeddings)
except Exception as e:
print(e)
def send_message(text, chat_id, df, document_embeddings):
print(text)
response = answer_query_with_context(text, df, document_embeddings)
    # URL-encode the model's answer before passing it to the Telegram sendMessage endpoint
    url = URL + "sendMessage?text={}&chat_id={}".format(urllib.parse.quote_plus(response), chat_id)
get_url(url)
def main():
openai.api_key = os.getenv('OPENAI_API_KEY')
outputs = create_data_for_docs()
df = final_data_for_openai(outputs)
print(df.head)
df = df.set_index(["title", "heading"])
document_embeddings = compute_doc_embeddings(df)
print("{len(df)} rows in the data.")
last_update_id = None
while True:
updates = get_updates(last_update_id)
if len(updates["result"]) > 0:
last_update_id = get_last_update_id(updates) + 1
echo_all(updates, df, document_embeddings)
time.sleep(0.5)
if __name__ == '__main__':
main() | [] |
2024-01-10 | abhisom2912/bot-service | bot_service~wip~all_bot_openai_py310_cmd.py | from asgiref.sync import sync_to_async
import discord
from transformers import GPT2TokenizerFast
import sys
import numpy as np
from github import Github
from dotenv import dotenv_values
import time
import pyparsing as pp
import openai
import pandas as pd
import tiktoken
from nltk.tokenize import sent_tokenize
import nltk
import urllib
import requests
import json
import multiprocessing
import ssl
import gspread
from itertools import islice
from gitbook_scraper import * # import from the self_run_bot folder since we have removed gitbook_scraper.py from this folder
from pdf_parse_seq import * # import from the self_run_bot folder since we have removed pdf_parse_seq.py from this folder
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
pass
else:
ssl._create_default_https_context = _create_unverified_https_context
nltk.download('punkt')
config = dotenv_values("../.env")
openai.api_key = config['OPENAI_API_KEY']
TELEGRAM_TOKEN = config['TELEGRAM_TOKEN']
TELEGRAM_API_URL = "https://api.telegram.org/bot{}/".format(TELEGRAM_TOKEN)
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
COMPLETIONS_MODEL = "text-davinci-003"
EMBEDDING_MODEL = "text-embedding-ada-002"
MAX_SECTION_LEN = 500
SEPARATOR = "\n* "
ENCODING = "cl100k_base"
min_token_limit = 10
encoding = tiktoken.get_encoding(ENCODING)
separator_len = len(encoding.encode(SEPARATOR))
# f"Context separator contains {separator_len} tokens"
protocol_id = "1"
bot_description = "Router protocol relayer info"
COMPLETIONS_API_PARAMS = {
# We use temperature of 0.0 because it gives the most predictable, factual answer.
"temperature": 0.0,
"max_tokens": 300,
"model": COMPLETIONS_MODEL,
}
def find_all(s, ch):
previous_ind = 0
array = []
length = len(s)
while 1:
try:
ind = s.index(ch)
array.append(ind + previous_ind)
s = s[ind + len(ch):length]
previous_ind = previous_ind + ind + len(ch)
except ValueError:
break
return array
def remove_unwanted_char(s):
code_separator = "```"
index_array = find_all(s, code_separator)
i = 0
while i < len(index_array):
start_index = index_array[i]
i = i+1
end_index = index_array[i]
orig_string = s[start_index:end_index]
replaced_string = orig_string.replace('#', '--')
s = s.replace(orig_string, replaced_string)
i = i+1
return s
def get_needed_hash(s):
s_array = s.split("\n")
i = len(s_array) - 1
req_no_of_hash = 2
while i > 0:
if s_array[i].find("#") != -1:
req_no_of_hash = s_array[i].count('#') + 1
break
i = i - 1
no_hash = 0
hash_string = ''
while no_hash < req_no_of_hash:
hash_string = hash_string + '#'
no_hash = no_hash + 1
return hash_string
def cleanup_data(s):
s = remove_unwanted_char(s)
s = s.replace('<details>', '')
s = s.replace('</details>', '')
s = s.replace('</b></summary>', '')
# hash_string = get_needed_hash(s[0:s.find('<summary><b>')])
hash_string = ''
s = s.replace('<summary><b>', hash_string)
return s
def read_docs(github_repo):
g = Github(config['GITHUB_ACCESS_TOKEN'])
repo = g.get_repo(github_repo)
title_stack=[]
contents = repo.get_contents("")
while contents:
try:
file_content = contents.pop(0)
except Exception:
pass
if file_content.type == "dir":
contents.extend(repo.get_contents(file_content.path))
else:
if file_content.path.find('relayer') == -1: ## remove this line later
continue ## remove this line later
if file_content.name.endswith('md') or file_content.name.endswith('mdx'):
file_contents = repo.get_contents(file_content.path)
title = pp.AtLineStart(pp.Word("#")) + pp.rest_of_line
sample = file_contents.decoded_content.decode()
sample = cleanup_data(sample)
title_stack.append([0, 'start_of_file'])
if sample.split('\n')[0] == '---':
title_stack[-1].append('')
title_stack[-1].append(file_content.path)
title_stack.append([1, sample.split('\n')[1].split(':')[1].lstrip()])
sample = sample.split('---')[2]
last_end = 0
for t, start, end in title.scan_string(sample):
# save content since last title in the last item in title_stack
title_stack[-1].append(sample[last_end:start].lstrip("\n"))
title_stack[-1].append(file_content.path)
# add a new entry to title_stack
marker, title_content = t
level = len(marker)
title_stack.append([level, title_content.lstrip()])
# update last_end to the end of the current match
last_end = end
# add trailing text to the final parsed title
title_stack[-1].append(sample[last_end:])
title_stack[-1].append(file_content.path)
return title_stack
def create_data_for_docs(protocol_title, title_stack):
heads = {}
max_level = 0
nheadings, ncontents, ntitles = [], [], []
outputs = []
max_len = 1500
s1 = '<Section'
s2 = '</Section>'
for level, header, content, dir in title_stack:
final_header = header
dir_elements = dir.split('/')
element_len = 1
dir_header = ''
sub = 1
title = protocol_title + " - " + dir_elements[0]
if dir_elements[len(dir_elements) - sub].find('README') != -1:
sub = sub + 1
while element_len < len(dir_elements) - sub:
dir_header = dir_header + dir_elements[element_len] + ': '
element_len = element_len + 1
if level > 0:
heads[level] = header
if level > max_level:
max_level = level
while max_level > level:
try:
heads.pop(max_level)
except Exception:
pass
max_level = max_level - 1
i = level - 1
while i > 0:
try:
final_header = heads[i] + ': ' + final_header
except Exception:
pass
i=i-1
final_header = dir_header + final_header
if final_header.find('start_of_file') == -1:
remove_content = find_between(content, s1, s2)
content = content.replace(remove_content,'').replace(s1, '').replace(s2, '')
if content.strip() == '':
continue
nheadings.append(final_header.strip())
ncontents.append(content)
ntitles.append(title)
ncontent_ntokens = [
count_tokens(c)
+ 3
+ count_tokens(" ".join(h.split(" ")[1:-1]))
- (1 if len(c) == 0 else 0)
for h, c in zip(nheadings, ncontents)
]
# outputs += [(title, h, c, t) if t<max_len
# else (title, h, reduce_long(c, max_len), count_tokens(reduce_long(c,max_len)))
# for title, h, c, t in zip(ntitles, nheadings, ncontents, ncontent_ntokens)]
for title, h, c, t in zip(ntitles, nheadings, ncontents, ncontent_ntokens):
if (t<max_len and t>min_token_limit):
outputs += [(title,h,c,t)]
elif(t>=max_len):
outputs += [(title, h, reduce_long(c, max_len), count_tokens(reduce_long(c,max_len)))]
return outputs
def final_data_for_openai(outputs):
res = []
res += outputs
df = pd.DataFrame(res, columns=["title", "heading", "content", "tokens"])
df = df[df.tokens > 10] # to ensure really small and insignificant data doesn't get indexed
df = df.drop_duplicates(['title','heading'])
df = df.reset_index().drop('index',axis=1) # reset index
df = df.set_index(["title", "heading"])
return df
def count_tokens(text: str) -> int:
"""count the number of tokens in a string"""
return len(tokenizer.encode(text))
def find_between( s, first, last ):
try:
start = s.index( first ) + len( first )
end = s.rindex( last, start )
return s[start:end]
except ValueError:
return ""
def reduce_long(
long_text: str, long_text_tokens: bool = False, max_len: int = 590
) -> str:
"""
Reduce a long text to a maximum of `max_len` tokens by potentially cutting at a sentence end
"""
if not long_text_tokens:
long_text_tokens = count_tokens(long_text)
if long_text_tokens > max_len:
sentences = sent_tokenize(long_text.replace("\n", " "))
ntokens = 0
for i, sentence in enumerate(sentences):
ntokens += 1 + count_tokens(sentence)
if ntokens > max_len:
return ". ".join(sentences[:i][:-1]) + "."
return long_text
def get_embedding(text: str, model: str=EMBEDDING_MODEL) -> list[float]:
time.sleep(5)
result = openai.Embedding.create(
model=model,
input=text
)
return result["data"][0]["embedding"]
def compute_doc_embeddings(df: pd.DataFrame) -> dict[tuple[str, str], list[float]]:
"""
Create an embedding for each row in the dataframe using the OpenAI Embeddings API.
Return a dictionary that maps between each embedding vector and the index of the row that it corresponds to.
"""
return {
idx: get_embedding(r.content) for idx, r in df.iterrows()
}
def load_embeddings(fname: str) -> dict[tuple[str, str], list[float]]:
"""
Read the document embeddings and their keys from a CSV.
fname is the path to a CSV with exactly these named columns:
"title", "heading", "0", "1", ... up to the length of the embedding vectors.
"""
df = pd.read_csv(fname, header=0)
max_dim = max([int(c) for c in df.columns if c != "title" and c != "heading"])
return {
(r.title, r.heading): [r[str(i)] for i in range(max_dim + 1)] for _, r in df.iterrows()
}
def vector_similarity(x: list[float], y: list[float]) -> float:
"""
Returns the similarity between two vectors.
Because OpenAI Embeddings are normalized to length 1, the cosine similarity is the same as the dot product.
"""
return np.dot(np.array(x), np.array(y))
def order_document_sections_by_query_similarity(query: str, contexts: dict[tuple[str, str], np.array]) -> list[tuple[float, tuple[str, str]]]:
"""
Find the query embedding for the supplied query, and compare it against all of the pre-calculated document embeddings
to find the most relevant sections.
Return the list of document sections, sorted by relevance in descending order.
"""
query_embedding = get_embedding(query)
document_similarities = sorted([
(vector_similarity(query_embedding, doc_embedding), doc_index) for doc_index, doc_embedding in contexts.items()
], reverse=True)
return document_similarities
def construct_prompt(question: str, context_embeddings: dict, df: pd.DataFrame) -> str:
"""
Fetch the most relevant document sections for the question and assemble them into a prompt as context.
"""
most_relevant_document_sections = order_document_sections_by_query_similarity(question, context_embeddings)
chosen_sections = []
chosen_sections_len = 0
chosen_sections_indexes = []
for _, section_index in most_relevant_document_sections:
# Add contexts until we run out of space.
document_section = df.loc[section_index]
chosen_sections_len += document_section.tokens + separator_len
if chosen_sections_len > MAX_SECTION_LEN:
break
chosen_sections.append(SEPARATOR + document_section.content.replace("\n", " "))
chosen_sections_indexes.append(str(section_index))
# Useful diagnostic information
print("Selected ", len(chosen_sections), " document sections:")
print("\n".join(chosen_sections_indexes))
header = """Answer the question as truthfully as possible using the provided context, and if the answer is not contained within the text below, say "I don't know."\n\nContext:\n"""
return header + "".join(chosen_sections) + "\n\n Q: " + question + "\n A:"
def answer_query_with_context(
query: str,
df: pd.DataFrame,
document_embeddings: dict[tuple[str, str], np.array],
show_prompt: bool = False
) -> str:
prompt = construct_prompt(
query,
document_embeddings,
df
)
if show_prompt:
print(prompt)
response = openai.Completion.create(
prompt=prompt,
**COMPLETIONS_API_PARAMS
)
return response["choices"][0]["text"].strip(" \n")
def read_from_github(protocol_title, github_repo):
title_stack = read_docs(github_repo)
outputs = create_data_for_docs(protocol_title, title_stack)
print(outputs)
df = final_data_for_openai(outputs)
print(df.head())
# final_data_for_openai already returns the frame indexed by ["title", "heading"], so no extra set_index is needed
document_embeddings = compute_doc_embeddings(df)
print(len(df), " rows in the data.")
return outputs, df, document_embeddings
def calculate_new_output(title, heading, content):
nheadings, ncontents, ntitles = [], [], []
outputs = []
max_len = 1500
nheadings.append(heading)
ncontents.append(content)
ntitles.append(title)
ncontent_ntokens = [
count_tokens(c)
+ 3
+ count_tokens(" ".join(h.split(" ")[1:-1]))
- (1 if len(c) == 0 else 0)
for h, c in zip(nheadings, ncontents)
]
for title, h, c, t in zip(ntitles, nheadings, ncontents, ncontent_ntokens):
if (t<max_len and t>min_token_limit):
outputs += [(title,h,c,t)]
elif(t>=max_len):
outputs += [(title, h, reduce_long(c, max_len), count_tokens(reduce_long(c,max_len)))]
return outputs
def add_data(protocol_id, title, heading, content):
outputs, embeddings = retrieve_from_db(protocol_id)
# check to ensure that the output does not already include this entry
for x in outputs:
if(title == x[0] and heading == x[1] and content == x[2]):
print("Data already present")
return "Data already present"
# take title, heading and content and fetch the new outputs
new_outputs = calculate_new_output(title, heading, content)
new_df = final_data_for_openai(new_outputs)
# create an embedding against the newly added data
new_document_embeddings = compute_doc_embeddings(new_df)
# append the new output to the outputs in the database
outputs.extend(new_outputs)
# append the new embedding to the embedding in the database
embeddings.update(new_document_embeddings)
return update_in_db(outputs, embeddings, protocol_id)
def update_data(protocol_id, title, heading, updated_content):
outputs, embeddings = retrieve_from_db(protocol_id)
index_to_delete = -1
for i, x in enumerate(outputs):
if(title == x[0] and heading == x[1]):
if updated_content == x[2]:
return "Updated data already present"
else:
index_to_delete = i
if(index_to_delete >= 0):  # index 0 is a valid match position, so use >= rather than >
new_outputs = calculate_new_output(title, heading, updated_content)
new_df = final_data_for_openai(new_outputs)
# create an embedding against the newly added data
new_document_embeddings = compute_doc_embeddings(new_df)
# append the new output to the outputs in the database
outputs.extend(new_outputs)
# append the new embedding to the embedding in the database
embeddings.update(new_document_embeddings)
# deleting the existing entry
updated_outputs, updated_embeddings = delete_entries_by_index(outputs, embeddings, index_to_delete)
return update_in_db(updated_outputs, updated_embeddings, protocol_id)
def delete_data(protocol_id, title, heading):
outputs, embeddings = retrieve_from_db(protocol_id)
index_to_delete = -1
for i, x in enumerate(outputs):
if(title == x[0] and heading == x[1]):
index_to_delete = i
if index_to_delete<0:
return "Title and heading not found"
updated_outputs, updated_embeddings = delete_entries_by_index(outputs, embeddings, index_to_delete)
return update_in_db(updated_outputs, updated_embeddings, protocol_id)
def delete_entries_by_index(outputs, embeddings, index):
outputs.pop(index)
# del embeddings[next(islice(embeddings, index, None))]
return outputs, embeddings
def get_url(url):
response = requests.get(url)
content = response.content.decode("utf8")
return content
def get_json_from_url(url):
content = get_url(url)
js = json.loads(content)
return js
def get_updates(offset=None):
url = TELEGRAM_API_URL + "getUpdates?timeout=100"
if offset:
url += "&offset={}".format(offset)
js = get_json_from_url(url)
return js
def get_last_update_id(updates):
update_ids = []
for update in updates["result"]:
update_ids.append(int(update["update_id"]))
return max(update_ids)
def echo_all(updates, df, document_embeddings):
for update in updates["result"]:
try:
text = update["message"]["text"]
chat = update["message"]["chat"]["id"]
send_message(text, chat, df, document_embeddings)
except Exception as e:
print(e)
def send_message(text, chat_id, df, document_embeddings):
print(text)
response = answer_query_with_context(text, df, document_embeddings)
url = TELEGRAM_API_URL + "sendMessage?text={}&chat_id={}".format(response, chat_id)
get_url(url)
class TelegramBot(multiprocessing.Process):
def __init__(self, df, document_embeddings):
super(TelegramBot, self).__init__()
self.df = df
self.document_embeddings = document_embeddings
def run(self):
print("I'm the process with id: {}".format(self.df))
last_update_id = None
while True:
updates = get_updates(last_update_id)
if len(updates["result"]) > 0:
last_update_id = get_last_update_id(updates) + 1
echo_all(updates, self.df, self.document_embeddings)
time.sleep(0.5)
class DiscordBot(multiprocessing.Process):
def __init__(self, df, document_embeddings):
super(DiscordBot, self).__init__()
self.df = df
self.document_embeddings = document_embeddings
def run(self):
print("I'm the process with id: {}".format(self.df))
intents = discord.Intents.default()
intents.message_content = True
client = discord.Client(intents=intents)
@client.event
async def on_ready():
print('We have logged in as {0.user}'.format(client))
@client.event
async def on_message(message):
print(message.content)
if message.author == client.user:
return
if message.content.lower().find(config['DISCORD_BOT_USER_ID'].lower()) != -1:
# replace @scarlett with the Discord name of your bot
answer = answer_query_with_context(message.content, self.df, self.document_embeddings)
await message.channel.send(answer)
question = message.content.replace('<' + config['DISCORD_BOT_USER_ID'] + '> ', '')
send_question_to_db(protocol_id, question, answer)
client.run(config['DISCORD_TOKEN'])
def untuplify_dict_keys(mapping):
string_keys = {json.dumps(k): v for k, v in mapping.items()}
return string_keys
def tuplify_dict_keys(string):
mapping = string
return {tuple(json.loads(k)): v for k, v in mapping.items()}
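# Illustrative sketch (not called anywhere in this module): the two helpers above exist
# because the embeddings dict is keyed by (title, heading) tuples, which JSON cannot
# serialize directly, so keys are dumped to strings on the way to the database and parsed
# back into tuples on the way out. A hypothetical round trip would look like:
#   embeddings = {("Router", "Relayer: Setup"): [0.1, 0.2]}
#   stored = untuplify_dict_keys(embeddings)   # {'["Router", "Relayer: Setup"]': [0.1, 0.2]}
#   restored = tuplify_dict_keys(stored)       # keys are (title, heading) tuples again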
def send_question_to_db(protocol_id, question, answer):
data_to_post = {"protocol_id": protocol_id, "question": question, "answer": answer}
response = requests.post(config['BASE_API_URL'] + "question/", json=data_to_post)
return response
# adding data to the database
def send_to_db(_id, description, outputs, document_embeddings):
data_to_post = {"_id": _id, "description": description, "data": outputs, "embeddings": untuplify_dict_keys(document_embeddings)}
response = requests.post(config['BASE_API_URL'] + "document/", json=data_to_post)
return response
# fetching outputs and embeddings from the database
def retrieve_from_db(_id):
response = requests.get(config['BASE_API_URL'] + "document/" + _id)
json_response = response.json()
outputs = json_response['data']
document_embeddings = tuplify_dict_keys(json_response['embeddings'])
return outputs, document_embeddings
def update_in_db(outputs, embeddings, _id):
# update the entry in the database
data_to_update = {"data": outputs, "embeddings": untuplify_dict_keys(embeddings)}
response = requests.put(config['BASE_API_URL'] + "document/" + _id, json=data_to_update)
return response
# to add data from google sheet
def add_data_from_sheet(protocol_id, sheet_id, sheet_name):
gc = gspread.service_account('PATH_TO_CREDENTIALS.JSON')
spreadsheet = gc.open_by_key(sheet_id)
worksheet = spreadsheet.worksheet(sheet_name)
rows = worksheet.get_all_records()
print(rows)
df = pd.DataFrame(rows)
data_dict = {}
upload_df = pd.DataFrame()
for index, data in df.iterrows():
if data['Uploaded'] == 'No' or data['Uploaded'] == '':
# Upload to df and embeddings
add_data(protocol_id, data['Title'], data['Heading'], data['Content'])
# Recreate the df to upload back to the gsheet
data_dict['Title'] = data['Title']
data_dict['Heading'] = data['Heading']
data_dict['Content'] = data['Content']
data_dict['Uploaded'] = 'Yes'
print(data_dict)
data_df = pd.DataFrame([data_dict])
upload_df = pd.concat([upload_df, data_df])
def add_data_array(file_path, content):
title_stack = []
title = pp.AtLineStart(pp.Word("#")) + pp.rest_of_line
title_stack.append([0, 'start_of_file'])
if content.split('\n')[0] == '---':
title_stack[-1].append('')
title_stack[-1].append(file_path)
title_stack.append([1, content.split('\n')[1].split(':')[1].lstrip()])
content = content.split('---')[2]
last_end = 0
for t, start, end in title.scan_string(content):
# save content since last title in the last item in title_stack
title_stack[-1].append(content[last_end:start].lstrip("\n"))
title_stack[-1].append(file_path)
# add a new entry to title_stack
marker, title_content = t
level = len(marker)
title_stack.append([level, title_content.lstrip()])
# update last_end to the end of the current match
last_end = end
# add trailing text to the final parsed title
title_stack[-1].append(content[last_end:])
title_stack[-1].append(file_path)
return title_stack
def get_data_from_gitbook(gitbook_data_type, gitbook_link):
content = get_gitbook_data_in_md_format(gitbook_link, '')
print('Gitbook data in md format fetched')
title_stack = add_data_array(gitbook_data_type, content)
# create_data_for_docs expects a protocol title as its first argument; the gitbook data type is reused as that title here
outputs = create_data_for_docs(gitbook_data_type, title_stack)
print('Outputs created for gitbook data')
df = final_data_for_openai(outputs)
print(df.head())
# final_data_for_openai already returns the frame indexed by ["title", "heading"], so no extra set_index is needed
document_embeddings = compute_doc_embeddings(df)
print('Embeddings created, sending data to db...')
return outputs, df, document_embeddings
def get_pdf_data(type, document):
content = convert_to_md_format(document)
title_stack = add_data_array(type, content)
# create_data_for_docs expects a protocol title as its first argument; the document type is reused as that title here
outputs = create_data_for_docs(type, title_stack)
print('Outputs created for PDF data')
df = final_data_for_openai(outputs)
print(df.head())
# final_data_for_openai already returns the frame indexed by ["title", "heading"], so no extra set_index is needed
document_embeddings = compute_doc_embeddings(df)
print('Embeddings created, sending data to db...')
return outputs, df, document_embeddings
def read_command_line_params():
arguments = {}
n = len(sys.argv)
print("\nArguments passed:", end = " ")
for i in range(1, n):
argument = sys.argv[i].split('=')
arguments[argument[0]] = argument[1]
return arguments
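# Illustrative example of how this script is expected to be launched (the script name and
# all values below are placeholders, not taken from this project):
#   python upload_docs.py protocol_id=1 bot_description="Router protocol relayer info" \
#       protocol_title="Router" first_start=true reset_bot=false \
#       read_from_github=true github_repo=router-protocol/router-chain-docs \
#       read_from_gitbook_link=false read_from_pdf=false read_from_sheet=false
# Each argument is passed as key=value, which matches the split('=') parsing above.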
def main():
"""Parameters to provide from the command line
protocol_id (string): protocol_id for which the data needs to be uploaded
bot_description (string) : description of the protocol/bot for which data is being uploaded (passed as 'bot_description' on the command line)
reset_bot | first_start : set to true if you're uploading the data for the first time or if you want to upload entirely new data and forget the old data
read_from_github (boolean) : true if data needs to be read from Github, else false
github_repo (string) : link to your Github repo containing the docs, mandatory if read_from_github is true (for eg. https://github.com/router-protocol/router-chain-docs)
protocol_title : protocol name, used while uploading data from Github
read_from_gitbook_link (boolean): true if data needs to be read from Gitbook docs, else false
gitbook_link (string) : link to your Gitbook docs, mandatory if read_from_gitbook_link is true
gitbook_data_type : the type of data being uploaded from Gitbook - whitepaper, dev docs, FAQs etc.
read_from_pdf : true if data needs to be read from a PDF document, otherwise false
pdf_path : link to the PDF, mandatory if read_from_pdf is true
read_from_sheet: true if data needs to be read from a Google sheet, else false
sheet_id: ID of the Google Sheet from which the data is to be read, mandatory if read_from_sheet is true
sheet_name: name of the spreadsheet from which the data is to be read, mandatory if read_from_sheet is true
"""
arguments = read_command_line_params()
protocol_id = arguments['protocol_id']
bot_description = arguments['bot_description']
protocol_title = arguments['protocol_title']
outputs = []
# if the user is uploading data for the first time or wants to reupload the data from scratch
if arguments['reset_bot'].lower() == 'true' or arguments['first_start'].lower() == 'true':
if arguments['read_from_github'].lower() == 'true':
try:
github_repo = arguments['github_repo']
except KeyError:
raise Exception("Github link not provided, while read from github is true")
outputs, df, document_embeddings = read_from_github(protocol_title, github_repo)
if len(outputs) > 0:
send_to_db(protocol_id, bot_description, outputs, document_embeddings)
if arguments['read_from_gitbook_link'].lower() == 'true':
try:
gitbook_link = arguments['gitbook_link']
except KeyError:
raise Exception("Gitbook link not provided, while read_from_gitbook_link is true")
try:
gitbook_data_type = arguments['gitbook_data_type']
except KeyError:
gitbook_data_type = 'Whitepaper'
outputs, df, document_embeddings = get_data_from_gitbook(gitbook_data_type, gitbook_link)
if len(outputs) > 0:
send_to_db(protocol_id, bot_description, outputs, document_embeddings)
if arguments['read_from_pdf'].lower() == 'true':
try:
pdf_path = arguments['pdf_path']
except KeyError:
raise Exception("PDF path not provided, while read_from_pdf is true")
outputs, df, document_embeddings = get_pdf_data('PDF', pdf_path)
if len(outputs) > 0:
send_to_db(protocol_id, bot_description, outputs, document_embeddings)
if arguments['read_from_sheet'].lower() == 'true':
sheet_id = arguments['sheet_id']
sheet_name = arguments['sheet_name']
add_data_from_sheet(protocol_id, sheet_id, sheet_name)
# p = TelegramBot(df, document_embeddings)
# p.start()
outputs_from_database, document_embeddings_from_database = retrieve_from_db(protocol_id)
df_from_database = final_data_for_openai(outputs_from_database)
p = DiscordBot(df_from_database, document_embeddings_from_database)
p.start()
if __name__ == '__main__':
main() | [] |
2024-01-10 | abhisom2912/bot-service | bot_service~wip~fuzzy_match.py | from fuzzywuzzy import fuzz
from fuzzywuzzy import process
import time
import openai
import numpy as np
from dotenv import dotenv_values
# this script can be used to search the existing question database to answer previously answered questions
# this will bring down the cost of running such a service by fetching answer to every query via OpenAI matching
EMBEDDING_MODEL = "text-embedding-ada-002"
config = dotenv_values("../.env")
openai.api_key = config['OPENAI_API_KEY']
# matching a new query with a set of existing question
def fuzzy_wuzzy_match(question, list_of_prev_questions):
Str_A = 'Read the sentence - My name is Ali'
Str_B = 'My name is Ali'
ratio = fuzz.token_set_ratio(Str_A, Str_B)
print(ratio)
# get a list of matches ordered by score, default limit to 5
print(process.extract(question, list_of_prev_questions))
print(process.extractOne(question, list_of_prev_questions))
# just to compare the performance, we're seeing how OpenAI performs on the same data
def open_ai_match(question, list_of_prev_questions):
question_embedding, question_tokens = get_embedding(question)
prev_questions_embeddings, cost_incurred = compute_questions_embeddings(list_of_prev_questions)
print(question_tokens * 0.0004 / 1000)
print(cost_incurred)
document_similarities = sorted([
(vector_similarity(question_embedding, prev_questions_embeddings[prev_question]), prev_question) for prev_question in prev_questions_embeddings.keys()
], reverse=True)
print(document_similarities)
def compute_questions_embeddings(questions):
"""
Create an embedding for each row in the dataframe using the OpenAI Embeddings API.
Return a dictionary that maps between each embedding vector and the index of the row that it corresponds to.
"""
embedding_dict = {}
total_tokens_used = 0
for question in questions:
embedding, tokens = get_embedding(question)
embedding_dict[question] = embedding
total_tokens_used = total_tokens_used + tokens
cost_incurred = total_tokens_used * 0.0004 / 1000
return embedding_dict, cost_incurred
def vector_similarity(x: list[float], y: list[float]) -> float:
"""
Returns the similarity between two vectors.
Because OpenAI Embeddings are normalized to length 1, the cosine similarity is the same as the dot product.
"""
return np.dot(np.array(x), np.array(y))
def get_embedding(text: str, model: str=EMBEDDING_MODEL):
time.sleep(7)
result = openai.Embedding.create(
model=model,
input=text
)
return result["data"][0]["embedding"], result["usage"]["total_tokens"]
def main():
query = 'what is klima token'
choices = ['what\'s klima', 'what klima dao', 'what is the klima token', 'what is klima dao']
fuzzy_wuzzy_match(query, choices) # get the closest matches using fuzzy string matching
open_ai_match(query, choices)
if __name__ == '__main__':
main() | [] |
2024-01-10 | KoenigSimon/playerhunt_ai | gpt_interface.py | import openai
import time
import data_model as data
import asyncio
openai.api_key = open('api_key.txt').readline().strip()
model_tier = [
"gpt-3.5-turbo",
"gpt-4-1106-preview"
]
async def consult_ai(messages, model=model_tier[0]):
chat = await openai.ChatCompletion.acreate(
model=model, messages=messages
)
data.console_state[5] = "Last AI response: "+ time.strftime("%H:%M:%S")
reply = chat.choices[0].message.content
return {"role": "assistant", "content": reply}
def print_messages(messages):
for message in messages:
if message['role'] == 'user':
print("user: " + message['content'])
if message['role'] == 'assistant':
print("ai: " + message['content'])
| [] |
2024-01-10 | ErikFub/auto-coverletter | src~llm_chain~chains.py | """Collection of LLM chains."""
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
class CVSerializationChain(LLMChain):
def __init__(self):
prompt = PromptTemplate.from_template("""
Extract information from the CV provided. Return the information grouped under the following: Contact,
Academic Background, Professional Experience, Skills, Other. Start each group following this pattern:
[<GROUP NAME>], e.g. for 'Skills' do '[SKILLS]'. Return the information as detailed as possible. To
separate topics, use bullet points.
CV: {cv}
""")
super().__init__(
llm=OpenAI(max_tokens=2000),
prompt=prompt,
)
class JobPostSerializationChain(LLMChain):
def __init__(self):
prompt = PromptTemplate.from_template("""
For the following HTML response, extract the following: company name, job description, company description,
job requirements. Start each group following this pattern: [<GROUP NAME>], e.g. for 'Skills' do '[SKILLS]'.
Return the information as detailed as possible. HTML response: '{content}'")
""")
super().__init__(
llm=OpenAI(max_tokens=2000),
prompt=prompt,
)
class CoverLetterCreationChain(LLMChain):
def __init__(self):
prompt = PromptTemplate.from_template("""
Based on given CV information and a job posting, write a cover letter. Write 230 to 330 words. Start with
'Dear Sir or Madam'. Do not use any placeholder values. Only make claims that are backed by data on the CV.
Focus on mentioning how the requirements are fulfilled instead of listing everything from the CV.
Additionally, follow these guidelines:
[[INTRODUCTION (1st paragraph)]]
- State clearly in your opening sentence the purpose for your letter and a brief professional introduction.
- Specify why you are interested in that specific position and organization.
- Provide an overview of the main strengths and skills you will bring to the role.
[[BODY (2-3 paragraphs)]]
- Cite a couple of examples from your experience that support your ability to be successful in the position or organization.
- Try not to simply repeat your resume in paragraph form, complement your resume by offering a little more detail about key experiences.
- Discuss what skills you have developed and connect these back to the target role.
[[CLOSING (last paragraph)]]
- Restate succinctly your interest in the role and why you are a good candidate.
- Thank the reader for their time and consideration.
In the following, the required information about the applicant and the job:
[[CV]]
{cv}
[[JOB POSTING]]
{job_posting}
""")
super().__init__(
llm=OpenAI(max_tokens=1000),
prompt=prompt,
)
| [
"\n Extract information from the CV provided. Return the information grouped under the following: Contact, \n Academic Background, Professional Experience, Skills, Other. Start each group following this pattern: \n [<GROUP NAME>], e.g. for 'Skills' do '[SKILLS]'. Return the information as detailed as possible. To \n separate topics, use bullet points.\n CV: {cv}\n ",
"\n For the following HTML response, extract the following: company name, job description, company description, \n job requirements. Start each group following this pattern: [<GROUP NAME>], e.g. for 'Skills' do '[SKILLS]'. \n Return the information as detailed as possible. HTML response: '{content}'\")\n ",
"\n Based on given CV information and a job posting, write a cover letter. Write 230 to 330 words. Start with \n 'Dear Sir or Madam'. Do not use any placeholder values. Only make claims that are backed by data on the CV. \n Focus on mentioning how the requirements are fulfilled instead of listing everything from the CV. \n Additionally, follow these guidelines:\n [[INTRODUCTION (1st paragraph)]]\n - State clearly in your opening sentence the purpose for your letter and a brief professional introduction.\n - Specify why you are interested in that specific position and organization.\n - Provide an overview of the main strengths and skills you will bring to the role.\n \n [[BODY (2-3 paragraphs)]]\n - Cite a couple of examples from your experience that support your ability to be successful in the position or organization.\n - Try not to simply repeat your resume in paragraph form, complement your resume by offering a little more detail about key experiences.\n - Discuss what skills you have developed and connect these back to the target role.\n \n [[CLOSING (last paragraph)]]\n - Restate succinctly your interest in the role and why you are a good candidate.\n - Thank the reader for their time and consideration.\n \n In the following, the required information about the applicant and the job:\n [[CV]]\n {cv}\n \n [[JOB POSTING]]\n {job_posting}\n "
] |
2024-01-10 | ErikFub/auto-coverletter | src~llm_chain~parsers.py | import json
from langchain.schema import BaseOutputParser
class JsonOutputParser(BaseOutputParser):
def parse(self, text: str) -> dict:
return json.loads(text)
| [] |
2024-01-10 | ethan0807/REG_BOT | LLM_Text_Summerizer.py | import Globals
import openai
import logging
from llama_index.chat_engine import SimpleChatEngine
from langchain.chat_models import ChatOpenAI
from llama_index import ServiceContext
import sys
# Generates a summary for a given node(s)
globals = Globals.Defaults()
openai.api_key = globals.open_api_key
temperature = 0 # globals.default_temperature
model = globals.default_model
def generate_summary_chat(nodes):
responses = []
service_context = ServiceContext.from_defaults(
llm=ChatOpenAI(temperature=temperature, model=model))
chat_engine = SimpleChatEngine.from_defaults(
service_context=service_context)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
for node_with_score in nodes:
text = node_with_score.node.text
response = chat_engine.chat("Restate the following text more succintly and clearly: " + text)
#response = chat_engine.chat(text + "\n\nTl;dr")
responses.append(response)
chat_engine.reset()
return responses
| [
"Restate the following text more succintly and clearly: PLACEHOLDER"
] |
2024-01-10 | ethan0807/REG_BOT | LLM_Vector_Index_Generator.py | import openai
from llama_index import Document, GPTVectorStoreIndex, SimpleDirectoryReader, EmptyIndex
import Globals
import os
import re
# Generates the vector index from the text files in the texts directory and persists it to the index directory
globals = Globals.Defaults()
index_path = globals.index_path
texts_path = globals.texts_path
openai.api_key = globals.open_api_key
if not os.path.exists(index_path):
os.mkdir(index_path)
if not os.path.exists(texts_path):
os.mkdir(texts_path)
def list_files(texts_path):
# List to store the full paths
full_paths = []
# Walk through the directory
for dirpath, dirnames, filenames in os.walk(texts_path):
for filename in filenames:
# Combine the directory currently being walked with the filename (joining texts_path alone would break for nested folders)
full_path = os.path.join(dirpath, filename)
full_paths.append(full_path)
return full_paths
def parse_filename(filename):
pattern = r'^(.*?)_(.*?)_(.*?).txt$'
match = re.match(pattern, filename)
# If a match was found, extract the groups
if match:
regulation_name = match.group(1)
section_number = match.group(2)
section_name = match.group(3)
return regulation_name, section_number, section_name
else:
print('Not found! ' + filename)
return None
documents = []
text_files = [list_files(os.path.abspath(texts_path))]
doc_id = 0
for file_list in text_files:
for file in file_list:
with open(file, 'r', encoding='utf-8') as f:
text = ' '.join(f.readlines())
file_name = os.path.basename(file)
metadata = parse_filename(file_name)
print(metadata)
document = Document(
text,
doc_id=doc_id,
extra_info={
'regulation': metadata[0],
'section_number': metadata[1],
'section_name': metadata[2].replace('_', ' ')
})
documents.append(document)
doc_id += 1
index = GPTVectorStoreIndex.from_documents(documents)
if len(index.storage_context.docstore.docs) == 0:
print("The generated index is empty.")
else:
# save vector index to persistant storage
index.storage_context.persist(persist_dir=index_path)
| [] |
2024-01-10 | ethan0807/REG_BOT | AR_Bot.py | from LLM_Ranked_Retriever import get_retrieved_nodes
from LLM_Text_Summerizer import generate_summary_chat
from LLM_Index_Loader import load_index
import Globals
import openai
import streamlit as st
from streamlit_chat import message
# Streamlit UI for the Army RegBot
# Command line run: streamlit run AR_Bot.py --server.port 80
globals = Globals.Defaults()
openai.api_key = globals.open_api_key
model_name = globals.default_model
source = ""
index = load_index()
# Setting page title and header
st.set_page_config(page_title="REGBOT", page_icon=":robot_face:")
st.markdown("<h1 style='text-align: center;'>Army RegBot</h1>",
unsafe_allow_html=True)
# Initialise session state variables
if 'generated' not in st.session_state:
st.session_state['generated'] = []
if 'past' not in st.session_state:
st.session_state['past'] = []
if 'messages' not in st.session_state:
st.session_state['messages'] = [
{"role": "system", "content": "You are a helpful assistant."}
]
if 'model_name' not in st.session_state:
st.session_state['model_name'] = []
# Sidebar - let user clear the current conversation
st.sidebar.title("Enter a question about the following Army Regulations:")
st.sidebar.markdown(" AR735-5 Property Management, AR600-8-24 Officer Transfer and Discharge, AR 608-10 Child Development Services, AR750-1 Material Maintenance Management")
clear_button = st.sidebar.button("Clear Conversation", key="clear")
# reset everything
if clear_button:
st.session_state['generated'] = []
st.session_state['past'] = []
st.session_state['messages'] = [
{"role": "system", "content": "You are a helpful assistant."}
]
# generate a response
def generate_response(prompt):
st.session_state['messages'].append({"role": "user", "content": prompt})
nodes_with_score = get_retrieved_nodes(index, prompt, vector_top_k=1)
if nodes_with_score[0].score < 0.75:
response = "I'm sorry, I can't find a good answer to that question. Can you please rephrase it or attempt to be more specific?"
else:
summaries = generate_summary_chat(nodes_with_score)
reg = nodes_with_score[0].node.metadata["regulation"]
sec_num = nodes_with_score[0].node.metadata["section_number"]
sec_name = nodes_with_score[0].node.metadata["section_name"]
source = f"Source: Regulation {reg}, {sec_num} {sec_name}"
summaries[0].response += "\n\n" + source
response = summaries[0].response
st.session_state['messages'].append(
{"role": "assistant", "content": response})
print(st.session_state['messages'])
return response
# container for chat history
response_container = st.container()
# container for text box
container = st.container()
with container:
with st.form(key='my_form', clear_on_submit=True):
user_input = st.text_area("You:", key='input', height=100)
submit_button = st.form_submit_button(label='Send')
if submit_button and user_input:
output = generate_response(user_input)
st.session_state['past'].append(user_input)
st.session_state['generated'].append(output)
if 'generated' in st.session_state:
if st.session_state['generated']:
with response_container:
for i in range(len(st.session_state['generated'])):
message(st.session_state["past"][i],
is_user=True, key=str(i) + '_user')
message(st.session_state["generated"][i], key=str(i))
| [
"You are a helpful assistant."
] |
2024-01-10 | ethan0807/REG_BOT | LLM_Index_Loader.py | from llama_index import LLMPredictor, ServiceContext, StorageContext, load_index_from_storage
from langchain.chat_models import ChatOpenAI
import openai
import Globals
# Loads the vector index from the storage directory
globals = Globals.Defaults()
openai.api_key = globals.open_api_key
default_model = globals.default_model
default_temperature = globals.default_temperature
default_max_chunk_size = globals.default_max_chunk_size
index_path = globals.index_path
def load_index():
# define LLM
llm_predictor = LLMPredictor(llm=ChatOpenAI(
temperature=default_temperature, model_name=default_model))
# configure service context
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor, chunk_size=default_max_chunk_size)
# rebuild storage context
storage_context = StorageContext.from_defaults(
persist_dir=index_path)
# load index
return load_index_from_storage(storage_context)
| [] |
2024-01-10 | ethan0807/REG_BOT | LLM_QABot_Simple_Tester.py | from llama_index.tools import QueryEngineTool, ToolMetadata
import openai
from LLM_Index_Loader import load_index
import logging
import os
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
openai.api_key = os.environ["OPENAI_API_KEY"]
# model = "gpt-4" # chunk limit 8192
# model = "gpt-3.5-turbo" # chunk limit 4096
# model = "text-davinci-003" # chunk limit 4097
model = "ada"
max_chunk_size = 1024
index_version = "v1"
data_path = "./data/index/" + index_version + "/"
# load index
index = load_index() # load_index_from_storage(storage_context)
# Create query engine off of index
query_engine = index.as_query_engine(similarity_top_k=1)
# setup base query engine as tool
query_engine_tools = [
QueryEngineTool(
query_engine=query_engine,
metadata=ToolMetadata(name='ArmyRegs', description='Army Regulations')
)
]
query1 = "What are the time constraints for processing a flipl? Provide the name of the Army Regulation this info is found in."
query2 = "What are the specific codes that can be validly entered into GCSS Army that represent an equipment fault according to AR 750-1?"
query3 = "What are the DD Form 200 processing time segments and their associated time constraints?"
response = query_engine.query(query1)
for node in response.source_nodes:
print(node.doc_id)
print(node.score)
print(node.source_text)
print(response)
response = query_engine.query(query2)
print(response)
response = query_engine.query(query3)
print(response)
| [] |
2024-01-10 | BPinkham2024/csp-summer-work | project~Akane.py | import json
import discord
from discord.ext import commands
from discord import app_commands
import openai
import config
# bot init
bot = commands.Bot(command_prefix='>', intents=discord.Intents.all())
# openai init
openai.api_key = config.AI_TOKEN
# def chat_with_bot(message):
# prompt = f"You: {message}\nAkane Akemi:"
# response = openai.ChatCompletion.create(
# engine='gpt-3.5-turbo',
# prompt=prompt,
# max_tokens=50,
# temperature=0.7,
# n=1,
# stop=None,
# timeout=5
# )
# if response and response.choices:
# return response.choices[0].text.strip()
# else:
# return "Sorry, I couldn't generate a response at the moment."
def chat_with_bot(message):
chat_prompt = [
{"role": "system", "content": "You are Akane Akemi, a helpful assistant. You will answer any question and whenever you are asked your name you will respond with 'Akane Akemi'"},
{"role": "user", "content": message}
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=chat_prompt,
max_tokens=50,
temperature=0.7
)
if response and response.choices:
reply = response.choices[0].message["content"]
return reply.strip()
else:
return "Sorry, I couldn't generate a response at the moment."
# -------------------------------------------------------------------------
@bot.event
async def on_ready():
print(f'Logged in as {bot.user.name} ({bot.user.id})')
print('----')
try:
synced = await bot.tree.sync()
print(f'Synced {len(synced)} command(s)')
except Exception as e:
print(e)
# commands
@bot.tree.command(name="chat", description="Chat with Akane Akemi!")
@app_commands.describe(msg = "msg")
async def chat(interaction: discord.Interaction, msg: str):
# await interaction.response.send_message(chat_with_bot(arg), ephemeral=True)
await interaction.response.send_message(chat_with_bot(msg))
@bot.tree.command(name="embedded-chat", description="Chat with Akane Akemi but she will responed in an embed.")
@app_commands.describe(msg = "msg")
async def chat_embed(interaction: discord.Interaction, msg: str):
# embed = discord.Embed(title='Akane Akemi', description=chat_with_bot(msg), color=0x00FF22)
await interaction.response.send_message(embed=discord.Embed(title='Akane Akemi', description=chat_with_bot(msg), color=0xff748c))
@bot.tree.command(name="whothis")
async def whothis_command(interaction: discord.Interaction, member: discord.Member):
embed=discord.Embed(title=f"{member.name}", description=f"ID: {member.id}")
embed.add_field(name="Join Date", value=member.created_at.strftime("%m/%d/%Y %H:%M:%S"), inline=False)
embed.add_field(name="Badges", value=", ".join([badge.name for badge in member.public_flags.all()]), inline=False)
embed.add_field(name="Activity", value=member.activity)
embed.set_thumbnail(url=member.avatar.url)
await interaction.response.send_message(embed=embed)
# context menues
@bot.tree.context_menu(name="whothis")
async def whothis(interaction: discord.Interaction, member: discord.Member):
embed=discord.Embed(title=f"{member.name}", description=f"ID: {member.id}")
embed.add_field(name="Join Date", value=member.created_at.strftime("%m/%d/%Y %H:%M:%S"), inline=False)
embed.add_field(name="Badges", value=", ".join([badge.name for badge in member.public_flags.all()]), inline=False)
embed.add_field(name="Activity", value=member.activity)
embed.set_thumbnail(url=member.avatar.url)
await interaction.response.send_message(embed=embed)
@bot.event
async def on_message(message):
try:
user = str(message.author)
user_msg = str(message.content)
channel = str(message.channel.name)
data = str(f'{user}: {user_msg} ({channel})')
print(data)
except Exception as e:
print('**hidden message**')
log_message(message)
if message.author == bot.user:
return
if message.channel.name == 'general':
if user_msg.lower() == 'hello':
await message.channel.send(f'hello, {user}!')
if user_msg.lower().startswith('>chat'):
await message.channel.send(chat_with_bot(user_msg[5:]))
# log chat messages
def log_message(message):
message_data = {
'author': str(message.author),
'content': message.content,
'timestamp': str(message.created_at),
'channel': str(message.channel)
}
chat_log = load_chat_log()
chat_log.append(message_data)
save_chat_log(chat_log)
def load_chat_log():
try:
with open('chat_log.json', 'r') as file:
chat_log = json.load(file)
except FileNotFoundError:
chat_log = []
return chat_log
def save_chat_log(chat_log):
with open('chat_log.json', 'w') as file:
json.dump(chat_log, file, indent=4)
# run bot :)
bot.run(config.BOT_TOKEN) | [
"You are Akane Akemi, a helpful assistant. You will answer any question and whenever you are asked your name you will respond with 'Akane Akemi'"
] |
2024-01-10 | baoguangsheng/fast-detect-gpt | scripts~data_builder.py | # Copyright (c) Guangsheng Bao.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
import numpy as np
import datasets
import torch
import random
import argparse
import os
import json
import custom_datasets
from model import load_tokenizer, load_model
def save_data(output_file, args, data):
# write args to file
args_file = f"{output_file}.args.json"
with open(args_file, "w") as fout:
json.dump(args.__dict__, fout, indent=4)
print(f"Args written into {args_file}")
# write the data to a json file in the save folder
data_file = f"{output_file}.raw_data.json"
with open(data_file, "w") as fout:
json.dump(data, fout, indent=4)
print(f"Raw data written into {data_file}")
def load_data(input_file):
data_file = f"{input_file}.raw_data.json"
with open(data_file, "r") as fin:
data = json.load(fin)
print(f"Raw data loaded from {data_file}")
return data
class DataBuilder:
def __init__(self, args):
self.args = args
self.base_tokenizer = load_tokenizer(args.base_model_name, args.dataset, args.cache_dir)
self.base_model = None if args.openai_model else load_model(args.base_model_name, args.device, args.cache_dir)
def _openai_sample(self, prefix):
def _drop_last_word(text):
return ' '.join(text.split(' ')[:-1])
import openai
assert self.args.openai_key is not None, "Must provide OpenAI API key as --openai_key"
openai.api_key = self.args.openai_key
if self.args.openai_base is not None:
openai.api_base = self.args.openai_base
if self.args.dataset != 'pubmed': # keep Answer: prefix for pubmed
prefix = _drop_last_word(prefix)
# sample from the openai model
kwargs = {"max_tokens": 200}
if self.args.do_top_p:
kwargs['top_p'] = self.args.top_p
elif self.args.do_top_k:
kwargs['top_k'] = self.args.top_k
elif self.args.do_temperature:
kwargs['temperature'] = self.args.temperature
if self.args.openai_model == 'davinci':
kwargs["engine"] = self.args.openai_model
response = openai.Completion.create(prompt=f"{prefix}", **kwargs)
return prefix + response['choices'][0]['text']
elif self.args.openai_model in ['gpt-3.5-turbo', 'gpt-4']:
roles = {'xsum': 'You are a News writer.',
'writing': 'You are a Fiction writer.',
'pubmed': 'You are a Technical writer.'}
prompts = {'xsum': 'Please write an article with about 150 words starting exactly with:',
'writing': 'Please write an article with about 150 words starting exactly with:',
'pubmed': 'Please answer the question in about 50 words.'}
messages = [
{'role': 'system', 'content': roles[self.args.dataset]},
{'role': 'user', 'content': f'{prompts[self.args.dataset]} {prefix}'},
]
kwargs["model"] = self.args.openai_model
kwargs["messages"] = messages
response = openai.ChatCompletion.create(**kwargs)
response = response['choices'][0]['message']['content']
# ChatGPT may repeat the prefix
if response.startswith(prefix[:20]):
return response
return prefix + ' ' + response
else:
raise NotImplementedError
# sample from base_model using ****only**** the first 30 tokens in each example as context
def _sample_from_model(self, texts, min_words=55, prompt_tokens=30):
# encode each text as a list of token ids
if self.args.dataset == 'pubmed':
texts = [t[:t.index(custom_datasets.SEPARATOR)] for t in texts]
all_encoded = self.base_tokenizer(texts, return_tensors="pt", padding=True, return_token_type_ids=False).to(self.args.device)
else:
all_encoded = self.base_tokenizer(texts, return_tensors="pt", padding=True, return_token_type_ids=False).to(self.args.device)
all_encoded = {key: value[:, :prompt_tokens] for key, value in all_encoded.items()}
if self.args.openai_model:
# decode the prefixes back into text
prefixes = self.base_tokenizer.batch_decode(all_encoded['input_ids'], skip_special_tokens=True)
decoded = []
for idx, prefix in enumerate(prefixes):
while idx >= len(decoded):
try:
decoded.append(self._openai_sample(prefix))
except Exception as ex:
print(ex)
print('Wait 10 minutes before retry ...')
time.sleep(600)
else:
self.base_model.eval()
decoded = ['' for _ in range(len(texts))]
# sample from the model until we get a sample with at least min_words words for each example
# this is an inefficient way to do this (since we regenerate for all inputs if just one is too short), but it works
tries = 0
m = 0
while m < min_words:
if tries != 0:
print()
print(f"min words: {m}, needed {min_words}, regenerating (try {tries})")
prefixes = self.base_tokenizer.batch_decode(all_encoded['input_ids'], skip_special_tokens=True)
for prefix, x in zip(prefixes, decoded):
if len(x.split()) == m:
print(prefix, '=>', x)
sampling_kwargs = {}
if self.args.do_top_p:
sampling_kwargs['top_p'] = self.args.top_p
elif self.args.do_top_k:
sampling_kwargs['top_k'] = self.args.top_k
elif self.args.do_temperature:
sampling_kwargs['temperature'] = self.args.temperature
min_length = 50 if self.args.dataset in ['pubmed'] else 150
outputs = self.base_model.generate(**all_encoded, min_length=min_length, max_length=200, do_sample=True,
**sampling_kwargs, pad_token_id=self.base_tokenizer.eos_token_id,
eos_token_id=self.base_tokenizer.eos_token_id)
decoded = self.base_tokenizer.batch_decode(outputs, skip_special_tokens=True)
m = min(len(x.split()) for x in decoded)
tries += 1
return decoded
def generate_samples(self, raw_data, batch_size):
# trim to shorter length
def _trim_to_shorter_length(texta, textb):
# truncate to shorter of o and s
shorter_length = min(len(texta.split(' ')), len(textb.split(' ')))
texta = ' '.join(texta.split(' ')[:shorter_length])
textb = ' '.join(textb.split(' ')[:shorter_length])
return texta, textb
def _truncate_to_substring(text, substring, idx_occurrence):
# truncate everything after the idx_occurrence occurrence of substring
assert idx_occurrence > 0, 'idx_occurrence must be > 0'
idx = -1
for _ in range(idx_occurrence):
idx = text.find(substring, idx + 1)
if idx == -1:
return text
return text[:idx]
data = {
"original": [],
"sampled": [],
}
for batch in range(len(raw_data) // batch_size):
print('Generating samples for batch', batch, 'of', len(raw_data) // batch_size)
original_text = raw_data[batch * batch_size:(batch + 1) * batch_size]
sampled_text = self._sample_from_model(original_text, min_words=30 if self.args.dataset in ['pubmed'] else 55)
for o, s in zip(original_text, sampled_text):
if self.args.dataset == 'pubmed':
s = _truncate_to_substring(s, 'Question:', 2)
o = o.replace(custom_datasets.SEPARATOR, ' ')
o, s = _trim_to_shorter_length(o, s)
# add to the data
data["original"].append(o)
data["sampled"].append(s)
return data
def generate_data(args, dataset, key):
# strip newlines from each example; replace one or more newlines with a single space
def _strip_newlines(text):
return ' '.join(text.split())
# load data
if dataset in custom_datasets.DATASETS:
data = custom_datasets.load(dataset, args.cache_dir)
else:
data = custom_datasets.load_dataset(dataset, split='train', cache_dir=args.cache_dir)[key]
# get unique examples, strip whitespace, and remove newlines
# then take just the long examples, shuffle, take the first 5,000 to tokenize to save time
# then take just the examples that are <= 512 tokens (for the base model)
# then generate n_samples samples
# remove duplicates from the data
data = list(dict.fromkeys(data)) # deterministic, as opposed to set()
# strip whitespace around each example
data = [x.strip() for x in data]
# remove newlines from each example
data = [_strip_newlines(x) for x in data]
# try to keep only examples with > 250 words
if dataset in ['writing', 'squad', 'xsum']:
long_data = [x for x in data if len(x.split()) > 250]
if len(long_data) > 0:
data = long_data
random.shuffle(data)
data = data[:5_000]
# keep only examples with <= 512 tokens according to base_tokenizer
# this step has the extra effect of removing examples with low-quality/garbage content
data_builder = DataBuilder(args)
tokenized_data = data_builder.base_tokenizer(data)
data = [x for x, y in zip(data, tokenized_data["input_ids"]) if len(y) <= 512]
# print stats about remaining data
print(f"Total number of samples: {len(data)}")
print(f"Average number of words: {np.mean([len(x.split()) for x in data])}")
return data_builder.generate_samples(data[:args.n_samples], batch_size=args.batch_size)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--output_file', type=str, default="./exp_gpt3/data/xsum_gpt2")
parser.add_argument('--dataset', type=str, default="xsum")
parser.add_argument('--n_samples', type=int, default=200)
parser.add_argument('--openai_base', type=str, default=None)
parser.add_argument('--openai_key', type=str, default=None)
parser.add_argument('--openai_model', type=str, default=None) # davinci, gpt-3.5-turbo, gpt-4
parser.add_argument('--base_model_name', type=str, default="gpt2")
parser.add_argument('--batch_size', type=int, default=50)
parser.add_argument('--do_top_k', action='store_true')
parser.add_argument('--top_k', type=int, default=40)
parser.add_argument('--do_top_p', action='store_true')
parser.add_argument('--top_p', type=float, default=0.96)
parser.add_argument('--do_temperature', action='store_true')
parser.add_argument('--temperature', type=float, default=0.8)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--device', type=str, default="cuda")
parser.add_argument('--cache_dir', type=str, default="../cache")
args = parser.parse_args()
os.environ["XDG_CACHE_HOME"] = args.cache_dir
if not os.path.exists(args.cache_dir):
os.makedirs(args.cache_dir)
print(f"Using cache dir {args.cache_dir}")
random.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
print(f'Loading dataset {args.dataset}...')
dataset_keys = {'xsum': 'document', 'squad': 'context', 'writing': 'document'}
data = generate_data(args, args.dataset, dataset_keys[args.dataset] if args.dataset in dataset_keys else None)
save_data(args.output_file, args, data)
| [
"{'xsum': 'Please write an article with about 150 words starting exactly with:', 'writing': 'Please write an article with about 150 words starting exactly with:', 'pubmed': 'Please answer the question in about 50 words.'}"
] |
2024-01-10 | saraoriola/poppaFront | Datascience~src~api_data.py | from spanlp.palabrota import Palabrota
from spanlp.domain.strategies import Preprocessing, RemoveUserMentions, RemoveUrls, RemoveHashtags, RemoveEmoticons, RemoveEmailAddress
from flask import Flask, jsonify, request
from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker
from datetime import timedelta
import pandas as pd
import psycopg2
import requests
import openai
import deepl
import os
from dotenv import load_dotenv
import locale
# Load environment variables from the .env file
load_dotenv()
# Set OpenAI variables
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
OPENAI_PROMPT_FEEDBACK = os.getenv('OPENAI_PROMPT_FEEDBACK')
openai.api_key = OPENAI_API_KEY
# Set DeepL variables
DEEPL_API_KEY = os.getenv('DEEPL_API_KEY')
translator = deepl.Translator(DEEPL_API_KEY)
# Set AWS variables
AWS_USER = os.getenv('AWS_USER')
AWS_PASS = os.getenv('AWS_PASS')
AWS_HOST = os.getenv('AWS_HOST')
AWS_PORT = os.getenv('AWS_PORT')
AWS_NAME = os.getenv('AWS_NAME')
# Set RENDER variables
RENDER_USER = os.getenv('RENDER_USER')
RENDER_PASS = os.getenv('RENDER_PASS')
RENDER_HOST = os.getenv('RENDER_HOST')
RENDER_PORT = os.getenv('RENDER_PORT')
RENDER_NAME = os.getenv('RENDER_NAME')
# Instantiate bad-language (profanity) helpers
palabrota = Palabrota()
strategies = [RemoveEmailAddress(), RemoveUrls(), RemoveUserMentions(), RemoveHashtags(), RemoveEmoticons()]
# Function that calls the OpenAI API for the feedback analysis
def api_openai_feedback(inputs):
"""
This function uses OpenAI's GPT-3.5 Turbo model to produce a response based on an input message.
Args:
inputs (str): The input message that will be sent to the model.
Returns:
dict: A dictionary containing the response generated by the model, including information such as the response text, probability, etc.
"""
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{
"role": "system",
"content": OPENAI_PROMPT_FEEDBACK
},
{
"role": "user",
"content": str(inputs)
}
],
temperature=0,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
return response
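# Illustrative usage sketch (the feedback strings are made-up examples): the helper returns
# the raw ChatCompletion response, so callers read the generated text from
# response['choices'][0]['message']['content'], e.g.:
#   resp = api_openai_feedback(["Great workshop", "the room was too cold"])
#   text = resp['choices'][0]['message']['content']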
# Function that calls the OpenAI API to generate a cover image
def api_openai_cover_image(inputs, size="1024x1024"):
"""
Sends a request to OpenAI's image-generation API to create a realistic image based on the provided input text.
Parameters:
inputs (str): A descriptive input text that provides instructions for the image-generation process.
size (str, optional): The desired size of the generated image in "widthxheight" format (e.g., "1024x1024").
The default value is "1024x1024".
Returns:
str: URL of the generated image.
str: The size of the generated image.
"""
response = openai.Image.create(
prompt=f'Create realistic image of {inputs} with a cartoon details and without landscape. The color palette should be composed of soothing neutral and cream tones, and the background should be a pure, crisp white.',
n=1,
size=size
)
return response['data'][0]['url'], size
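# Illustrative usage sketch (the subject text is a made-up example): the helper returns the
# generated image URL together with the size that was requested, e.g.:
#   url, size = api_openai_cover_image("a cooking workshop for beginners", size="512x512")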
# DeepL API endpoint used to retrieve the available languages
def api_deepl_languages(language='English (British)'):
"""
Retrieves the languages supported by DeepL and their codes, and returns the code for a specified language.
Parameters:
language (str, optional): The name of the language whose code you want to retrieve.
The default value is 'English (British)'.
Returns:
str: The language code for the specified language.
Note:
- You must have a valid DeepL API key stored in the DEEPL_API_KEY variable to use this function.
- Make sure to set a user agent in the headers to comply with the API usage guidelines.
"""
url = "https://api-free.deepl.com/v2/languages?type=target"
headers = {
"Authorization": f"DeepL-Auth-Key {DEEPL_API_KEY}",
"User-Agent": "YourApp/1.2.3"
}
response = requests.get(url, headers=headers)
dict_languages = {}
for item in response.json():
dict_languages[item['name']] = item['language']
return dict_languages[language]
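# Illustrative usage sketch: the helper maps a human-readable DeepL language name to its
# target-language code (the exact code comes from DeepL's response), e.g.:
#   target_code = api_deepl_languages('Spanish')  # expected to return something like 'ES'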
app = Flask(__name__)
# HOME
@app.route('/', methods=['GET'])
def hello_world():
return 'Hello, World!'
# API FOR BAD LANGUAGE AND SENTIMENT ANALYSIS
@app.route('/get_sentiment_analysis_feedback', methods=['POST'])
def sentiment_analysis_model():
"""
Analiza el sentimiento y realiza diversas operaciones en el feedback proporcionado.
Esta API recibe feedback a través del cuerpo de una solicitud POST y realiza las siguientes tareas:
1. Verifica si el feedback contiene palabrotas.
2. Limpia el texto del feedback según estrategias definidas antes de enviarlo a la API de OpenAI.
3. Realiza un análisis de sentimiento sobre el feedback utilizando la API de OpenAI.
4. Procesa y presenta los resultados en un formato legible.
Parameters:
Ninguno explícito (los datos de entrada se toman del cuerpo de la solicitud POST).
Returns:
jsonify(dict): Un objeto JSON que contiene información sobre el feedback, incluyendo si contiene palabrotas,
los resultados del análisis de sentimiento y detalles sobre el uso de la API de OpenAI.
Nota:
- Esta API está diseñada para ser utilizada con una solicitud POST que proporcione un objeto JSON con un campo
llamado 'feedback' que contenga el texto del feedback a analizar.
- Se asume que la función 'api_openai_feedback' se encuentra definida en otra parte del código y maneja la
comunicación con la API de OpenAI.
- Los resultados se presentan en un formato JSON que puede incluir información sobre el contenido del feedback,
resultados de análisis de sentimiento y detalles de uso de la API de OpenAI.
- Si se produce un error durante el proceso, se devolverá una respuesta JSON con un mensaje de error y un código
de estado 400 (Bad Request).
"""
# Read the feedback from the request body
data = request.get_json()
feedback = data['feedback'].encode('utf-8').decode('utf-8')
dict_data = {}
# Check whether it contains a swear word
swearword = palabrota.contains_palabrota(feedback)
dict_data['swearword'] = swearword
# Clean the feedback text before sending it to the OpenAI API
preprocessor = Preprocessing(data=feedback, clean_strategies=strategies)
clean_feedback = preprocessor.clean().split('.')
# Sentiment analysis of the feedback with the OpenAI API
valuation = api_openai_feedback(clean_feedback)
try:
# Clean up the format of the OpenAI function output
data = valuation['choices'][0]['message']['content']
data = data.replace('{', '').replace('}', '').replace(',', '').replace('"', '').strip().split('\n')
for item in data:
prov = item.strip().split(':')
key, value = prov[0], prov[1]
dict_data[key] = int(value)
# Cost accounting
usage = valuation['usage']
prov_dict = {}
for key, value in usage.items():
prov_dict[key] = int(value)
dict_data['usage'] = prov_dict
except:
response = {"ERROR": "Parece ser un error en el proceso de limpieza de los datos de la API utilizada para el feedback."}
return jsonify(response), 400
return jsonify(dict_data), 200
# API FOR THE IMAGE MODEL
@app.route('/get_cover_image', methods=['POST'])
def cover_image():
"""
Genera una imagen de portada basada en el texto proporcionado utilizando la API de OpenAI.
Esta API toma un objeto JSON del cuerpo de una solicitud POST que contiene un campo llamado 'text'. El proceso
consta de los siguientes pasos:
1. Recoge el texto del campo 'text' en el objeto JSON de entrada.
2. Traduce el texto a inglés británico (EN-GB) utilizando un servicio de traducción.
3. Genera una imagen de portada utilizando la API de OpenAI basada en el texto traducido.
4. Devuelve la URL de la imagen generada y el costo estimado de acuerdo con el tamaño de la imagen.
Parameters:
Ninguno explícito (los datos de entrada se toman del cuerpo de la solicitud POST).
Returns:
jsonify(dict): Un objeto JSON que contiene la URL de la imagen generada y el costo estimado.
Nota:
- Esta API espera recibir una solicitud POST con un objeto JSON que tenga un campo llamado 'text' que contenga el texto
que se utilizará para generar la imagen.
- El texto se traduce a inglés británico (EN-GB) antes de enviarlo a la API de OpenAI.
- El costo estimado se calcula en función del tamaño de la imagen generada y se devuelve en el objeto JSON de salida.
"""
# Read the request body
data = request.get_json()
text = data['text'].encode('utf-8').decode('utf-8')
# Translate the text into English
translation = translator.translate_text(text, target_lang="EN-GB")
# Get the URL of the image generated with OpenAI
output_api = api_openai_cover_image(translation.text)
url_image = output_api[0]
size_image = output_api[1]
# Dictionary with the cost of generating an image with OpenAI, by size
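# The keys below must use the same "widthxheight" format returned by api_openai_cover_image
# (e.g. "1024x1024"); otherwise the dict_size lookup further down raises a KeyError.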
dict_size = {
"1024x1024": 0.020,
'512x512': 0.018,
'256x256': 0.016,
}
# Build the output dictionary with the URL and the cost
dict_url = {
"url": url_image,
"usage": dict_size[size_image]
}
return jsonify(dict_url), 200
# API FOR THE TRANSLATION MODEL
@app.route('/get_realtime_translation', methods=['POST'])
def realtime_translation():
"""
Realiza traducción en tiempo real de texto a un idioma especificado utilizando un servicio de traducción.
Esta API toma un objeto JSON del cuerpo de una solicitud POST que debe contener los siguientes campos:
- 'text': El texto que se va a traducir.
- 'language': El idioma al que se debe traducir el texto.
Los pasos que realiza la API son los siguientes:
1. Recoge el texto y el idioma del objeto JSON de entrada.
2. Verifica si se especificó un idioma para la traducción.
3. Consulta la API para obtener el código de idioma correspondiente al idioma especificado.
4. Utiliza un servicio de traducción para traducir el texto al idioma especificado.
5. Devuelve un objeto JSON que contiene el texto original y el texto traducido.
Parameters:
Ninguno explícito (los datos de entrada se toman del cuerpo de la solicitud POST).
Returns:
jsonify(dict): Un objeto JSON que contiene el texto original y el texto traducido.
Nota:
- Esta API espera recibir una solicitud POST con un objeto JSON que contenga los campos 'text' y 'language' para
realizar la traducción.
- La API consulta un servicio para obtener el código de idioma correspondiente antes de realizar la traducción.
- El texto traducido se devuelve en el objeto JSON de salida junto con el texto original.
- Si no se especifica un idioma en la solicitud, la API no realizará ninguna traducción y no devolverá ningún resultado.
"""
# Read the request body
data = request.get_json()
text = data['text'].encode('utf-8').decode('utf-8')
language = data['language'].encode('utf-8').decode('utf-8')
if language is None:
return
# Get the code for the requested language
item_language = api_deepl_languages(language)
# Translate the text into the requested language
if 'EN' in item_language:
translation = translator.translate_text(text, target_lang=item_language)
else:
translation = translator.translate_text(text, target_lang=item_language, formality="more")
# Dictionary with the original and the translated text
dic_languages = {
'original': text,
'translation': str(translation)
}
return jsonify(dic_languages), 200
# API DASHBOARD
@app.route('/get_dashboard', methods=['POST'])
def dashboard():
"""
Obtiene información detallada de un evento a partir de su título.
Esta API toma el título de un evento como entrada y recopila información detallada sobre ese evento desde una base de datos. La información incluye detalles del evento, como fecha y hora, orador, descripción, capacidad de ubicación, así como estadísticas sobre los asistentes registrados y confirmados, nacionalidades de los asistentes, tipos de organizaciones presentes y un registro de entradas y salidas.
Parameters:
event_title (str): El título del evento del cual se desea obtener información.
Returns:
jsonify(dict): Un objeto JSON que contiene datos detallados sobre el evento, incluyendo detalles del evento en sí, estadísticas de asistentes, nacionalidades, tipos de organizaciones y un registro de entradas y salidas.
Nota:
- Esta API espera recibir una solicitud POST con un objeto JSON que contenga un campo llamado 'event_title' que especifique el título del evento a consultar.
- La información se recopila de una base de datos utilizando SQLAlchemy y se presenta en un formato JSON estructurado para su fácil lectura y procesamiento.
- Los datos proporcionados incluyen detalles generales del evento, estadísticas sobre los asistentes y datos específicos sobre la entrada y salida de asistentes durante el evento.
"""
data = request.get_json()
event_title = data['event_title']
# # Connection to the AWS database
# db_url = f'mysql+mysqlconnector://{AWS_USER}:{AWS_PASS}@{AWS_HOST}:{AWS_PORT}/{AWS_NAME}'
# engine = create_engine(db_url)
# Connection to the Render database
db_url = f'postgresql+psycopg2://{RENDER_USER}:{RENDER_PASS}@{RENDER_HOST}:{RENDER_PORT}/{RENDER_NAME}'
engine = create_engine(db_url)
query = f"""
SELECT
"E"."dateTime",
"E"."speacker",
"E"."description",
"E"."title",
"E"."location_id",
"L"."capacity",
"U"."country",
"O"."name",
"T"."name",
"EU"."arriveTime",
"EU"."leaveTime",
COUNT(DISTINCT "T"."name"),
COUNT(DISTINCT "U"."country"),
"U"."confirmed"
FROM
"Events" AS "E"
JOIN
"Locations" AS "L" ON "E"."location_id" = "L"."id"
LEFT JOIN
"EventUsers" AS "EU" ON "E"."id" = "EU"."event_id"
LEFT JOIN
"Users" AS "U" ON "EU"."user_id" = "U"."id"
LEFT JOIN
"Organizations" AS "O" ON "U"."organization_id" = "O"."id"
LEFT JOIN
"Types" AS "T" ON "O"."type_id" = "T"."id"
WHERE
"E"."title" = '{event_title}'
GROUP BY
"E"."dateTime",
"E"."speacker",
"E"."description",
"E"."title",
"E"."location_id",
"L"."capacity",
"U"."country",
"O"."name",
"T"."name",
"EU"."arriveTime",
"EU"."leaveTime",
"U"."confirmed";
"""
# Create a SQLAlchemy session
Session = sessionmaker(bind=engine)
session = Session()
with session:
# Run the query against the event tables
database = session.execute(text(query))
columnas = database.keys()
# Fetch the results
facilities_data = database.fetchall()
# Loop over every returned row
dict_table = {}
horas_entrada = []
horas_salida = []
entry_exit_Entradas = {}
entry_exit_Salidas = {}
nationality = {}
types = {}
registered = 0
confirmed = 0
present = 0
for row in facilities_data:
locale.setlocale(locale.LC_TIME, 'es_ES.UTF-8')
dict_table['title'] = row[3]
dict_table['description'] = row[2]
dict_table['speaker'] = row[1]
dict_table['day'] = str(pd.to_datetime(row[0], yearfirst=True).strftime('%A')).capitalize()
dict_table['month'] = str(pd.to_datetime(row[0], yearfirst=True).strftime('%B')).capitalize()
registered += int(row[11])
confirmed += int(row[13])
present += 1 if row[9] is not None else 0
dict_table['capacity'] = row[5]
# ENTRY_EXIT
horas_entrada.append(row[9])
horas_salida.append(row[10])
horas_evento = []
if row[9] is None:
horas_evento.append(None)
else:
if row[10] is None:
horas_evento.append(None)
else:
fecha_inicio = pd.to_datetime(row[9])
fecha_fin = pd.to_datetime(row[10])
hora_abierto = (fecha_inicio - timedelta(hours=1, minutes=5)).to_pydatetime()
hora_cierre = (fecha_fin + timedelta(minutes=30)).to_pydatetime()
while hora_abierto < hora_cierre:
minutos_redondeados = (hora_abierto.minute // 5) * 5
timestamp_redondeado = hora_abierto.replace(minute=minutos_redondeados, second=0)
horas_evento.append(pd.to_datetime(timestamp_redondeado))
hora_abierto += timedelta(minutes=5)
for fecha in horas_evento:
if fecha in horas_entrada:
count = horas_entrada.count(fecha)
if fecha not in entry_exit_Entradas.keys():
entry_exit_Entradas[f'{fecha}'] = count
else:
entry_exit_Entradas[f'{fecha}'] = count
if fecha in horas_salida:
count = horas_salida.count(fecha)
if fecha not in entry_exit_Salidas.keys():
entry_exit_Salidas[f'{fecha}'] = count
else:
entry_exit_Salidas[f'{fecha}'] = count
# NATIONALITY
if row[6] not in nationality.keys():
nationality[row[6]] = row[12]
else:
nationality[row[6]] += row[12]
# TYPE
if row[8] not in types.keys():
types[row[8]] = {
row[7]: row[11]
}
elif row[7] not in types[row[8]].keys():
types[row[8]][row[7]] = row[11]
else:
types[row[8]][row[7]] += row[11]
dict_table['attendees'] = {'registered': registered,
'confirmed': confirmed,
'present': present}
list_types = []
for key, value in types.items():
for company, count in value.items():
list_types.append({'id':key, 'typeName':company, 'typeCount':count})
dict_table['type'] = list_types
dict_table['nationality'] = [{'country': country, 'userCount': count} for country, count in nationality.items()]
dict_table['entry_exit'] = [{'id': 'Entradas', 'data': [{'x': pd.to_datetime(fecha), 'y': count} if fecha != 'None' else {'x': fecha, 'y': count} for fecha, count in entry_exit_Entradas.items()]},
{'id': 'Salidas', 'data': [{'x': pd.to_datetime(fecha), 'y': count} if fecha != 'None' else {'x': fecha, 'y': count} for fecha, count in entry_exit_Salidas.items()]}]
return jsonify(dict_table)
if __name__ == '__main__':
app.run(debug=True)
| [
"OPENAI_PROMPT_FEEDBACK"
] |
2024-01-10 | 62442katieb/mbme-physio-denoising | msc_and_plot.py | import bids
import enlighten
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.signal import coherence
from os.path import join, exists
sns.set(style='white', context='talk')
mather = '/Users/katherine.b/Dropbox/Data/ds001242'
musser = '/Users/katherine.b/Dropbox/Data/musserk23'
diva = '/home/data/nbc/Laird_DIVA/dset'
datasets = [musser, mather]
manager = enlighten.get_manager()
for dataset in datasets:
dset_name = dataset.split('/')[-1]
dset = bids.BIDSLayout(dataset, derivatives=True)
files = dset.derivatives['PhysioComb'].get(extension='tsv', desc='filtered', suffix='physio', invalid_filters=True)
tocks = manager.counter(total=len(files), desc=dset_name, unit='files')
msc_ecg = pd.DataFrame()
msc_eda = pd.DataFrame()
for file in files:
fs = file.get_metadata()['SamplingFrequency']
dat = pd.read_table(file.path)
cardiac = dat.filter(regex='cardiac.*').columns
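# Window length for the coherence estimate: 4 s of samples for the cardiac signals
# (the EDA loop below uses windows 10x longer to resolve its slower fluctuations).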
nperseg = fs * 4
subject = file.entities['subject']
base_path = file.path.replace(file.filename, '')
try:
session = file.entities['session']
no_mr_path = f'{base_path}sub-{subject}_ses-{session}_desc-noMR_physio.tsv'
except:
no_mr_path = f'{base_path}sub-{subject}_desc-noMR_physio.tsv'
if exists(no_mr_path):
no_mr = pd.read_table(no_mr_path, header=0)
for col1 in cardiac:
if exists(no_mr_path):
# if a no-MR reference recording exists for this subject, also compute MSC against it
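# scipy.signal.coherence returns the frequency grid f and the magnitude-squared coherence Cxy (values in [0, 1])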
f, Cxy = coherence(dat[col1],
no_mr['cardiac'],
fs=fs,
nperseg=nperseg)
temp = pd.Series(data=Cxy,
index=f,
name=f'{col1}_no_mr')
msc_ecg = pd.concat([msc_ecg, temp],
axis=1)
else:
pass
for col2 in cardiac:
if col1 == col2:
pass
else:
f, Cxy = coherence(dat[col1],
dat[col2],
fs=fs,
nperseg=nperseg)
temp = pd.Series(data=Cxy,
index=f,
name=f'{col1}_{col2}')
msc_ecg = pd.concat([msc_ecg, temp],
axis=1)
eda = dat.filter(regex='scr.*').columns
for col1 in eda:
if exists(no_mr_path):
# if a no-MR reference recording exists for this subject, also compute MSC against it
f, Cxy = coherence(dat[col1],
no_mr['scr'],
fs=fs,
nperseg=nperseg * 10)
temp = pd.Series(data=Cxy,
index=f,
name=f'{col1}_no_mr')
msc_eda = pd.concat([msc_eda, temp],
axis=1)
for col2 in eda:
if col1 == col2:
pass
else:
f, Cxy = coherence(dat[col1],
dat[col2],
fs=fs,
nperseg=nperseg * 10)
temp = pd.Series(data=Cxy,
index=f,
name=f'{col1}_{col2}')
msc_eda = pd.concat([msc_eda, temp],
axis=1)
tocks.update()
new_names = [f.filename.split('_desc')[0] for f in files]
plots = msc_ecg.columns.unique()
for plot in plots:
temp = msc_ecg[plot]
temp.columns = new_names
fig,ax = plt.subplots(figsize=(5,5))
sns.lineplot(data=temp[temp.index < 60], lw=0.9, dashes=False)
ax.get_legend().remove()
ax.set_xlabel('Hz')
ax.set_ylabel('MSC')
ax.set_ylim([-0.1,1.1])
fig.savefig(f'{dset_name}_{plot}_msc-ecg.png', dpi=400, bbox_inches='tight')
plots = msc_eda.columns.unique()
for plot in plots:
temp = msc_eda[plot]
temp.columns = new_names
fig, ax = plt.subplots(figsize=(5, 5))
sns.lineplot(data=temp[temp.index <= 1], lw=0.9, dashes=False)
ax.get_legend().remove()
ax.set_xlabel('Hz')
ax.set_ylabel('MSC')
ax.set_ylim([-0.1,1.1])
ax.set_xlim([-0.1,1.1])
fig.savefig(f'{dset_name}_{plot}_msc-eda.png', dpi=400, bbox_inches='tight')
| [] |
2024-01-10 | lhh-pi/modelscope | modelscope~models~cv~image_probing_model~backbone.py | # The implementation is adopted from OpenAI-CLIP,
# made publicly available under the MIT License at https://github.com/openai/CLIP
import math
import sys
from collections import OrderedDict
from functools import reduce
from operator import mul
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from torchvision import models
from .utils import convert_weights, load_pretrained
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is performed
# after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool,
# and the subsequent convolution has stride 1
self.downsample = nn.Sequential(
OrderedDict([('-1', nn.AvgPool2d(stride)),
('0',
nn.Conv2d(
inplanes,
planes * self.expansion,
1,
stride=1,
bias=False)),
('1', nn.BatchNorm2d(planes * self.expansion))]))
def forward(self, x: torch.Tensor):
identity = x
out = self.relu(self.bn1(self.conv1(x)))
out = self.relu(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class AttentionPool2d(nn.Module):
def __init__(self,
spacial_dim: int,
embed_dim: int,
num_heads: int,
output_dim: int = None):
super().__init__()
self.positional_embedding = nn.Parameter(
torch.randn(spacial_dim**2 + 1, embed_dim) / embed_dim**0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1],
x.shape[2] * x.shape[3]).permute(2, 0, 1)
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)
x = x + self.positional_embedding[:, None, :].to(x.dtype)
x, _ = F.multi_head_attention_forward(
query=x,
key=x,
value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat(
[self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False)
return x[0]
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
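# Fast GELU approximation used in CLIP: x * sigmoid(1.702 * x).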
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self,
d_model: int,
n_head: int,
attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(
OrderedDict([('c_fc', nn.Linear(d_model, d_model * 4)),
('gelu', QuickGELU()),
('c_proj', nn.Linear(d_model * 4, d_model))]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(
dtype=x.dtype,
device=x.device) if self.attn_mask is not None else None
return self.attn(
x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor, idx):
features = {}
x_norm = self.ln_1(x)
features['layer_{}_pre_attn'.format(idx)] = x_norm.permute(1, 0, 2)
attn = self.attention(x_norm)
features['layer_{}_attn'.format(idx)] = attn.permute(1, 0, 2)
x = x + attn
mlp = self.mlp(self.ln_2(x))
features['layer_{}_mlp'.format(idx)] = mlp.permute(1, 0, 2)
x = x + mlp
return x, features
class Transformer(nn.Module):
def __init__(self,
width: int,
layers: int,
heads: int,
attn_mask: torch.Tensor = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.ModuleList()
for i in range(layers):
block = ResidualAttentionBlock(width, heads, attn_mask)
self.resblocks.append(block)
def forward(self, x: torch.Tensor):
features = {}
for idx, block in enumerate(self.resblocks):
x, block_feats = block(x, idx)
features.update(block_feats)
return x, features
class VisualTransformer(nn.Module):
def __init__(self, input_resolution: int, patch_size: int, width: int,
layers: int, heads: int, output_dim: int):
super().__init__()
print(input_resolution, patch_size, width, layers, heads, output_dim)
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(
in_channels=3,
out_channels=width,
kernel_size=patch_size,
stride=patch_size,
bias=False)
scale = width**-0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn(
(input_resolution // patch_size)**2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads)
self.ln_post = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
def forward(self, x: torch.Tensor, return_all=True):
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1],
-1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
zeros = torch.zeros(
x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device)
# shape = [*, grid ** 2 + 1, width]
x = torch.cat([self.class_embedding.to(x.dtype) + zeros, x], dim=1)
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x, features = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_post(x[:, 0, :])
if return_all:
features['pre_logits'] = x
return features
if self.proj is not None:
x = x @ self.proj
return x
class CLIPNet(nn.Module):
def __init__(self, arch_name, pretrained, **kwargs):
super(CLIPNet, self).__init__()
if arch_name == 'CLIP_ViTB32':
self.clip = VisualTransformer(
input_resolution=224,
patch_size=32,
width=768,
layers=12,
heads=12,
output_dim=512)
elif arch_name in ('CLIP_ViTB16', 'CLIP_ViTB16_FP16'):
self.clip = VisualTransformer(
input_resolution=224,
patch_size=16,
width=768,
layers=12,
heads=12,
output_dim=512)
elif arch_name in ('CLIP_ViTL14', 'CLIP_ViTL14_FP16'):
self.clip = VisualTransformer(
input_resolution=224,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768)
else:
raise KeyError(f'Unsupported arch_name for CLIP, {arch_name}')
def forward(self, input_data):
output = self.clip(input_data)
return output
def CLIP(arch_name='CLIP_RN50',
use_pretrain=False,
load_from='',
state_dict=None,
**kwargs):
model = CLIPNet(arch_name=arch_name, pretrained=None, **kwargs)
if use_pretrain:
if arch_name.endswith('FP16'):
convert_weights(model.clip)
load_pretrained(model.clip, state_dict, load_from)
return model
class ProbingModel(torch.nn.Module):
def __init__(self, feat_size, num_classes):
super(ProbingModel, self).__init__()
self.linear = torch.nn.Linear(feat_size, num_classes)
def forward(self, x):
return self.linear(x)
| [] |
2024-01-10 | tekenable-org/azure-search-openai-demo | app~backend~approaches~chatreadretrieveread.py | import json
import logging
import re
from typing import Any, AsyncGenerator, Optional, Union
import aiohttp
import openai
from azure.search.documents.aio import SearchClient
from azure.search.documents.models import QueryType
from approaches.approach import Approach
from core.messagebuilder import MessageBuilder
from core.modelhelper import get_token_limit
from text import nonewlines
class ChatReadRetrieveReadApproach(Approach):
# Chat roles
SYSTEM = "system"
USER = "user"
ASSISTANT = "assistant"
NO_RESPONSE = "0"
"""
Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves
top documents from search, then constructs a prompt with them, and then uses OpenAI to generate a completion
(answer) with that prompt.
"""
system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers.
Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question.
For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question.
Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, for example [info1.txt]. Don't combine sources, list each source separately, for example [info1.txt][info2.pdf].
{follow_up_questions_prompt}
{injected_prompt}
"""
follow_up_questions_prompt_content = """Generate 3 very brief follow-up questions that the user would likely ask next.
Enclose the follow-up questions in double angle brackets. Example:
<<Are there exclusions for prescriptions?>>
<<Which pharmacies can be ordered from?>>
<<What is the limit for over-the-counter medication?>>
Do not repeat questions that have already been asked.
Make sure the last question ends with ">>"."""
query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook.
You have access to Azure Cognitive Search index with 100's of documents.
Generate a search query based on the conversation and the new question.
Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms.
Do not include any text inside [] or <<>> in the search query terms.
Do not include any special characters like '+'.
If the question is not in English, translate the question to English before generating the search query.
If you cannot generate a search query, return just the number 0.
"""
query_prompt_few_shots = [
{"role": USER, "content": "What are my health plans?"},
{"role": ASSISTANT, "content": "Show available health plans"},
{"role": USER, "content": "does my plan cover cardio?"},
{"role": ASSISTANT, "content": "Health plan cardio coverage"},
]
def __init__(
self,
search_client: SearchClient,
openai_host: str,
chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI
chatgpt_model: str,
embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text"
embedding_model: str,
sourcepage_field: str,
content_field: str,
query_language: str,
query_speller: str,
):
self.search_client = search_client
self.openai_host = openai_host
self.chatgpt_deployment = chatgpt_deployment
self.chatgpt_model = chatgpt_model
self.embedding_deployment = embedding_deployment
self.embedding_model = embedding_model
self.sourcepage_field = sourcepage_field
self.content_field = content_field
self.query_language = query_language
self.query_speller = query_speller
self.chatgpt_token_limit = get_token_limit(chatgpt_model)
async def run_until_final_call(
self,
history: list[dict[str, str]],
overrides: dict[str, Any],
auth_claims: dict[str, Any],
should_stream: bool = False,
) -> tuple:
has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None]
has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None]
use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False
top = overrides.get("top", 3)
filter = self.build_filter(overrides, auth_claims)
original_user_query = history[-1]["content"]
user_query_request = "Generate search query for: " + original_user_query
functions = [
{
"name": "search_sources",
"description": "Retrieve sources from the Azure Cognitive Search index",
"parameters": {
"type": "object",
"properties": {
"search_query": {
"type": "string",
"description": "Query string to retrieve documents from azure search eg: 'Health care plan'",
}
},
"required": ["search_query"],
},
}
]
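# With function calling enabled, the model may either call search_sources with a search_query argument
# or answer with plain text; get_search_query() below handles both cases and falls back to the raw user query.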
# STEP 1: Generate an optimized keyword search query based on the chat history and the last question
messages = self.get_messages_from_history(
system_prompt=self.query_prompt_template,
model_id=self.chatgpt_model,
history=history,
user_content=user_query_request,
max_tokens=self.chatgpt_token_limit - len(user_query_request),
few_shots=self.query_prompt_few_shots,
)
chatgpt_args = {"deployment_id": self.chatgpt_deployment} if self.openai_host == "azure" else {}
chat_completion = await openai.ChatCompletion.acreate(
**chatgpt_args,
model=self.chatgpt_model,
messages=messages,
temperature=0.0,
max_tokens=100, # Setting too low risks malformed JSON, setting too high may affect performance
n=1,
functions=functions,
function_call="auto",
)
query_text = self.get_search_query(chat_completion, original_user_query)
# STEP 2: Retrieve relevant documents from the search index with the GPT optimized query
# If retrieval mode includes vectors, compute an embedding for the query
if has_vector:
embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {}
embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=query_text)
query_vector = embedding["data"][0]["embedding"]
else:
query_vector = None
# Only keep the text query if the retrieval mode uses text, otherwise drop it
if not has_text:
query_text = None
# Use semantic L2 reranker if requested and if retrieval mode is text or hybrid (vectors + text)
if overrides.get("semantic_ranker") and has_text:
r = await self.search_client.search(
query_text,
filter=filter,
query_type=QueryType.SEMANTIC,
query_language=self.query_language,
query_speller=self.query_speller,
semantic_configuration_name="default",
top=top,
query_caption="extractive|highlight-false" if use_semantic_captions else None,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None,
)
else:
r = await self.search_client.search(
query_text,
filter=filter,
top=top,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None,
)
if use_semantic_captions:
results = [
doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc["@search.captions"]]))
async for doc in r
]
else:
results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) async for doc in r]
content = "\n".join(results)
follow_up_questions_prompt = (
self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else ""
)
# STEP 3: Generate a contextual and content specific answer using the search results and chat history
# Allow client to replace the entire prompt, or to inject into the existing prompt using >>>
prompt_override = overrides.get("prompt_template")
if prompt_override is None:
system_message = self.system_message_chat_conversation.format(
injected_prompt="", follow_up_questions_prompt=follow_up_questions_prompt
)
elif prompt_override.startswith(">>>"):
system_message = self.system_message_chat_conversation.format(
injected_prompt=prompt_override[3:] + "\n", follow_up_questions_prompt=follow_up_questions_prompt
)
else:
system_message = prompt_override.format(follow_up_questions_prompt=follow_up_questions_prompt)
response_token_limit = 1024
messages_token_limit = self.chatgpt_token_limit - response_token_limit
messages = self.get_messages_from_history(
system_prompt=system_message,
model_id=self.chatgpt_model,
history=history,
# Model does not handle lengthy system messages well. Sources are moved into the latest user message so the follow-up questions prompt still works.
user_content=original_user_query + "\n\nSources:\n" + content,
max_tokens=messages_token_limit,
)
msg_to_display = "\n\n".join([str(message) for message in messages])
extra_info = {
"data_points": results,
"thoughts": f"Searched for:<br>{query_text}<br><br>Conversations:<br>"
+ msg_to_display.replace("\n", "<br>"),
}
chat_coroutine = openai.ChatCompletion.acreate(
**chatgpt_args,
model=self.chatgpt_model,
messages=messages,
temperature=overrides.get("temperature") or 0.7,
max_tokens=response_token_limit,
n=1,
stream=should_stream,
)
return (extra_info, chat_coroutine)
async def run_without_streaming(
self,
history: list[dict[str, str]],
overrides: dict[str, Any],
auth_claims: dict[str, Any],
session_state: Any = None,
) -> dict[str, Any]:
extra_info, chat_coroutine = await self.run_until_final_call(
history, overrides, auth_claims, should_stream=False
)
chat_resp = dict(await chat_coroutine)
chat_resp["choices"][0]["context"] = extra_info
if overrides.get("suggest_followup_questions"):
content, followup_questions = self.extract_followup_questions(chat_resp["choices"][0]["message"]["content"])
chat_resp["choices"][0]["message"]["content"] = content
chat_resp["choices"][0]["context"]["followup_questions"] = followup_questions
chat_resp["choices"][0]["session_state"] = session_state
return chat_resp
async def run_with_streaming(
self,
history: list[dict[str, str]],
overrides: dict[str, Any],
auth_claims: dict[str, Any],
session_state: Any = None,
) -> AsyncGenerator[dict, None]:
extra_info, chat_coroutine = await self.run_until_final_call(
history, overrides, auth_claims, should_stream=True
)
yield {
"choices": [
{
"delta": {"role": self.ASSISTANT},
"context": extra_info,
"session_state": session_state,
"finish_reason": None,
"index": 0,
}
],
"object": "chat.completion.chunk",
}
followup_questions_started = False
followup_content = ""
async for event in await chat_coroutine:
# "2023-07-01-preview" API version has a bug where first response has empty choices
if event["choices"]:
# if event contains << and not >>, it is start of follow-up question, truncate
content = event["choices"][0]["delta"].get("content", "")
if overrides.get("suggest_followup_questions") and "<<" in content:
followup_questions_started = True
earlier_content = content[: content.index("<<")]
if earlier_content:
event["choices"][0]["delta"]["content"] = earlier_content
yield event
followup_content += content[content.index("<<") :]
elif followup_questions_started:
followup_content += content
else:
yield event
if followup_content:
_, followup_questions = self.extract_followup_questions(followup_content)
yield {
"choices": [
{
"delta": {"role": self.ASSISTANT},
"context": {"followup_questions": followup_questions},
"finish_reason": None,
"index": 0,
}
],
"object": "chat.completion.chunk",
}
async def run(
self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {}
) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]:
overrides = context.get("overrides", {})
auth_claims = context.get("auth_claims", {})
if stream is False:
# Workaround for: https://github.com/openai/openai-python/issues/371
async with aiohttp.ClientSession() as s:
openai.aiosession.set(s)
response = await self.run_without_streaming(messages, overrides, auth_claims, session_state)
return response
else:
return self.run_with_streaming(messages, overrides, auth_claims, session_state)
def get_messages_from_history(
self,
system_prompt: str,
model_id: str,
history: list[dict[str, str]],
user_content: str,
max_tokens: int,
few_shots=[],
) -> list:
message_builder = MessageBuilder(system_prompt, model_id)
# Add examples to show the chat what responses we want. It will try to mimic any responses and make sure they match the rules laid out in the system message.
for shot in few_shots:
message_builder.append_message(shot.get("role"), shot.get("content"))
append_index = len(few_shots) + 1
message_builder.append_message(self.USER, user_content, index=append_index)
total_token_count = message_builder.count_tokens_for_message(message_builder.messages[-1])
newest_to_oldest = list(reversed(history[:-1]))
for message in newest_to_oldest:
potential_message_count = message_builder.count_tokens_for_message(message)
if (total_token_count + potential_message_count) > max_tokens:
logging.debug("Reached max tokens of %d, history will be truncated", max_tokens)
break
message_builder.append_message(message["role"], message["content"], index=append_index)
total_token_count += potential_message_count
return message_builder.messages
def get_search_query(self, chat_completion: dict[str, Any], user_query: str):
response_message = chat_completion["choices"][0]["message"]
if function_call := response_message.get("function_call"):
if function_call["name"] == "search_sources":
arg = json.loads(function_call["arguments"])
search_query = arg.get("search_query", self.NO_RESPONSE)
if search_query != self.NO_RESPONSE:
return search_query
elif query_text := response_message.get("content"):
if query_text.strip() != self.NO_RESPONSE:
return query_text
return user_query
def extract_followup_questions(self, content: str):
return content.split("<<")[0], re.findall(r"<<([^>>]+)>>", content)
| [
"Show available health plans",
"Health plan cardio coverage",
"What are my health plans?",
"Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook.\nYou have access to Azure Cognitive Search index with 100's of documents.\nGenerate a search query based on the conversation and the new question.\nDo not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms.\nDo not include any text inside [] or <<>> in the search query terms.\nDo not include any special characters like '+'.\nIf the question is not in English, translate the question to English before generating the search query.\nIf you cannot generate a search query, return just the number 0.\n",
"suggest_followup_questions",
"Generate 3 very brief follow-up questions that the user would likely ask next.\nEnclose the follow-up questions in double angle brackets. Example:\n<<Are there exclusions for prescriptions?>>\n<<Which pharmacies can be ordered from?>>\n<<What is the limit for over-the-counter medication?>>\nDo no repeat questions that have already been asked.\nMake sure the last question ends with \">>\".",
"does my plan cover cardio?",
"prompt_template"
] |
2024-01-10 | tekenable-org/azure-search-openai-demo | tests~conftest.py | import argparse
import json
import os
from collections import namedtuple
from unittest import mock
import aiohttp
import azure.storage.filedatalake
import azure.storage.filedatalake.aio
import msal
import openai
import pytest
import pytest_asyncio
from azure.search.documents.aio import SearchClient
import app
from core.authentication import AuthenticationHelper
MockToken = namedtuple("MockToken", ["token", "expires_on"])
class MockAzureCredential:
async def get_token(self, uri):
return MockToken("mock_token", 9999999999)
@pytest.fixture
def mock_openai_embedding(monkeypatch):
async def mock_acreate(*args, **kwargs):
if openai.api_type == "openai":
assert kwargs.get("deployment_id") is None
else:
assert kwargs.get("deployment_id") is not None
return {"data": [{"embedding": [0.1, 0.2, 0.3]}]}
monkeypatch.setattr(openai.Embedding, "acreate", mock_acreate)
@pytest.fixture
def mock_openai_chatcompletion(monkeypatch):
class AsyncChatCompletionIterator:
def __init__(self, answer: str):
self.responses = [
{"object": "chat.completion.chunk", "choices": []},
{"object": "chat.completion.chunk", "choices": [{"delta": {"role": "assistant"}}]},
]
# Split at << to simulate chunked responses
if answer.find("<<") > -1:
parts = answer.split("<<")
self.responses.append(
{
"object": "chat.completion.chunk",
"choices": [{"delta": {"role": "assistant", "content": parts[0] + "<<"}}],
}
)
self.responses.append(
{
"object": "chat.completion.chunk",
"choices": [{"delta": {"role": "assistant", "content": parts[1]}}],
}
)
else:
self.responses.append(
{
"object": "chat.completion.chunk",
"choices": [{"delta": {"content": answer}}],
}
)
def __aiter__(self):
return self
async def __anext__(self):
if self.responses:
return self.responses.pop(0)
else:
raise StopAsyncIteration
async def mock_acreate(*args, **kwargs):
if openai.api_type == "openai":
assert kwargs.get("deployment_id") is None
else:
assert kwargs.get("deployment_id") is not None
messages = kwargs["messages"]
if messages[-1]["content"] == "Generate search query for: What is the capital of France?":
answer = "capital of France"
else:
answer = "The capital of France is Paris. [Benefit_Options-2.pdf]."
if messages[0]["content"].find("Generate 3 very brief follow-up questions") > -1:
answer = "The capital of France is Paris. [Benefit_Options-2.pdf]. <<What is the capital of Spain?>>"
if "stream" in kwargs and kwargs["stream"] is True:
return AsyncChatCompletionIterator(answer)
else:
return openai.util.convert_to_openai_object(
{"object": "chat.completion", "choices": [{"message": {"role": "assistant", "content": answer}}]}
)
monkeypatch.setattr(openai.ChatCompletion, "acreate", mock_acreate)
@pytest.fixture
def mock_acs_search(monkeypatch):
class Caption:
def __init__(self, text):
self.text = text
class AsyncSearchResultsIterator:
def __init__(self):
self.num = 1
def __aiter__(self):
return self
async def __anext__(self):
if self.num == 1:
self.num = 0
return {
"sourcepage": "Benefit_Options-2.pdf",
"sourcefile": "Benefit_Options.pdf",
"content": "There is a whistleblower policy.",
"embeddings": [],
"category": None,
"id": "file-Benefit_Options_pdf-42656E656669745F4F7074696F6E732E706466-page-2",
"@search.score": 0.03279569745063782,
"@search.reranker_score": 3.4577205181121826,
"@search.highlights": None,
"@search.captions": [Caption("Caption: A whistleblower policy.")],
}
else:
raise StopAsyncIteration
async def mock_search(*args, **kwargs):
return AsyncSearchResultsIterator()
monkeypatch.setattr(SearchClient, "search", mock_search)
@pytest.fixture
def mock_acs_search_filter(monkeypatch):
class AsyncSearchResultsIterator:
def __init__(self):
self.num = 1
def __aiter__(self):
return self
async def __anext__(self):
raise StopAsyncIteration
async def mock_search(self, *args, **kwargs):
self.filter = kwargs.get("filter")
return AsyncSearchResultsIterator()
monkeypatch.setattr(SearchClient, "search", mock_search)
envs = [
{
"OPENAI_HOST": "openai",
"OPENAI_API_KEY": "secretkey",
"OPENAI_ORGANIZATION": "organization",
},
{
"OPENAI_HOST": "azure",
"AZURE_OPENAI_SERVICE": "test-openai-service",
"AZURE_OPENAI_CHATGPT_DEPLOYMENT": "test-chatgpt",
"AZURE_OPENAI_EMB_DEPLOYMENT": "test-ada",
},
]
auth_envs = [
{
"OPENAI_HOST": "azure",
"AZURE_OPENAI_SERVICE": "test-openai-service",
"AZURE_OPENAI_CHATGPT_DEPLOYMENT": "test-chatgpt",
"AZURE_OPENAI_EMB_DEPLOYMENT": "test-ada",
"AZURE_USE_AUTHENTICATION": "true",
"AZURE_SERVER_APP_ID": "SERVER_APP",
"AZURE_SERVER_APP_SECRET": "SECRET",
"AZURE_CLIENT_APP_ID": "CLIENT_APP",
"AZURE_TENANT_ID": "TENANT_ID",
},
]
@pytest.fixture(params=envs, ids=["client0", "client1"])
def mock_env(monkeypatch, request):
with mock.patch.dict(os.environ, clear=True):
monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account")
monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container")
monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index")
monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service")
monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo")
monkeypatch.setenv("ALLOWED_ORIGIN", "https://frontend.com")
for key, value in request.param.items():
monkeypatch.setenv(key, value)
if os.getenv("AZURE_USE_AUTHENTICATION") is not None:
monkeypatch.delenv("AZURE_USE_AUTHENTICATION")
with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential:
mock_default_azure_credential.return_value = MockAzureCredential()
yield
@pytest_asyncio.fixture()
async def client(monkeypatch, mock_env, mock_openai_chatcompletion, mock_openai_embedding, mock_acs_search, request):
quart_app = app.create_app()
async with quart_app.test_app() as test_app:
quart_app.config.update({"TESTING": True})
yield test_app.test_client()
@pytest_asyncio.fixture(params=auth_envs)
async def auth_client(
monkeypatch,
mock_openai_chatcompletion,
mock_openai_embedding,
mock_confidential_client_success,
mock_list_groups_success,
mock_acs_search_filter,
request,
):
monkeypatch.setenv("AZURE_STORAGE_ACCOUNT", "test-storage-account")
monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container")
monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index")
monkeypatch.setenv("AZURE_SEARCH_SERVICE", "test-search-service")
monkeypatch.setenv("AZURE_OPENAI_CHATGPT_MODEL", "gpt-35-turbo")
for key, value in request.param.items():
monkeypatch.setenv(key, value)
with mock.patch("app.DefaultAzureCredential") as mock_default_azure_credential:
mock_default_azure_credential.return_value = MockAzureCredential()
quart_app = app.create_app()
async with quart_app.test_app() as test_app:
quart_app.config.update({"TESTING": True})
client = test_app.test_client()
client.config = quart_app.config
yield client
@pytest.fixture
def mock_confidential_client_success(monkeypatch):
def mock_acquire_token_on_behalf_of(self, *args, **kwargs):
assert kwargs.get("user_assertion") is not None
scopes = kwargs.get("scopes")
assert scopes == [AuthenticationHelper.scope]
return {"access_token": "MockToken", "id_token_claims": {"oid": "OID_X", "groups": ["GROUP_Y", "GROUP_Z"]}}
monkeypatch.setattr(
msal.ConfidentialClientApplication, "acquire_token_on_behalf_of", mock_acquire_token_on_behalf_of
)
def mock_init(self, *args, **kwargs):
pass
monkeypatch.setattr(msal.ConfidentialClientApplication, "__init__", mock_init)
@pytest.fixture
def mock_confidential_client_unauthorized(monkeypatch):
def mock_acquire_token_on_behalf_of(self, *args, **kwargs):
assert kwargs.get("user_assertion") is not None
scopes = kwargs.get("scopes")
assert scopes == [AuthenticationHelper.scope]
return {"error": "unauthorized"}
monkeypatch.setattr(
msal.ConfidentialClientApplication, "acquire_token_on_behalf_of", mock_acquire_token_on_behalf_of
)
def mock_init(self, *args, **kwargs):
pass
monkeypatch.setattr(msal.ConfidentialClientApplication, "__init__", mock_init)
@pytest.fixture
def mock_confidential_client_overage(monkeypatch):
def mock_acquire_token_on_behalf_of(self, *args, **kwargs):
assert kwargs.get("user_assertion") is not None
scopes = kwargs.get("scopes")
assert scopes == [AuthenticationHelper.scope]
return {
"access_token": "MockToken",
"id_token_claims": {
"oid": "OID_X",
"_claim_names": {"groups": "src1"},
"_claim_sources": {"src1": {"endpoint": "https://example.com"}},
},
}
monkeypatch.setattr(
msal.ConfidentialClientApplication, "acquire_token_on_behalf_of", mock_acquire_token_on_behalf_of
)
def mock_init(self, *args, **kwargs):
pass
monkeypatch.setattr(msal.ConfidentialClientApplication, "__init__", mock_init)
class MockResponse:
def __init__(self, text, status):
self._text = text  # stored as _text so it does not shadow the text() coroutine below
self.status = status
async def text(self):
return self._text
async def __aexit__(self, exc_type, exc, tb):
pass
async def __aenter__(self):
return self
async def json(self):
return json.loads(self._text)
@pytest.fixture
def mock_list_groups_success(monkeypatch):
class MockListResponse:
def __init__(self):
self.num = 2
def run(self, *args, **kwargs):
if self.num == 2:
self.num = 1
return MockResponse(
text=json.dumps(
{"@odata.nextLink": "https://odatanextlink.com", "value": [{"id": "OVERAGE_GROUP_Y"}]}
),
status=200,
)
if self.num == 1:
assert kwargs.get("url") == "https://odatanextlink.com"
self.num = 0
return MockResponse(text=json.dumps({"value": [{"id": "OVERAGE_GROUP_Z"}]}), status=200)
raise Exception("too many runs")
mock_list_response = MockListResponse()
def mock_get(*args, **kwargs):
return mock_list_response.run(*args, **kwargs)
monkeypatch.setattr(aiohttp.ClientSession, "get", mock_get)
@pytest.fixture
def mock_list_groups_unauthorized(monkeypatch):
class MockListResponse:
def __init__(self):
self.num = 1
def run(self, *args, **kwargs):
if self.num == 1:
self.num = 0
return MockResponse(text=json.dumps({"error": "unauthorized"}), status=401)
raise Exception("too many runs")
mock_list_response = MockListResponse()
def mock_get(*args, **kwargs):
return mock_list_response.run(*args, **kwargs)
monkeypatch.setattr(aiohttp.ClientSession, "get", mock_get)
@pytest.fixture
def mock_data_lake_service_client(monkeypatch):
def mock_init(self, *args, **kwargs):
pass
async def mock_aenter(self, *args, **kwargs):
return self
async def mock_aexit(self, *args, **kwargs):
return self
def mock_get_file_system_client(self, *args, **kwargs):
return azure.storage.filedatalake.FileSystemClient(account_url=None, file_system_name=None, credential=None)
def mock_init_service_client_aio(self, *args, **kwargs):
self.filesystems = {}
def mock_get_file_system_client_aio(self, name, *args, **kwargs):
if name in self.filesystems:
return self.filesystems[name]
self.filesystems[name] = azure.storage.filedatalake.aio.FileSystemClient(
account_url=None, file_system_name=None, credential=None
)
return self.filesystems[name]
monkeypatch.setattr(azure.storage.filedatalake.DataLakeServiceClient, "__init__", mock_init)
monkeypatch.setattr(
azure.storage.filedatalake.DataLakeServiceClient, "get_file_system_client", mock_get_file_system_client
)
monkeypatch.setattr(azure.storage.filedatalake.aio.DataLakeServiceClient, "__init__", mock_init_service_client_aio)
monkeypatch.setattr(azure.storage.filedatalake.aio.DataLakeServiceClient, "__aenter__", mock_aenter)
monkeypatch.setattr(azure.storage.filedatalake.aio.DataLakeServiceClient, "__aexit__", mock_aexit)
monkeypatch.setattr(
azure.storage.filedatalake.aio.DataLakeServiceClient, "get_file_system_client", mock_get_file_system_client_aio
)
def mock_init_filesystem_aio(self, *args, **kwargs):
self.directories = {}
def mock_get_file_client(self, path, *args, **kwargs):
return azure.storage.filedatalake.DataLakeFileClient(
account_url=None, file_system_name=None, file_path=path, credential=None
)
async def mock_exists_aio(self, *args, **kwargs):
return False
async def mock_create_filesystem_aio(self, *args, **kwargs):
pass
async def mock_create_directory_aio(self, directory, *args, **kwargs):
if directory in self.directories:
return self.directories[directory]
self.directories[directory] = azure.storage.filedatalake.aio.DataLakeDirectoryClient(directory)
return self.directories[directory]
def mock_get_root_directory_client_aio(self, *args, **kwargs):
if "/" in self.directories:
return self.directories["/"]
self.directories["/"] = azure.storage.filedatalake.aio.DataLakeDirectoryClient("/")
self.directories["/"].child_directories = self.directories
return self.directories["/"]
def mock_get_paths(self, *args, **kwargs):
return [argparse.Namespace(is_directory=False, name=name) for name in ["a.txt", "b.txt", "c.txt"]]
monkeypatch.setattr(azure.storage.filedatalake.FileSystemClient, "__init__", mock_init)
monkeypatch.setattr(azure.storage.filedatalake.FileSystemClient, "get_file_client", mock_get_file_client)
monkeypatch.setattr(azure.storage.filedatalake.FileSystemClient, "get_paths", mock_get_paths)
monkeypatch.setattr(azure.storage.filedatalake.aio.FileSystemClient, "__init__", mock_init_filesystem_aio)
monkeypatch.setattr(azure.storage.filedatalake.aio.FileSystemClient, "__aenter__", mock_aenter)
monkeypatch.setattr(azure.storage.filedatalake.aio.FileSystemClient, "__aexit__", mock_aexit)
monkeypatch.setattr(azure.storage.filedatalake.aio.FileSystemClient, "get_paths", mock_get_paths)
monkeypatch.setattr(azure.storage.filedatalake.aio.FileSystemClient, "exists", mock_exists_aio)
monkeypatch.setattr(
azure.storage.filedatalake.aio.FileSystemClient, "create_file_system", mock_create_filesystem_aio
)
monkeypatch.setattr(azure.storage.filedatalake.aio.FileSystemClient, "create_directory", mock_create_directory_aio)
monkeypatch.setattr(
azure.storage.filedatalake.aio.FileSystemClient,
"_get_root_directory_client",
mock_get_root_directory_client_aio,
)
def mock_init_file(self, *args, **kwargs):
self.path = kwargs.get("file_path")
self.acl = ""
def mock_download_file(self, *args, **kwargs):
return azure.storage.filedatalake.StorageStreamDownloader(None)
def mock_download_file_aio(self, *args, **kwargs):
return azure.storage.filedatalake.aio.StorageStreamDownloader(None)
def mock_get_access_control(self, *args, **kwargs):
if self.path.name == "a.txt":
return {"acl": "user:A-USER-ID:r-x,group:A-GROUP-ID:r-x"}
if self.path.name == "b.txt":
return {"acl": "user:B-USER-ID:r-x,group:B-GROUP-ID:r-x"}
if self.path.name == "c.txt":
return {"acl": "user:C-USER-ID:r-x,group:C-GROUP-ID:r-x"}
raise Exception(f"Unexpected path {self.path.name}")
async def mock_upload_data_aio(self, *args, **kwargs):
self.uploaded = True
pass
monkeypatch.setattr(azure.storage.filedatalake.DataLakeFileClient, "__init__", mock_init_file)
monkeypatch.setattr(azure.storage.filedatalake.DataLakeFileClient, "download_file", mock_download_file)
monkeypatch.setattr(azure.storage.filedatalake.DataLakeFileClient, "get_access_control", mock_get_access_control)
monkeypatch.setattr(azure.storage.filedatalake.aio.DataLakeFileClient, "__init__", mock_init_file)
monkeypatch.setattr(azure.storage.filedatalake.aio.DataLakeFileClient, "__aenter__", mock_aenter)
monkeypatch.setattr(azure.storage.filedatalake.aio.DataLakeFileClient, "__aexit__", mock_aexit)
monkeypatch.setattr(azure.storage.filedatalake.aio.DataLakeFileClient, "download_file", mock_download_file_aio)
monkeypatch.setattr(
azure.storage.filedatalake.aio.DataLakeFileClient, "get_access_control", mock_get_access_control
)
monkeypatch.setattr(azure.storage.filedatalake.aio.DataLakeFileClient, "upload_data", mock_upload_data_aio)
def mock_init_directory(self, path, *args, **kwargs):
self.path = path
self.files = {}
def mock_directory_get_file_client(self, *args, **kwargs):
path = kwargs.get("file")
if path in self.files:
return self.files[path]
self.files[path] = azure.storage.filedatalake.aio.DataLakeFileClient(path)
return self.files[path]
async def mock_update_access_control_recursive_aio(self, acl, *args, **kwargs):
for file in self.files.values():
if len(file.acl) > 0:
file.acl += ","
file.acl += acl
if self.path == "/":
for directory in self.child_directories.values():
await mock_update_access_control_recursive_aio(directory, acl)
async def mock_close_aio(self, *args, **kwargs):
pass
monkeypatch.setattr(azure.storage.filedatalake.aio.DataLakeDirectoryClient, "__init__", mock_init_directory)
monkeypatch.setattr(azure.storage.filedatalake.aio.DataLakeDirectoryClient, "__aenter__", mock_aenter)
monkeypatch.setattr(azure.storage.filedatalake.aio.DataLakeDirectoryClient, "__aexit__", mock_aexit)
monkeypatch.setattr(
azure.storage.filedatalake.aio.DataLakeDirectoryClient, "get_file_client", mock_directory_get_file_client
)
monkeypatch.setattr(
azure.storage.filedatalake.aio.DataLakeDirectoryClient,
"update_access_control_recursive",
mock_update_access_control_recursive_aio,
)
monkeypatch.setattr(azure.storage.filedatalake.aio.DataLakeDirectoryClient, "close", mock_close_aio)
def mock_readinto(self, *args, **kwargs):
pass
monkeypatch.setattr(azure.storage.filedatalake.StorageStreamDownloader, "__init__", mock_init)
monkeypatch.setattr(azure.storage.filedatalake.StorageStreamDownloader, "readinto", mock_readinto)
monkeypatch.setattr(azure.storage.filedatalake.aio.StorageStreamDownloader, "__init__", mock_init)
monkeypatch.setattr(azure.storage.filedatalake.aio.StorageStreamDownloader, "readinto", mock_readinto)
| [
"PLACEHOLDER<<",
"There is a whistleblower policy."
] |
2024-01-10 | con2000us/GPT_transcriber | moduel~k2_old.py | ## input : config.AIPunctuationFile
## output : config.AITranslationFile
from openai import OpenAI
import json
import time
import config_reader
import re
config = config_reader.config
# Empty the translation output and log files before starting
with open(config.AITranslationFile, 'w', encoding='utf-8') as file:
file.write('')
with open(config.AITranslationLog, 'w', encoding='utf-8') as file:
file.write('')
with open('moduel/API.key', 'r') as file:
key = file.read()
client = OpenAI(
api_key=key,
)
# Step 1: create an assistant
assistant = client.beta.assistants.create(
name="John Doe",
instructions="你是個英語翻譯,負責翻譯英文轉成文句通暢的中文",
model=config.AIModel
)
with open(config.AIPunctuationFile, 'r', encoding='utf-8') as file:
subtitles = json.load(file)
# Initialize variables
sentences_per_batch = 10
total_sentences = len(subtitles)  # actual number of subtitle entries
batches = (total_sentences + sentences_per_batch - 1) // sentences_per_batch  # number of batches needed
translated_subtitles = []  # holds the translated subtitles
debug_trans = ""
# Process each batch in turn
for batch in range(batches):
currentReq = 0
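    # Retry loop: resend this batch until the translated line count matches the input, up to config.maxReqTime attempts.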
while True:
currentReq += 1
start_index = batch * sentences_per_batch
        end_index = min(start_index + sentences_per_batch, total_sentences)  # do not run past the last subtitle
batch_subtitles = subtitles[start_index:end_index]
#print(json.dumps(batch_subtitles, ensure_ascii=False, indent=4))
        # Build the message content from the subtitle text
subtitles_text = "\n".join([f"{subtitle['start']}##{subtitle['text']}" for subtitle in batch_subtitles])
# message_content = "##前面的數值與##本身不須更動並且必須保留 只將後面字串內容翻譯成繁體中文 並保持一句原文對應一句翻譯的中文關係. \n" + subtitles_text
message_content = subtitles_text
print(f"########################################## {batch + 1} / {batches} ##################################################")
# print(f"{message_content}")
print(f"{subtitles_text}")
print(f"------------------------------------------ {batch + 1} / {batches} --------------------------------------------------")
debug_trans += "############################################################################################\n"
debug_trans += subtitles_text
debug_trans += "\n--------------------------------------------------------------------------------------------\n"
        # Step 2: create a thread
thread = client.beta.threads.create()
        # Step 3: add a message to the thread
message = client.beta.threads.messages.create(
thread_id=thread.id,
role="user",
content=message_content
)
        # Step 4: run the assistant
run = client.beta.threads.runs.create(
thread_id=thread.id,
assistant_id=assistant.id,
instructions="你要把字幕內容翻譯成中文 每行字串的最前面是一個浮點數的時間戳記與##分割符號 這兩樣內容在輸出時不須更動並且必須保留 字串後面的內容需要翻譯成繁體中文 並保持一句原文對應一句翻譯的中文關係. 絕對不能有上下文合併到同一行文字 不然時間戳記會錯誤"
)
        # Poll the run status until it completes
while True:
run = client.beta.threads.runs.retrieve(
thread_id=thread.id,
run_id=run.id
)
if run.status == 'completed':
break
            time.sleep(1)  # wait a second before checking again
        # Step 5: read the assistant's reply and process the translation results
messages = client.beta.threads.messages.list(thread_id=thread.id)
for message in messages.data:
if message.role == "assistant":
for content in message.content:
if content.type == 'text':
                        # show translation progress
text = content.text.value
text = text.replace('\\(', '(').replace('\\)', ')')
debug_trans += text + "\n"
translated_text = content.text.value
#translated_sentences = translated_text.split('\n')
translated_sentences = [line for line in translated_text.split('\n') if line.strip()]
                        # Drop anything that is not part of the translation output
                        # regex: a number (integer or float) followed by '##'
pattern = r'\b\d+(\.\d+)?##'
sharpNum = sum(1 for sentence in translated_sentences if re.search(pattern, sentence))
i = len(translated_sentences) - 1
inputLines = len(subtitles_text.split("\n"))
# print(f"sharpNum : {sharpNum}, inputLines : {inputLines}")
while i >= 0:
if len(translated_sentences[i].strip())<=1:
translated_sentences.pop(i)
                            # if the API returned a meaningless line while the other lines carry timestamps, drop it
if(sharpNum == inputLines and re.search(pattern, translated_sentences[i]) is None):
translated_sentences.pop(i)
i -= 1
for trans_sentence in translated_sentences:
print(f"{trans_sentence}")
if re.search(pattern, trans_sentence):
for subtitle in batch_subtitles:
#print(f"found ## in {trans_sentence}")
token = trans_sentence.split('##')
if str(subtitle['start']) == str(token[0]) and len(token[1].strip()) > 0:
subtitle['trans'] = token[-1]
break # 找到匹配项后跳出内层循环
translated_subtitles.append(subtitle) # 将处理过的字幕添加到列表中
else:
if config.forceMatch and currentReq <= config.maxReqTime:
continue
guess = 0
while guess < sentences_per_batch and 'trans' in batch_subtitles[guess]:
guess += 1
                                # fall back to the last slot if every subtitle in the batch already has a translation
if guess >= len(batch_subtitles):
guess = len(batch_subtitles)-1
batch_subtitles[guess]['trans'] = trans_sentence
translated_subtitles.append(batch_subtitles[guess]) # 将处理过的字幕添加到列表中
break
# 檢查行數是否對應正確
inputLines = subtitles_text.split("\n")
if len(translated_sentences) != len(inputLines):
if currentReq >= config.maxReqTime: # maxRetryLimit 是您設定的最大重試次數
print(f"{len(inputLines)} -> {len(translated_sentences)}翻譯行數不符,但已達到最大重試次數。")
break
print(f"{len(inputLines)} -> {len(translated_sentences)}翻譯行數不符,正在重新嘗試...")
debug_trans += "\n翻譯行數不符,正在重新嘗試...\n"
currentReq += 1
continue # 重新進入迴圈進行請求
if '##' in trans_sentence or not config.forceMatch or currentReq > config.maxReqTime:
if currentReq > config.maxReqTime:
currentReq = 0
break
with open(config.AITranslationLog, 'a', encoding='utf-8') as file:
file.write(debug_trans)
# Append the translated subtitles to the JSON file
with open(config.AITranslationFile, 'a', encoding='utf-8') as file:
json.dump(translated_subtitles, file, ensure_ascii=False, indent=4)
| [] |
2024-01-10 | con2000us/GPT_transcriber | moduel~k2.py | ## input : config.AIPunctuationFile
## output : config.AITranslationFile
from openai import OpenAI
import os
import json
import time
import config_reader
import re
config = config_reader.config
process_path = os.path.join("process")
with open(os.path.join(process_path,config.workingFile), 'r') as file:
lng = json.load(file)['lng']
# Check whether the subtitle language is already Chinese
if lng == "zh":
print("*****原始語言為中文 不須翻譯*****")
with open(os.path.join(process_path,config.AIPunctuationFile), 'r', encoding='utf-8') as file:
subtitles = json.load(file)
    translated_subtitles = []  # holds the processed subtitles
for subtitle in subtitles:
subtitle["trans"] = subtitle["text"]
translated_subtitles.append(subtitle)
with open(os.path.join(process_path,config.AITranslationFile), 'w', encoding='utf-8') as file:
json.dump(translated_subtitles, file, ensure_ascii=False, indent=4)
with open(os.path.join(process_path,config.AITranslationLog), 'w', encoding='utf-8') as file:
file.write('')
# Non-Chinese subtitles are translated below
else:
    # Clear the trans.json output file before starting the translation
with open(os.path.join(process_path,config.AITranslationFile), 'w', encoding='utf-8') as file:
file.write('')
with open(os.path.join(process_path,config.AITranslationLog), 'w', encoding='utf-8') as file:
file.write('')
with open('moduel/API.key', 'r') as file:
key = file.read()
client = OpenAI(
api_key=key,
)
    # Step 1: create an assistant
assistant = client.beta.assistants.create(
name="John Doe",
instructions="你是個翻譯,負責翻譯字幕轉成文句通暢的中文",
model=config.AIModel
)
with open(os.path.join(process_path,config.AIPunctuationFile), 'r', encoding='utf-8') as file:
subtitles = json.load(file)
    # Initialize variables
sentences_per_batch = 10
    total_sentences = len(subtitles)  # actual number of subtitle entries
    batches = (total_sentences + sentences_per_batch - 1) // sentences_per_batch  # number of batches needed
    translated_subtitles = []  # holds the translated subtitles
debug_trans = ""
def translteByLine(batch_subtitles, inputs, outputs):
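        """Fallback path: translate each line individually and match it back to its subtitle by start time when batch translation keeps returning a mismatched line count."""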
debug_trans = ""
for idx, line in enumerate(inputs, start=1):
reTry = 0
while True:
token = line.split('##')
print(f"----------- 逐行翻譯 {idx} / {len(inputs)}-----------")
print(f"@{token[0]}")
print(f"{token[-1]}")
debug_trans += f"----------- 逐行翻譯 {idx} / {len(inputs)}-----------\n"
debug_trans += f"@{line}\n"
                # Step 2: create a thread
thread = client.beta.threads.create()
                # Step 3: add a message to the thread
message = client.beta.threads.messages.create(
thread_id=thread.id,
role="user",
content=token[-1]
)
                # Step 4: run the assistant
run = client.beta.threads.runs.create(
thread_id=thread.id,
assistant_id=assistant.id,
instructions="把內容翻譯成中文 並輸出成單一行"
)
                # Poll the run status until it completes
while True:
run = client.beta.threads.runs.retrieve(
thread_id=thread.id,
run_id=run.id
)
if run.status == 'completed':
break
                    time.sleep(1)  # wait a second before checking again
                # Step 5: read the assistant's reply and process the translation results
messages = client.beta.threads.messages.list(thread_id=thread.id)
for message in messages.data:
if message.role == "assistant":
for content in message.content:
if content.type == 'text':
# 顯示翻譯進度
text = content.text.value
text = text.replace('\\(', '(').replace('\\)', ')')
debug_trans += text + "\n"
translated_text = content.text.value
translated_sentences = [line for line in translated_text.split('\n') if line.strip()]
if len(translated_sentences) == 1:
print(f"{translated_sentences[0]}")
# debug_trans += f"{translated_sentences[0]}"
# 將翻譯內容匹配回原文
for subtitle in batch_subtitles:
if str(subtitle['start']) == str(token[0]):
subtitle['trans'] = translated_sentences[0]
print(f"***匹配成功***")
debug_trans += f" ***匹配成功***\n"
translated_subtitles.append(subtitle) # 将处理过的字幕添加到列表中
break
break
else:
print(f"------------輸出行數錯誤 重新測試----------------")
debug_trans += f"------------輸出行數錯誤 重新測試----------------"
reTry += 1
if reTry < config.maxReqTime:
continue
else:
print(f"------------已達最大錯誤次數----------------")
debug_trans += f"------------已達最大錯誤次數----------------"
break
break
break
return debug_trans
    # Process each batch in turn
for batch in range(batches):
currentReq = 0
while True:
currentReq += 1
start_index = batch * sentences_per_batch
            end_index = min(start_index + sentences_per_batch, total_sentences)  # do not run past the last subtitle
batch_subtitles = subtitles[start_index:end_index]
            # Build the message content from the subtitle text
subtitles_text = "\n".join([f"{subtitle['start']}##{subtitle['text']}" for subtitle in batch_subtitles])
# message_content = "##前面的數值與##本身不須更動並且必須保留 只將後面字串內容翻譯成繁體中文 並保持一句原文對應一句翻譯的中文關係. \n" + subtitles_text
message_content = subtitles_text
print(f"########################################## {batch + 1} / {batches} ##################################################")
# print(f"{message_content}")
print(f"{subtitles_text}")
print(f"------------------------------------------ {batch + 1} / {batches} --------------------------------------------------")
debug_trans += "############################################################################################\n"
debug_trans += subtitles_text
debug_trans += "\n--------------------------------------------------------------------------------------------\n"
            # Step 2: create a thread
thread = client.beta.threads.create()
            # Step 3: add a message to the thread
message = client.beta.threads.messages.create(
thread_id=thread.id,
role="user",
content=message_content
)
            # Step 4: run the assistant
run = client.beta.threads.runs.create(
thread_id=thread.id,
assistant_id=assistant.id,
instructions="你要把字幕內容翻譯成中文 每行字串的最前面是一個浮點數的時間戳記與##分割符號 這兩樣內容在輸出時不須更動並且必須保留 字串後面的內容需要翻譯成繁體中文 並保持一句原文對應一句翻譯的中文關係. 絕對不能有上下文合併到同一行文字 不然時間戳記會錯誤"
)
            # Poll the run status until it completes
while True:
run = client.beta.threads.runs.retrieve(
thread_id=thread.id,
run_id=run.id
)
if run.status == 'completed':
break
                time.sleep(1)  # wait a second before checking again
            # Step 5: read the assistant's reply and process the translation results
messages = client.beta.threads.messages.list(thread_id=thread.id)
for message in messages.data:
if message.role == "assistant":
for content in message.content:
if content.type == 'text':
# 顯示翻譯進度
text = content.text.value
text = text.replace('\\(', '(').replace('\\)', ')')
debug_trans += text + "\n"
translated_text = content.text.value
print(f"{translated_text}")
#translated_sentences = translated_text.split('\n')
translated_sentences = [line for line in translated_text.split('\n') if line.strip()]
# 移除非翻譯結果的內容
# 正則表達式匹配一個數字(整數或浮點數)後跟著 '##'
pattern = r'\b\d+(\.\d+)?##'
sharpNum = sum(1 for sentence in translated_sentences if re.search(pattern, sentence))
i = len(translated_sentences) - 1
inputLines = len(subtitles_text.split("\n"))
# print(f"sharpNum : {sharpNum}, inputLines : {inputLines}")
while i >= 0:
if len(translated_sentences[i].strip())<=1:
translated_sentences.pop(i)
# 假如API傳回無意義字串 其他字串有時間戳 則將無意義字串刪除
if(sharpNum == inputLines and re.search(pattern, translated_sentences[i]) is None):
translated_sentences.pop(i)
i -= 1
# 如果輸出行數一致 將翻譯字串匹配回原文
inputLines = subtitles_text.split("\n")
if len(translated_sentences) == len(inputLines):
for trans_sentence in translated_sentences:
if re.search(pattern, trans_sentence):
for subtitle in batch_subtitles:
#print(f"found ## in {trans_sentence}")
token = trans_sentence.split('##')
if str(subtitle['start']) == str(token[0]) and len(token[1].strip()) > 0:
subtitle['trans'] = token[-1]
break # 找到匹配项后跳出内层循环
translated_subtitles.append(subtitle) # 将处理过的字幕添加到列表中
break
# 檢查行數是否對應正確
inputLines = subtitles_text.split("\n")
if len(translated_sentences) != len(inputLines):
if currentReq >= config.maxReqTime: # maxRetryLimit 是您設定的最大重試次數
print(f"{len(inputLines)} -> {len(translated_sentences)}翻譯行數不符,但已達到最大重試次數。")
print("*******強制使用API逐行翻譯*******")
debug_trans += translteByLine(batch_subtitles, inputLines, translated_subtitles)
break
print(f"{len(inputLines)} -> {len(translated_sentences)}翻譯行數不符,正在重新嘗試...")
debug_trans += "\n翻譯行數不符,正在重新嘗試...\n"
currentReq += 1
continue # 重新進入迴圈進行請求
if '##' in trans_sentence or not config.forceMatch or currentReq > config.maxReqTime:
if currentReq > config.maxReqTime:
currentReq = 0
break
with open(os.path.join(process_path,config.AITranslationLog), 'a', encoding='utf-8') as file:
file.write(debug_trans)
    # Append the translated subtitles to the JSON file
with open(os.path.join(process_path,config.AITranslationFile), 'a', encoding='utf-8') as file:
json.dump(translated_subtitles, file, ensure_ascii=False, indent=4)
| [] |
2024-01-10 | con2000us/GPT_transcriber | moduel~k1.py | ## input : config.whisperFile
## output : config.AIPunctuationFile
from openai import OpenAI
import os
import re
import json
import time
import config_reader
config = config_reader.config
inputfile = config.whisperFile
process_path = os.path.join("process")
with open(os.path.join(process_path,config.AIPunctuationLog), 'w', encoding='utf-8') as file:
file.write('')
debug_text = ""
def time_to_seconds(time_str):
"""将时间字符串转换为秒"""
hours, minutes, seconds_milliseconds = time_str.split(':')
if ',' in seconds_milliseconds:
seconds, milliseconds = map(int, seconds_milliseconds.split(','))
else:
seconds = int(seconds_milliseconds)
milliseconds = 0
return int(hours) * 3600 + int(minutes) * 60 + seconds + milliseconds / 1000000
def parse_srt(file_content):
"""解析 SRT 文件内容"""
pattern = re.compile(r'(\d{1,2}:\d{2}:\d{2}(?:,\d{6})?) --> (\d{1,2}:\d{2}:\d{2}(?:,\d{6})?)\n(.*?)\n\n', re.DOTALL)
subtitles = []
for match in pattern.finditer(file_content):
#print(match)
start_str, end_str, text = match.groups()
        # keep the entry only if its text is not empty
if len(text.replace('\n', ' ')) > 0:
start = time_to_seconds(start_str)
end = time_to_seconds(end_str)
subtitles.append({
"id": len(subtitles) + 1,
"start": start,
"end": end,
"text": text.replace('\n', ' ')
})
return subtitles
def read_srt_file(filename):
"""从文件中读取 SRT 内容"""
with open(os.path.join(process_path,filename), 'r', encoding='utf-8') as file:
return file.read()
# Read the subtitle file
srt_content = read_srt_file(inputfile)
# Parse the subtitle file
subtitles = parse_srt(srt_content)
def check_subtitles(subtitles):
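    """Collect runs of consecutive subtitles that lack sentence-ending punctuation so they can be re-punctuated by the API; returns an empty list when config.lineArrange is off."""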
    # new list holding the consecutive subtitles that fail the punctuation check
continuous_subtitles = []
if not config.lineArrange:
return continuous_subtitles
    # counter for consecutive subtitles that fail the check
count = 0
count_flag = False
for subtitle in subtitles:
text = subtitle['text']
#是否已進入連續狀態
if count_flag:
#字句仍然連續 繼續加入處理陣列
if not (text.endswith('.') or text.endswith('!') or '. ' in text):
continuous_subtitles.append(subtitle)
else:
#不連續則中斷並改回flag
count_flag = False
else:
if not (text.endswith('.') or text.endswith('!') or '. ' in text):
#發現開始有無標點字串 先加入處理陣列
continuous_subtitles.append(subtitle)
#已經超過三句連續 進入正式連續狀態
if count > 3:
count = 0
count_flag = True
else:
#已經未過三句連續 繼續加總閾值
count += 1
else:
#發現斷點 而且已經處於連續狀態
if count_flag:
count_flag = False
else:
#發現斷點 但最後加進的字串未達連續閾值 全回溯清除
while count > 0:
continuous_subtitles.pop()
count -= 1
return continuous_subtitles
def group_array(arr):
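    """Group items with consecutive ids, then split any group of 16 or more entries into smaller chunks so each API request stays a manageable size."""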
    # group consecutive items together
groups = []
current_group = []
for item in arr:
if not current_group:
# 如果当前组为空,直接添加元素
current_group.append(item)
else:
# 检查当前元素的 id 是否与前一个元素的 id 连续
if item['id'] - current_group[-1]['id'] == 1:
current_group.append(item)
else:
# 如果不连续,将当前组添加到 groups,然后开始新的组
groups.append(current_group)
current_group = [item]
    # add the last group (if any)
if current_group:
groups.append(current_group)
    # overly long runs must be split into chunks before being fed to the API
split_index = 13
index = 0
while True:
if len(groups[index]) >= 16:
cur_ele = groups.pop(index)
groups.insert(index,cur_ele[:split_index])
groups.insert(index+1,cur_ele[split_index:])
index += 1
        # finished scanning
if index >= len(groups):
break
return groups
########################################## Handle long passages that lack sentence breaks ###############################################
with open(os.path.join(process_path,config.workingFile), 'r') as file:
lng = json.load(file)['lng']
# Apply the check and show the result
if config.lineArrange and lng == 'en':
print(f"檢查標點符號是否缺失(僅限英文字句)")
problematic_subtitles = check_subtitles(subtitles)
else:
problematic_subtitles = []
if problematic_subtitles:
grouped_objects = group_array(problematic_subtitles)
for group in grouped_objects:
print([obj['id'] for obj in group])
if len(grouped_objects) == 1 and not grouped_objects[0]:
grouped_objects = []
else:
grouped_objects = []
########################################### Let the AI fix sentence segmentation ####################################################
with open('moduel/API.key', 'r') as file:
key = file.read()
client = OpenAI(
api_key=key,
)
# Step 1: create an assistant
assistant = client.beta.assistants.create(
name="John Doe",
instructions="你是個翻譯,負責翻譯外國字幕轉成文句通暢的中文",
model=config.splitLineModel
)
def update_subtitles(subtitles, adj_sentences):
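    """Write each re-punctuated line back into the subtitle with the matching start time, keeping the original text in 'text_old'."""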
for sentence in adj_sentences:
        # split each line to get the start time and the text
if "##" in sentence:
token = sentence.split("##")
sst = token[0].strip()
            # look for the matching start time in subtitles
for subtitle in subtitles:
if str(subtitle["start"]) == str(sst):
                    # match found: update the text and keep the old one
subtitle["text_old"] = subtitle["text"]
subtitle["text"] = token[-1]
break
def evalResult(text, lineNum):
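    """Keep only the returned lines that start with a numeric timestamp and normalise them to the 'start##text' form; other lines are discarded."""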
print("*****資料合法性解析*****")
returnLines = []
for line in text:
cleaned_line = line.strip('"')
pattern = r"^(\d+(\.\d{1,2})?)[^a-zA-Z]*"
match = re.match(pattern, line)
if match:
fixed_line = re.sub(pattern, r"\1##", cleaned_line)
print(cleaned_line + " 匹配完成")
returnLines.append(fixed_line)
else:
print(cleaned_line + " 無匹配")
return returnLines
for idx, group in enumerate(grouped_objects, start=1):
passFlag = True
currentReq = 0
tempReault = []
while passFlag:
currentReq += 1
        # Build the message content from the subtitle text
subtitles_text = "\n".join([f"{subtitle['start']}##{subtitle['text']}" for subtitle in group])
message_content = "本句之後是一段字幕內容 ##前面的數值與##本身不須更動並且必須保留 觀察每句的前後文然後將文章補上標點符號 另外注意每行的結尾不一定是整句的結尾,請仔細觀察前後文的內容再決定句號的位置. 另外不要使用省略號 \n" + subtitles_text
print(f"############################################# {idx} / {len(grouped_objects)} ###############################################")
# print(f"{message_content}")
print(f"{subtitles_text}")
print(f"--------------------------------------------- {idx} / {len(grouped_objects)} -----------------------------------------------")
debug_text += f"############################################# {idx} / {len(grouped_objects)} ###############################################\n"
debug_text += subtitles_text + "\n"
debug_text += f"--------------------------------------------- {idx} / {len(grouped_objects)} -----------------------------------------------\n"
        # Step 2: create a thread
thread = client.beta.threads.create()
        # Step 3: add a message to the thread
message = client.beta.threads.messages.create(
thread_id=thread.id,
role="user",
content=message_content
)
        # Step 4: run the assistant
run = client.beta.threads.runs.create(
thread_id=thread.id,
assistant_id=assistant.id,
# instructions="Please address the user as Jane Doe. The user has a premium account."
instructions="這些句子沒有適當的標點符號 請觀察每句的前後文然後在適合的文句補上標點符號(注意! 每行的結尾不一定是整句的結尾,請仔細觀察前後文的內容) 讓每句子盡量不超過20個單字.另外不要使用省略號"
)
        # Poll the run status until it completes
while True:
run = client.beta.threads.runs.retrieve(
thread_id=thread.id,
run_id=run.id
)
if run.status == 'completed':
break
            time.sleep(1)  # wait a second before checking again
        # Step 5: read the assistant's reply and process the result
messages = client.beta.threads.messages.list(thread_id=thread.id)
for message in messages.data:
if message.role == "assistant":
for content in message.content:
if content.type == 'text':
# 顯示斷句內容
text = content.text.value
text = text.replace('\\(', '(').replace('\\)', ')')
# print(text)
adj_text = content.text.value
adj_sentences = [line for line in adj_text.split('\n') if line.strip()]
debug_text += text + "\n" + f"建議行數 : {len(group)}\n"
print(adj_text + f"\n建議行數 : {len(group)}\n")
fixed_sentences = evalResult(adj_sentences,len(group))
print("...................................................................")
for sentence in fixed_sentences:
print(sentence)
print(f"\n合法行數 : {len(fixed_sentences)}\n")
#update_subtitles(subtitles, adj_sentences)
if len(fixed_sentences) == len(group):
update_subtitles(subtitles, fixed_sentences)
passFlag = False
break
else:
if currentReq > config.maxReqTime:
update_subtitles(subtitles, max(tempReault, key=len))
passFlag = False
break
else:
tempReault.append(fixed_sentences)
continue
def split_subtitle(subtitle):
"""根据句号分割字幕,并调整时间戳"""
text = subtitle['text']
start = subtitle['start']
end = subtitle['end']
    # check whether the text contains a sentence-ending mark
if '. ' in text or '! ' in text or '? ' in text or '。' in text:
        # split the text at sentence boundaries, ignoring common abbreviations (Mr, Mrs, Dr, Ms)
sentences = re.split(r'(?<!\bMr)(?<!\bMrs)(?<!\bDr)(?<!\bMs)\. |[!?。] ', text)
split_subtitles = []
        # compute a timestamp for each sentence in proportion to its length
total_length = len(text)
current_start = start
for i, sentence in enumerate(sentences):
sentence_length = len(sentence)
# 确保不除以零
if total_length > 0 and sentence_length > 0:
proportion = sentence_length / total_length
current_end = current_start + proportion * (end - start)
# 为除最后一个句子外的句子添加句号
if i < len(sentences) - 1:
sentence += '.'
# 创建新的字幕对象
split_subtitles.append({
"start": current_start,
"end": current_end,
"text": sentence.strip()
})
current_start = current_end
return split_subtitles
else:
return [subtitle]
def is_end_of_sentence(text):
    # check whether the text ends with a sentence-ending mark that is not part of an abbreviation
return re.search(r'(?<!\bMr)(?<!\bMrs)(?<!\bDr)(?<!\bMs)(?<!\bSt)\.(?!\w)|[!?。]$', text) is not None
def merge_subtitles(subtitles, max_length=config.maxCharPerLine):
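    """Merge consecutive subtitles until the text ends a sentence or merging would exceed max_length characters; returns the input unchanged when config.lineArrange is off."""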
merged_subtitles = []
i = 0
if not config.lineArrange:
return subtitles
while i < len(subtitles):
current_subtitle = subtitles[i]
text = current_subtitle['text']
start = current_subtitle['start']
end = current_subtitle['end']
        # keep merging until the text ends a sentence or the next merge would be too long
while not is_end_of_sentence(text) and i < len(subtitles) - 1:
next_subtitle = subtitles[i + 1]
next_text = next_subtitle['text']
            # check the merged length
if len(text + ' ' + next_text) > max_length:
break
            # extend the current subtitle
text += ' ' + next_text
end = next_subtitle['end']
i += 1
        # append the merged subtitle
merged_subtitles.append({
"start": start,
"end": end,
"text": text
})
i += 1
return merged_subtitles
def round_timestamps(subtitles):
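    """Round every start/end timestamp to two decimal places."""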
rounded_subtitles = []
for subtitle in subtitles:
rounded_start = round(subtitle['start'], 2)
rounded_end = round(subtitle['end'], 2)
rounded_subtitles.append({
"start": rounded_start,
"end": rounded_end,
"text": subtitle['text']
})
return rounded_subtitles
# Apply the split step
if config.lineArrange and lng == 'en':
split_subtitles = []
for subtitle in subtitles:
split_subtitles.extend(split_subtitle(subtitle))
    # Apply the merge step
merged_subtitles = merge_subtitles(split_subtitles)
    # Round the timestamps
rounded_subtitles = round_timestamps(merged_subtitles)
else:
rounded_subtitles = subtitles
with open(os.path.join(process_path,config.AIPunctuationLog), 'w', encoding='utf-8') as file:
file.write(debug_text)
# Write the processed subtitle data to a JSON file
with open(os.path.join(process_path,config.AIPunctuationFile), 'w', encoding='utf-8') as json_file:
json.dump(rounded_subtitles, json_file, ensure_ascii=False, indent=4)
print(f"字幕数据已保存到 {config.AIPunctuationFile} 文件中。")
| [] |