date_collected (stringclasses, 1 value) | repo_name (stringlengths, 6-116) | file_name (stringlengths, 2-220) | file_contents (stringlengths, 13-357k) | prompts (sequence) |
---|---|---|---|---|
2024-01-10 | sytelus/transformers | tests~test_modeling_tf_openai.py | # coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import OpenAIGPTConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers.models.openai.modeling_tf_openai import (
TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFOpenAIGPTDoubleHeadsModel,
TFOpenAIGPTForSequenceClassification,
TFOpenAIGPTLMHeadModel,
TFOpenAIGPTModel,
)
class TFOpenAIGPTModelTester:
def __init__(
self,
parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_token_type_ids = True
self.use_input_mask = True
self.use_labels = True
self.use_mc_token_ids = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
self.pad_token_id = self.vocab_size - 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
mc_token_ids = None
if self.use_mc_token_ids:
mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = OpenAIGPTConfig(
vocab_size=self.vocab_size,
n_embd=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
# intermediate_size=self.intermediate_size,
# hidden_act=self.hidden_act,
# hidden_dropout_prob=self.hidden_dropout_prob,
# attention_probs_dropout_prob=self.attention_probs_dropout_prob,
n_positions=self.max_position_embeddings,
n_ctx=self.max_position_embeddings,
# type_vocab_size=self.type_vocab_size,
# initializer_range=self.initializer_range,
pad_token_id=self.pad_token_id,
)
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def create_and_check_openai_gpt_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = TFOpenAIGPTModel(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
inputs = [input_ids, input_mask]
result = model(inputs)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_openai_gpt_lm_head(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = TFOpenAIGPTLMHeadModel(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_openai_gpt_double_head(
self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, *args
):
model = TFOpenAIGPTDoubleHeadsModel(config=config)
multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
inputs = {
"input_ids": multiple_choice_inputs_ids,
"mc_token_ids": mc_token_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_choices, self.seq_length, self.vocab_size)
)
self.parent.assertEqual(result.mc_logits.shape, (self.batch_size, self.num_choices))
def create_and_check_openai_gpt_for_sequence_classification(
self, config, input_ids, input_mask, head_mask, token_type_ids, *args
):
config.num_labels = self.num_labels
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
"labels": sequence_labels,
}
model = TFOpenAIGPTForSequenceClassification(config)
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFOpenAIGPTModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (
(TFOpenAIGPTModel, TFOpenAIGPTLMHeadModel, TFOpenAIGPTDoubleHeadsModel, TFOpenAIGPTForSequenceClassification)
if is_tf_available()
else ()
)
all_generative_model_classes = (
(TFOpenAIGPTLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
test_head_masking = False
test_onnx = False
def setUp(self):
self.model_tester = TFOpenAIGPTModelTester(self)
self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_openai_gpt_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)
def test_openai_gpt_lm_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_lm_head(*config_and_inputs)
def test_openai_gpt_double_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_double_head(*config_and_inputs)
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
if model_class in self.all_generative_model_classes:
x = model.get_output_embeddings()
assert isinstance(x, tf.keras.layers.Layer)
name = model.get_bias()
assert name is None
else:
x = model.get_output_embeddings()
assert x is None
name = model.get_bias()
assert name is None
def test_openai_gpt_sequence_classification_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)
def test_mixed_precision(self):
# TODO JP: Make OpenAIGPT float16 compliant
pass
def test_xla_mode(self):
# TODO JP: Make OpenAIGPT XLA compliant
pass
@slow
def test_model_from_pretrained(self):
for model_name in TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFOpenAIGPTModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_tf
class TFOPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
@slow
def test_lm_generate_openai_gpt(self):
model = TFOpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
input_ids = tf.convert_to_tensor([[481, 4735, 544]], dtype=tf.int32) # the president is
expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
output_ids = model.generate(input_ids, do_sample=False)
self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
| [] |
2024-01-10 | Yoshida24/slackpi | src~features~llm_chatbot~llm_chatbot.py | from type.type import MentionEventHandlerArgs
from modules.bolt.reply import reply
from modules.bolt.update_message import update_message
from modules.bolt.upload_file import upload_file
from openai import OpenAI
from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
import os
import json
from modules.bolt.extract_mention_and_text import extract_mention_and_text
from modules.bolt.is_message_from_bot import is_message_from_bot
from features.llm_chatbot.tools.pokefunction import (
fetch_pokemon_data_impl,
fetch_pokemon_data_tool,
)
from features.llm_chatbot.tools.screenshot import (
take_screenshot_impl,
take_screenshot_tool,
)
from features.llm_chatbot.tools.pixoo64.pixoo64_display_image_text import (
pixoo64_display_image_text_impl,
pixoo64_display_image_text_tool,
)
from features.llm_chatbot.tools.command_line.make import (
make_impl,
make_tool,
)
from typing import Callable
import logging
logger = logging.getLogger(__name__)
openai_client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
llm_model = "gpt-4-1106-preview"
temperature = 0.1
system_msg = "You are a Friendly and helpful AI assistant."
tools = [
take_screenshot_tool,
fetch_pokemon_data_tool,
pixoo64_display_image_text_tool,
make_tool,
]
def tools_response(
messages: list[ChatCompletionMessageParam],
presenter: Callable[[str], None],
) -> dict:
logger.info(f"\033[94mMESSAGES_INPUT\033[0m:\n{messages}")
function_response_file = None
first_response = openai_client.chat.completions.create(
model=llm_model,
temperature=temperature,
messages=messages,
tools=tools,
tool_choice="auto",
)
first_message = first_response.choices[0].message
tool_calls = first_message.tool_calls
if tool_calls:
"""
Handle the case where parallel function calling was executed.
"""
messages.append(first_message)
# Parallel function calling
for tool_call in tool_calls:
function_name = tool_call.function.name
arguments = json.loads(tool_call.function.arguments)
for tool in tools:
if function_name == tool["function"]["name"]:
logger.info(
f"\033[94mTOOL_CALLED\033[0m:\nfunction_name={function_name} arguments={arguments}"
)
if function_name == "take_screenshot":
selected_function = take_screenshot_impl
elif function_name == "pixoo64_display_image_text":
selected_function = pixoo64_display_image_text_impl
elif function_name == "fetch_pokemon_data":
selected_function = fetch_pokemon_data_impl
elif function_name == "make":
selected_function = make_impl
else:
raise Exception("function not found")
function_response = selected_function(**arguments)
function_response_msg = function_response[
"message"
] # TODO: receive the message and the other arguments as well
function_response_file = function_response["file"]
logger.info(
f"\033[94mTOOL_COMPLETED\033[0m:\nmessage={function_response_msg} function_response_file={function_response_file}"
)
messages.append(
{
"tool_call_id": tool_call.id,
"role": "tool",
"name": function_name,
"content": function_response_msg,
}
)
# Ask the model again using the function execution results
second_response = openai_client.chat.completions.create(
model=llm_model, temperature=temperature, top_p=1, messages=messages
)
response = second_response
else:
response = first_response
logger.info(f"\033[94mMESSAGES_UPDATED\033[0m:\n{response}")
result = {
"content": response.choices[0].message.content,
"file": function_response_file,
}
presenter(result["content"])
return result
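# Illustrative sketch (comments only): the tool loop above can be driven directly with a
# plain console presenter; it requires OPENAI_API_KEY and the configured tools to be usable.
#
#   tools_response(
#       messages=[
#           {"role": "system", "content": system_msg},
#           {"role": "user", "content": "What is pikachu's height?"},
#       ],
#       presenter=print,
#   )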
def slack_reply_clojure(args: MentionEventHandlerArgs, message_ts: str):
def slack_reply(text: str | None):
if text is None or text == "":
return
update_message(
app=args.app,
channel=args.event.event.channel,
ts=message_ts,
text=text,
)
return slack_reply
def handler(args: MentionEventHandlerArgs) -> None:
try:
# Notice user to accept the request
thread_start_res = reply(
app=args.app,
mention_body=args.event,
text="...",
)
# Init memory
messages = []
messages.append({"role": "system", "content": system_msg})
conversations = sorted(args.messages, key=lambda x: x["ts"]) # type: ignore
parsed_conversations = [
{
"role": ("assistant" if is_message_from_bot(conversation) else "user"),
"content": extract_mention_and_text(conversation["text"])["text"]
+ (
"\nattached files: "
+ "".join(
[
"\n- " + file["url_private_download"]
for file in conversation["files"]
]
)
if "files" in conversation
else ""
),
}
for conversation in conversations
]
messages.extend(parsed_conversations)
logger.info(f"\033[94mHISTORY\033[0m:\n{conversations}")
slack_reply = slack_reply_clojure(args, thread_start_res["ts"])
tools_result = tools_response(messages=messages, presenter=slack_reply)
if tools_result["file"] is not None:
upload_file(
app=args.app,
mention_body=args.event,
file=tools_result["file"],
thread_ts=thread_start_res["ts"],
)
except BaseException as e:
reply(
app=args.app,
mention_body=args.event,
text=f"Error: {str(str(e))}",
)
raise e
| [
"\nattached files: ",
"You are a Friendly and helpful AI assistant.",
"url_private_download",
"\n- "
] |
2024-01-10 | Yoshida24/slackpi | src~features~llm_chatbot~tools~pixoo64~pixoo64_display_image_text.py | from .pixoo64 import display
from openai.types.chat import ChatCompletionToolParam
def pixoo64_display_image_text_impl(
image_url: str | None = None,
text: str | None = None,
text_color: str = "#FFFFFF",
text_pos: str = "bottom",
**kwargs
):
display(image_url=image_url, text=text, text_color=text_color, text_pos=text_pos)
return {"message": "success", "file": None}
pixoo64_display_image_text_tool: ChatCompletionToolParam = {
"type": "function",
"function": {
"name": "pixoo64_display_image_text",
"description": "Display image in URL to Pixoo, which is electronic billboard.",
"parameters": {
"type": "object",
"properties": {
"image_url": {
"type": "string",
"description": "Image URL. e.g. https://komori541milk.web.fc2.com/dot/4Shinnoh/474n.png",
},
"text": {
"type": "string",
"description": "Text to display on electronic billboard. e.g. hello world!",
},
"text_color": {
"type": "string",
"description": "Text color to display on electronic billboard. e.g. #FFFFFF",
},
"text_pos": {
"type": "string",
"description": "Text posision to display on electronic billboard. e.g. bottom",
"enum": ["top", "middle", "bottom"],
},
},
"required": [],
},
},
}
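# Illustrative sketch (comments only): calling the implementation directly; it assumes
# a Pixoo 64 device is reachable by the `display` helper imported above.
#
#   pixoo64_display_image_text_impl(text="hello world!", text_color="#FF0000", text_pos="top")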
| [] |
2024-01-10 | Yoshida24/slackpi | src~features~llm_chatbot~tools~command_line~make.py | import subprocess
from openai.types.chat import ChatCompletionToolParam
def make_impl(dir: str, make_args: str, **kwargs):
# Run the make command
results = []
if make_args.startswith("make "):
make_args = make_args[5:]
res_make = subprocess.run(
["make"] + make_args.split(" "), cwd=dir, stdout=subprocess.PIPE
)
results.append(res_make.stdout.decode("utf-8"))
return {"message": "\n".join(results), "file": None}
make_tool: ChatCompletionToolParam = {
"type": "function",
"function": {
"name": "make",
"description": "Run make command by subprocess.",
"parameters": {
"type": "object",
"properties": {
"dir": {
"type": "string",
"description": "Directory path to Makefile. e.g. /home/username/projectname/",
},
"make_args": {
"type": "string",
"description": "Args of make command. e.g. test",
},
},
"required": ["dir", "make_args"],
},
},
}
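# Illustrative sketch (comments only): a leading "make " is stripped before the subprocess
# call, so both forms below are equivalent. The directory path is a made-up example.
#
#   make_impl(dir="/home/username/projectname/", make_args="test")
#   make_impl(dir="/home/username/projectname/", make_args="make test")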
| [] |
2024-01-10 | Yoshida24/slackpi | src~features~llm_chatbot~tools~pokefunction.py | import json
import requests
from openai.types.chat import ChatCompletionToolParam
def fetch_pokemon_data_impl(name: str, **kwargs):
url = f"https://pokeapi.co/api/v2/pokemon/{name}"
response = requests.get(url)
if response.status_code == 200:
data = {
"weight": response.json()["weight"] * 0.1,
"height": response.json()["height"] * 0.1,
}
return {"message": json.dumps(data), "file": None}
else:
return {"message": "not found.", "file": None}
fetch_pokemon_data_tool: ChatCompletionToolParam = {
"type": "function",
"function": {
"name": "fetch_pokemon_data",
"description": "Fetch pokemon data by pokemon name.",
"parameters": {
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "Pokemon name, e.g. pikachu",
},
},
"required": ["name"],
},
},
}
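if __name__ == "__main__":
    # Minimal usage sketch; requires network access to pokeapi.co, and the returned
    # numbers depend on the live API data.
    print(fetch_pokemon_data_impl("pikachu"))
    # e.g. {'message': '{"weight": 6.0, "height": 0.4}', 'file': None}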
| [] |
2024-01-10 | Yoshida24/slackpi | src~features~llm_chatbot~tools~screenshot.py | from io import BytesIO
from playwright.sync_api import sync_playwright
from openai.types.chat import ChatCompletionToolParam
def take_screenshot_impl(url: str, **kwargs) -> dict:
# Create a BytesIO object to hold the screenshot bytes
output = BytesIO()
with sync_playwright() as playwright:
browser = playwright.chromium.launch()
page = browser.new_page()
page.goto(url)
screenshot = page.screenshot(full_page=True)
output.write(screenshot)
browser.close()
return {
"message": "The screenshot was captured successfully. If your platform has an upload feature, use it to upload the screenshot to your platform.",
"file": output,
}
take_screenshot_tool: ChatCompletionToolParam = {
"type": "function",
"function": {
"name": "take_screenshot",
"description": "Save screenshot of web page.",
"parameters": {
"type": "object",
"properties": {
"url": {
"type": "string",
"description": "URL to take screenshot. e.g. https://www.google.com/",
},
},
},
},
}
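if __name__ == "__main__":
    # Minimal usage sketch; assumes Playwright's Chromium browser is installed
    # (e.g. via `playwright install chromium`) and that the URL is reachable.
    shot = take_screenshot_impl("https://www.google.com/")
    print(f"captured {len(shot['file'].getvalue())} bytes")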
| [] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~data_agent~executors~data_summary_executor.py | from typing import Any, Dict, Tuple, Union
from abc import ABC
from langchain.base_language import BaseLanguageModel
from langchain import PromptTemplate
from real_agents.adapters.callbacks.executor_streaming import ExecutorStreamingChainHandler
from real_agents.adapters.data_model import DatabaseDataModel, TableDataModel, ImageDataModel
from real_agents.adapters.llm import LLMChain
class DataSummaryExecutor(ABC):
tool_name = "DataProfiling"
def _intelligent_summary(self, grounding_source: ImageDataModel, num_insights: int, llm: BaseLanguageModel) -> str:
"""Use LLM to generate data summary."""
pass
class TableSummaryExecutor(DataSummaryExecutor):
SUMMARY_PROMPT_TEMPLATE = """
{table_info}
Provide a succinct yet meaningful summary of the table with less than 20 words, encapsulating its essence beyond just enumerating the columns. Please ensure your summary is a complete sentence and include it within <summary></summary> tags."
Note the table actually far more rows than shown above, so you MUST NOT make any rash conclusions based on the shown table rows or cells."
Then provide {num_insights} insightful and interesting suggestions in natural language that users can directly say to analyze the table. The suggestions should be able to be solved by python/sql."
The final results should be markdown '+' bullet point list, e.g., + The first suggestion.
Begin."
"""
stream_handler = ExecutorStreamingChainHandler()
def run(
self,
grounding_source: Union[TableDataModel, DatabaseDataModel],
llm: BaseLanguageModel,
use_intelligent_summary: bool = True,
num_insights: int = 3,
) -> Dict[str, Any]:
summary = ""
if isinstance(grounding_source, TableDataModel):
df = grounding_source.raw_data
df_name = grounding_source.raw_data_name
# Basic summary
summary += f"Your table {df_name} contains {df.shape[0]} rows and {df.shape[1]} columns. "
null_count = df.isnull().sum().sum() # Get total number of null values
unique_values_avg = df.nunique().mean() # Get average number of unique values
summary += f"On average, each column has about {unique_values_avg:.0f} unique values. "
if null_count > 0:
summary += f"Watch out, there are {null_count} missing values in your data. "
else:
summary += "Good news, no missing values in your data. "
# Intelligent summary
if use_intelligent_summary:
intelligent_summary = self._intelligent_summary(
grounding_source,
num_insights=num_insights,
llm=llm,
)
table_summary, suggestions = self._parse_output(intelligent_summary)
summary += table_summary
summary += "\n" + "Here are some additional insights to enhance your understanding of the table."
summary += "\n" + suggestions
for stream_token in summary.split(" "):
self.stream_handler.on_llm_new_token(stream_token)
elif isinstance(grounding_source, DatabaseDataModel):
# TODO: Convert to df or use SQL query for basic summary
raise NotImplementedError("DatabaseDataModel is not supported yet.")
else:
raise ValueError(f"Unsupported grounding source type: {type(grounding_source)}")
return summary
def _intelligent_summary(
self, grounding_source: Union[TableDataModel, DatabaseDataModel], num_insights: int, llm: BaseLanguageModel
) -> str:
"""Use LLM to generate data summary."""
summary_prompt_template = PromptTemplate(
input_variables=["table_info", "num_insights"],
template=self.SUMMARY_PROMPT_TEMPLATE,
)
method = LLMChain(llm=llm, prompt=summary_prompt_template)
result = method.run({"table_info": grounding_source.get_llm_side_data(), "num_insights": num_insights})
return result
def _parse_output(self, content: str) -> Tuple[str, str]:
"""Parse the output of the LLM to get the data summary."""
from bs4 import BeautifulSoup
# Using 'html.parser' to parse the content
soup = BeautifulSoup(content, "html.parser")
# Parsing the tag and summary contents
try:
table_summary = soup.find("summary").text
except Exception:
import traceback
traceback.print_exc()
table_summary = ""
lines = content.split("\n")
# Initialize an empty list to hold the parsed bullet points
bullet_points = []
# Loop through each line
bullet_point_id = 1
for line in lines:
# If the line starts with '+', it is a bullet point
if line.startswith("+"):
# Remove the '+ ' from the start of the line and add it to the list
bullet_points.append(f"{bullet_point_id}. " + line[1:].strip().strip('"'))
bullet_point_id += 1
return table_summary, "\n".join(bullet_points)
class ImageSummaryExecutor(DataSummaryExecutor):
SUMMARY_PROMPT_TEMPLATE = """
{img_info}
Provide a succinct summary of the uploaded file with less than 20 words. Please ensure your summary is a complete sentence and include it within <summary></summary> tags. For image, just show its name is basically enough."
Then provide {num_insights} very simple and basic suggestions in natural language about further processing with the data. The suggestions should be able to be solved by python(e.g., grayscale, rescale, rotation, etc). The final results should be markdown '+' bullet point list, e.g., + The first suggestion."
Begin.
"""
stream_handler = ExecutorStreamingChainHandler()
def run(
self,
grounding_source: ImageDataModel,
llm: BaseLanguageModel,
use_intelligent_summary: bool = True,
num_insights: int = 3,
) -> Dict[str, Any]:
summary = ""
if isinstance(grounding_source, ImageDataModel):
# Basic summary
raw_data = grounding_source.raw_data
img_size, img_mode, img_format = raw_data["size"], raw_data["mode"], raw_data["format"]
summary += f"Your image **{grounding_source.simple_filename}** is a {img_size[0]}x{img_size[1]} {img_mode} image in {img_format} format.\n"
# Intelligent summary
if use_intelligent_summary:
intelligent_summary = self._intelligent_summary(
grounding_source,
num_insights=num_insights,
llm=llm,
)
_, suggestions = self._parse_output(intelligent_summary)
summary += "\n" + "Here are some additional insights to enhance your understanding of the image"
summary += "\n" + suggestions
for stream_token in summary.split(" "):
self.stream_handler.on_llm_new_token(stream_token)
else:
raise ValueError(f"Unsupported data summary for grounding source type: {type(grounding_source)}")
return summary
def _intelligent_summary(self, grounding_source: ImageDataModel, num_insights: int, llm: BaseLanguageModel) -> str:
"""Use LLM to generate data summary."""
summary_prompt_template = PromptTemplate(
input_variables=["img_info", "num_insights"],
template=self.SUMMARY_PROMPT_TEMPLATE,
)
method = LLMChain(llm=llm, prompt=summary_prompt_template)
result = method.run({"img_info": grounding_source.get_llm_side_data(), "num_insights": num_insights})
return result
def _parse_output(self, content: str) -> Tuple[str, str]:
"""Parse the output of the LLM to get the data summary."""
from bs4 import BeautifulSoup
# Using 'html.parser' to parse the content
soup = BeautifulSoup(content, "html.parser")
# Parsing the tag and summary contents
try:
table_summary = soup.find("summary").text
except Exception:
import traceback
traceback.print_exc()
table_summary = ""
lines = content.split("\n")
# Initialize an empty list to hold the parsed bullet points
bullet_points = []
# Loop through each line
bullet_point_id = 1
for line in lines:
# If the line starts with '+', it is a bullet point
if line.startswith("+"):
# Remove the '+ ' from the start of the line and add it to the list
bullet_points.append(f"{bullet_point_id}. " + line[1:].strip().strip('"'))
bullet_point_id += 1
return table_summary, "\n".join(bullet_points)
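if __name__ == "__main__":
    # Minimal sketch of what `_parse_output` extracts; the reply below is a made-up
    # example of the format the prompt templates ask the LLM to produce.
    _sample = (
        "<summary>A small table of monthly sales per region.</summary>\n"
        "+ Plot the total sales per month.\n"
        "+ Find the region with the highest average sales.\n"
    )
    _summary, _suggestions = TableSummaryExecutor()._parse_output(_sample)
    print(_summary)      # -> A small table of monthly sales per region.
    print(_suggestions)  # -> numbered list of the two suggestions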
| [
"table_info",
"\n{table_info}\n\nProvide a succinct yet meaningful summary of the table with less than 20 words, encapsulating its essence beyond just enumerating the columns. Please ensure your summary is a complete sentence and include it within <summary></summary> tags.\"\nNote the table actually far more rows than shown above, so you MUST NOT make any rash conclusions based on the shown table rows or cells.\"\nThen provide {num_insights} insightful and interesting suggestions in natural language that users can directly say to analyze the table. The suggestions should be able to be solved by python/sql.\"\nThe final results should be markdown '+' bullet point list, e.g., + The first suggestion.\n\nBegin.\"\n",
"num_insights",
"\n{img_info}\n\nProvide a succinct summary of the uploaded file with less than 20 words. Please ensure your summary is a complete sentence and include it within <summary></summary> tags. For image, just show its name is basically enough.\"\nThen provide {num_insights} very simple and basic suggestions in natural language about further processing with the data. The suggestions should be able to be solved by python(e.g., grayscale, rescale, rotation, etc). The final results should be markdown '+' bullet point list, e.g., + The first suggestion.\"\n\nBegin.\n",
"img_info"
] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~web_agent~executors~webot_executor.py | """
Implementation of the WebotExecutor class.
WebotExecutor takes the user's intent as input and returns the start_url and instruction as the input for the web browsing plugin.
"""
from __future__ import annotations
from typing import Any, Dict, Union
from langchain.base_language import BaseLanguageModel
from pydantic import BaseModel, Extra
from real_agents.web_agent.web_browsing.base import WebotCallingChain
# This executor is for chat interface usage and not for the extension usage.
# For the extension usage, refer to xlang/real_agents/web_agent/executors/web_browsing_executor.py
class WebotExecutor(BaseModel):
"""
WebotExecutor takes the user's intent as input and returns the start_url and instruction as the input for the web browsing plugin (tool).
"""
name: str
description: str
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@classmethod
def from_webot(cls) -> WebotExecutor:
return cls(
name="WeBot",
description="Use the web navigation agent to perform actions on the web, including information retrieval, task completion (e.g. write an email or tweet or organize a meeting), etc. The action input should contain the action and the start url.\nFor example:\nUse xxx.com to search xxx.",
)
def run(
self,
user_intent: str,
llm: BaseLanguageModel,
) -> Union[str, Dict[str, Any]]:
inputs = {"input_str": user_intent}
method = WebotCallingChain.from_llm(
llm,
)
output = method(inputs)
return output
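# Illustrative sketch (comments only): wiring the executor to a chat model.
# `ChatOpenAI` and an API key in the environment are assumptions about the caller.
#
#   from real_agents.adapters.models import ChatOpenAI
#   webot = WebotExecutor.from_webot()
#   output = webot.run("Use google.com to search the weather in Hong Kong.", llm=ChatOpenAI())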
| [] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~plugins_agent~plugin.py | """An agent designed to hold a conversation in addition to using tools. (Specially designed for plugins model)"""
from __future__ import annotations
from typing import Any, List, Optional, Sequence, Tuple, Union
from pydantic import Extra, Field
from typing_extensions import override
from langchain.agents.agent import AgentOutputParser
from langchain.agents.utils import validate_tools_single_input
from langchain.base_language import BaseLanguageModel
from langchain.chains import LLMChain
from langchain.schema import (
AgentAction,
AgentFinish,
AIMessage,
BaseMessage,
BaseOutputParser,
HumanMessage
)
from langchain.callbacks.manager import (
Callbacks
)
from langchain.tools.base import BaseTool
from langchain.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
)
from real_agents.adapters.agent_helpers.agent import Agent
from real_agents.adapters.agent_helpers.output_parser import ConversationOutputParser
from real_agents.plugins_agent.plugin_prompt import (
PREFIX,
SUFFIX,
TEMPLATE_TOOL_RESPONSE,
fake_continue_prompt
)
from real_agents.adapters.data_model import DataModel, MessageDataModel
from real_agents.data_agent.copilot import ConversationalChatAgent
class ConversationalPluginChatAgent(ConversationalChatAgent):
"""An agent designed to hold a conversation in addition to using plugin tool."""
output_parser: ConversationOutputParser = Field(
default_factory=ConversationOutputParser
)
template_tool_response: str = TEMPLATE_TOOL_RESPONSE
continue_model: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.allow
arbitrary_types_allowed = True
@classmethod
def _get_default_output_parser(
cls, **kwargs: Any
) -> ConversationOutputParser:
return ConversationOutputParser()
@property
def _agent_type(self) -> str:
raise NotImplementedError
@property
def observation_prefix(self) -> str:
"""Prefix to append the observation with."""
return "Observation: "
@property
def llm_prefix(self) -> str:
"""Prefix to append the llm call with."""
return "Thought:"
@classmethod
def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
super()._validate_tools(tools)
validate_tools_single_input(cls.__name__, tools)
@classmethod
def create_prompt(
cls,
tools: Sequence[BaseTool],
system_message: str = PREFIX,
human_message: str = SUFFIX,
input_variables: Optional[List[str]] = None,
output_parser: Optional[BaseOutputParser] = None,
) -> BasePromptTemplate:
tool_strings = "\n".join([f"Name: {tool.name}\nDescription: {tool.description}" for tool in tools])
tool_strings = tool_strings.replace("{", "{{").replace("}", "}}")
tool_names = ", ".join([tool.name for tool in tools])
_output_parser = output_parser or cls._get_default_output_parser()
format_instructions = _output_parser.get_format_instructions("plugins")
format_instructions = format_instructions.format(tool_names=tool_names)
# system message
system_message = system_message + f"{tool_strings}\n\n{format_instructions}"
# human input
final_prompt = human_message
if input_variables is None:
input_variables = ["input", "chat_history", "agent_scratchpad"]
messages = [
SystemMessagePromptTemplate.from_template(system_message),
MessagesPlaceholder(variable_name="chat_history"),
HumanMessagePromptTemplate.from_template(final_prompt),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
return ChatPromptTemplate(input_variables=input_variables, messages=messages)
def _construct_scratchpad(self, intermediate_steps: List[Tuple[AgentAction, str]]) -> List[BaseMessage]:
"""Construct the scratchpad that lets the agent continue its thought process."""
thoughts: List[BaseMessage] = []
# Try to only use AI message for scratchpad
content = []
for idx, (action, full_observation) in enumerate(intermediate_steps):
content.append(MessageDataModel.extract_action_for_llm(action.log))
observation = full_observation
if isinstance(full_observation, DataModel):
llm_raw_observation = full_observation.get_llm_side_data()
observation = MessageDataModel.extract_tool_response_for_llm(llm_raw_observation, tool_style="plugin")
tool_response = self.template_tool_response.format(
observation=str(observation), tool_names=self.allowed_tools
)
if idx == len(intermediate_steps) - 1:
content.append(tool_response)
else:
content.append(observation)
content_str = "\n".join(content)
thoughts.append(AIMessage(content=content_str))
if self.continue_model is not None and len(intermediate_steps) != 0:
thoughts.append(HumanMessage(content=fake_continue_prompt[self.continue_model]))
return thoughts
@override
def plan(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
**kwargs: Any,
) -> Union[AgentAction, AgentFinish]:
"""Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date,
along with observations
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
"""
full_inputs = self.get_full_inputs(intermediate_steps, **kwargs)
system_prompt = self.llm_chain.prompt.messages[0].format().content
system_prompt_tokens = MessageDataModel._count_tokens(
system_prompt
)
max_tokens = 8000
max_gen_tokens = 1000
# FIXME: need more accurate token limit calculation
full_inputs = MessageDataModel.truncate_chat_history(full_inputs,
max_token=max_tokens - system_prompt_tokens - max_gen_tokens)
full_output = self.llm_chain.predict(**full_inputs)
return self.output_parser.parse(full_output)
@classmethod
def from_llm_and_tools(
cls,
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
callbacks: Callbacks = None,
output_parser: Optional[AgentOutputParser] = None,
system_message: str = PREFIX,
human_message: str = SUFFIX,
input_variables: Optional[List[str]] = None,
**kwargs: Any,
) -> Agent:
"""Construct an agent from an LLM and tools."""
cls._validate_tools(tools)
_output_parser = output_parser or cls._get_default_output_parser()
prompt = cls.create_prompt(
tools,
system_message=system_message,
human_message=human_message,
input_variables=input_variables,
output_parser=_output_parser,
)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
)
tool_names = [tool.name for tool in tools]
return cls(
llm_chain=llm_chain,
allowed_tools=tool_names,
output_parser=_output_parser,
**kwargs,
)
| [
"system_messageb2219787-d42d-467e-9fe0-4ae0713eee67PLACEHOLDER\n\nPLACEHOLDER"
] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~adapters~executors~question_suggestion~question_suggestion_executor.py | from typing import Any, Dict
from langchain.base_language import BaseLanguageModel
from langchain.schema import AIMessage, HumanMessage
from real_agents.adapters.memory import ConversationReActBufferMemory
from real_agents.adapters.executors.question_suggestion.chat_memory import QuestionSuggestionChainChatMemory
from real_agents.adapters.executors.question_suggestion.base import QuestionSuggestionChainBase
from real_agents.adapters.executors.question_suggestion.user_profile import QuestionSuggestionChainUserProfile
class QuestionSuggestionExecutor:
def run(
self,
user_intent: str,
llm: BaseLanguageModel,
num_questions: int = 3,
mode: str = "",
user_profile: str = "",
chat_memory: ConversationReActBufferMemory = ConversationReActBufferMemory(),
) -> Dict[str, Any]:
if mode == "base":
method = QuestionSuggestionChainBase.from_prompt(llm)
inputs = {"input_string": user_intent, "num_questions": num_questions}
elif mode == "user_profile":
method = QuestionSuggestionChainUserProfile.from_prompt(llm)
with open(user_profile) as f:
inputs = {"input_string": user_intent, "num_questions": num_questions, "user_description": f.read()}
elif mode == "chat_memory":
method = QuestionSuggestionChainChatMemory.from_prompt(llm)
raw_history = chat_memory.load_memory_variables({})["chat_history"]
refine_history = []
for msg in raw_history[-4:]:
if isinstance(msg, HumanMessage):
refine_history.append(f"Human: {msg.content}")
elif isinstance(msg, AIMessage):
refine_history.append(f"AI: {msg.content}")
inputs = {
"input_string": user_intent,
"num_questions": num_questions,
"chat_memory": "\n".join(refine_history),
}
else:
raise ValueError(f"Mode {mode} is not supported")
return method(inputs)
| [] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~adapters~memory~read_only_string_memory.py | from typing import Any, Dict, List
from langchain.schema import BaseMemory
class ReadOnlySharedStringMemory(BaseMemory):
"""A memory wrapper that is read-only and cannot be changed."""
memory: BaseMemory
@property
def memory_variables(self) -> List[str]:
"""Return memory variables."""
return self.memory.memory_variables
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Load memory variables from memory."""
prev_memory_state = self.memory.return_messages
self.memory.return_messages = False
memory_string = self.memory.load_memory_variables(inputs)
self.memory.return_messages = prev_memory_state
return memory_string
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Nothing should be saved or changed"""
pass
def clear(self) -> None:
"""Nothing to clear, got a memory like a vault."""
pass
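# Illustrative sketch (comments only): wrap an existing memory so a sub-chain can read
# the conversation history without being able to modify it.
#
#   shared = ConversationBufferMemory(memory_key="chat_history")   # any BaseMemory works
#   read_only = ReadOnlySharedStringMemory(memory=shared)
#   read_only.load_memory_variables({})   # reads from the wrapped memory
#   read_only.save_context({}, {})        # intentionally a no-op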
| [] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~adapters~models~anthropic.py | from typing import Any, Dict, List, Optional
from pydantic import Extra
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.llms.anthropic import _AnthropicCommon
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatMessage,
ChatResult,
HumanMessage,
SystemMessage,
)
class ChatAnthropic(BaseChatModel, _AnthropicCommon):
r"""Wrapper around Anthropic's large language model.
To use, you should have the ``anthropic`` python package installed, and the
environment variable ``ANTHROPIC_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from real_agents.adapters.models import ChatAnthropic
model = ChatAnthropic(model="<model_name>", anthropic_api_key="my-api-key")
"""
stop: Optional[List[str]] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.ignore
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "anthropic-chat"
def _convert_one_message_to_text(self, message: BaseMessage) -> str:
if isinstance(message, ChatMessage):
message_text = f"\n\n{message.role.capitalize()}: {message.content}"
elif isinstance(message, HumanMessage):
message_text = f"{self.HUMAN_PROMPT} {message.content}"
elif isinstance(message, AIMessage):
message_text = f"{self.AI_PROMPT} {message.content}"
elif isinstance(message, SystemMessage):
message_text = f"{self.HUMAN_PROMPT} <admin>{message.content}</admin>"
else:
raise ValueError(f"Got unknown type {message}")
return message_text
def _convert_messages_to_text(self, messages: List[BaseMessage]) -> str:
"""Format a list of messages into a single string with the necessary newlines.
Args:
messages (List[BaseMessage]): List of BaseMessage to combine.
Returns:
str: Combined string with necessary newlines.
"""
return "".join(self._convert_one_message_to_text(message) for message in messages)
def _convert_messages_to_prompt(self, messages: List[BaseMessage]) -> str:
"""Format a list of messages into a full prompt for the Anthropic model
Args:
messages (List[BaseMessage]): List of BaseMessage to combine.
Returns:
str: Combined string with necessary HUMAN_PROMPT and AI_PROMPT tags.
"""
if not self.AI_PROMPT:
raise NameError("Please ensure the anthropic package is loaded")
if not isinstance(messages[-1], AIMessage):
messages.append(AIMessage(content=""))
text = self._convert_messages_to_text(messages)
return text.rstrip() # trim off the trailing ' ' that might come from the "Assistant: "
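    # Illustrative sketch of the resulting prompt layout (HUMAN_PROMPT / AI_PROMPT come from
    # the anthropic package and are the "\n\nHuman:" / "\n\nAssistant:" markers):
    #   [HumanMessage(content="Hi")]  ->  "\n\nHuman: Hi\n\nAssistant:"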
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> ChatResult:
prompt = self._convert_messages_to_prompt(messages)
params: Dict[str, Any] = {"prompt": prompt, **self._default_params}
if self.stop is not None:
if stop is None:
stop = self.stop
else:
stop.extend(self.stop)
if stop:
params["stop_sequences"] = stop
if self.streaming:
completion = ""
stream_resp = self.client.completion_stream(**params)
for data in stream_resp:
delta = data["completion"][len(completion) :]
completion = data["completion"]
if run_manager:
run_manager.on_llm_new_token(
delta,
)
else:
response = self.client.completion(**params)
completion = response["completion"]
message = AIMessage(content=completion)
return ChatResult(generations=[ChatGeneration(message=message)])
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
) -> ChatResult:
prompt = self._convert_messages_to_prompt(messages)
params: Dict[str, Any] = {"prompt": prompt, **self._default_params}
if stop:
params["stop_sequences"] = stop
if self.streaming:
completion = ""
stream_resp = await self.client.acompletion_stream(**params)
async for data in stream_resp:
delta = data["completion"][len(completion) :]
completion = data["completion"]
if run_manager:
await run_manager.on_llm_new_token(
delta,
)
else:
response = await self.client.acompletion(**params)
completion = response["completion"]
message = AIMessage(content=completion)
return ChatResult(generations=[ChatGeneration(message=message)])
| [] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~adapters~memory~buffer.py | from typing import Any, Dict, List, Optional, Tuple
from pydantic import root_validator
from langchain.memory.utils import get_prompt_input_key
from langchain.base_language import BaseLanguageModel
from langchain.schema import BaseMessage, get_buffer_string
from langchain.memory.chat_memory import BaseChatMemory, BaseMemory
from real_agents.adapters.data_model import DataModel, MessageDataModel
class ConversationBufferMemory(BaseChatMemory):
"""Buffer for storing conversation memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
memory_key: str = "history" #: :meta private:
@property
def buffer(self) -> Any:
"""String buffer of memory."""
if self.return_messages:
return self.chat_memory.messages
else:
return get_buffer_string(
self.chat_memory.messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
class ConversationStringBufferMemory(BaseMemory):
"""Buffer for storing conversation memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
"""Prefix to use for AI generated responses."""
buffer: str = ""
output_key: Optional[str] = None
input_key: Optional[str] = None
memory_key: str = "history" #: :meta private:
@root_validator()
def validate_chains(cls, values: Dict) -> Dict:
"""Validate that return messages is not True."""
if values.get("return_messages", False):
raise ValueError("return_messages must be False for ConversationStringBufferMemory")
return values
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
output_key = list(outputs.keys())[0]
else:
output_key = self.output_key
human = f"{self.human_prefix}: " + inputs[prompt_input_key]
ai = f"{self.ai_prefix}: " + outputs[output_key]
self.buffer += "\n" + "\n".join([human, ai])
def clear(self) -> None:
"""Clear memory contents."""
self.buffer = ""
class ConversationReActBufferMemory(BaseChatMemory):
"""Buffer for storing conversational ReAct memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
memory_key: str = "history" #: :meta private:
max_token_limit: int = 2000
llm: BaseLanguageModel = None
style: str = "code"
@property
def observation_prefix(self) -> str:
"""Prefix to append the observation with."""
return "Observation: "
@property
def action_prefix(self) -> str:
"""Prefix to append the action with."""
return "Action:"
@property
def llm_prefix(self) -> str:
"""Prefix to append the llm call with."""
return "Thought:"
@property
def llm_final(self) -> str:
"""Final Answer"""
@property
def buffer(self) -> List[BaseMessage]:
"""String buffer of memory."""
if self.return_messages:
return self.chat_memory.messages
else:
return get_buffer_string(
self.chat_memory.messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
def _get_input_output(self, inputs: Dict[str, Any], outputs: Dict[str, Any]) -> Tuple[str, str]:
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
if self.output_key is None:
if len(outputs) == 1:
output_key = list(outputs.keys())[0]
return inputs[prompt_input_key], outputs[output_key]
else:
assert "intermediate_steps" in outputs, "intermediate_steps must in outputs when output_key length > 1"
intermediate_message = ""
for action, full_observation in outputs["intermediate_steps"]:
intermediate_message += "\n{\n"
intermediate_message += (
'\t"action": "{}"'.format(action.tool) + "\n"
) # todo: move to schema, as well as the one in prompt
intermediate_message += '\t"action_input": "{}"'.format(action.tool_input) + "\n"
intermediate_message += "}\n"
observation = full_observation
if isinstance(full_observation, DataModel):
llm_raw_observation = full_observation.get_llm_side_data()
observation = MessageDataModel.extract_tool_response_for_llm(
llm_raw_observation, tool_style=self.style
)
intermediate_message += "{}\n".format(observation)
output = intermediate_message + outputs[list(outputs.keys())[0]]
return inputs[prompt_input_key], output
else:
output_key = self.output_key
return inputs[prompt_input_key], outputs[output_key]
def fit_max_token_limit(self):
from real_agents.adapters.data_model import MessageDataModel
# if self.llm != None:
buffer = self.chat_memory.messages
# curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
curr_buffer_length = MessageDataModel._count_tokens("\n".join([_.content for _ in buffer]))
if curr_buffer_length > self.max_token_limit:
while curr_buffer_length > self.max_token_limit:
buffer.pop(0)
curr_buffer_length = MessageDataModel._count_tokens("\n".join([_.content for _ in buffer]))
# curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
self.chat_memory.messages = buffer
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer. Pruned."""
super().save_context(inputs, outputs)
self.fit_max_token_limit()
| [] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~adapters~executors~question_suggestion~chat_memory.py | from __future__ import annotations
from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from real_agents.adapters.executors.question_suggestion.base import QuestionSuggestionChainBase
from real_agents.adapters.executors.question_suggestion.prompts import QUESTION_SUGGESTION_PROMPT_CHAT_MEMORY
class QuestionSuggestionChainChatMemory(QuestionSuggestionChainBase):
@classmethod
def from_prompt(cls, llm: BaseLanguageModel) -> QuestionSuggestionChainChatMemory:
"""Load from user profile prompt."""
llm_chain = LLMChain(llm=llm, prompt=QUESTION_SUGGESTION_PROMPT_CHAT_MEMORY)
return cls(llm_chain=llm_chain)
| [] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~adapters~executors~chat_executor.py | from typing import Any, Dict
from langchain.base_language import BaseLanguageModel
from langchain.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
)
from langchain.chains import ConversationChain
from real_agents.adapters.executors.base import BaseExecutor
from real_agents.adapters.memory import ConversationBufferMemory
class ChatExecutor(BaseExecutor):
"""Chat Executor."""
_DEFAULT_TEMPLATE = "The following is a friendly conversation between a human and an AI. \
The AI is talkative and provides lots of specific details from its context. \
If the AI does not know the answer to a question, it truthfully says it does not know."
output_key: str = "result"
def __init__(self) -> None:
"""Initialize the executor"""
self.memory = ConversationBufferMemory(return_messages=True)
def run(
self,
user_intent: str,
llm: BaseLanguageModel,
verbose: bool = True,
) -> Dict[str, Any]:
"""Run the executor.
Args:
user_intent: User intent to execute.
grounding_source: Grounding source to execute the program on.
llm: Language model to use.
verbose: Whether to print the logging.
Returns:
Result of string.
"""
prompt = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(self._DEFAULT_TEMPLATE),
MessagesPlaceholder(variable_name="history"),
HumanMessagePromptTemplate.from_template("{input}"),
]
)
method = ConversationChain(
llm=llm,
prompt=prompt,
verbose=verbose,
memory=self.memory,
)
result = method.predict(input=user_intent)
output = {self.output_key: result}
return output
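# Illustrative sketch (comments only): the executor keeps its own buffer memory, so
# repeated calls share conversation history. `ChatOpenAI` is an assumption for illustration.
#
#   executor = ChatExecutor()
#   llm = ChatOpenAI()
#   print(executor.run("Hello!", llm=llm)["result"])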
| [
"The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.",
"{input}"
] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~adapters~schema.py | from typing import NamedTuple
from langchain import SQLDatabase
from sqlalchemy import text
from sqlalchemy.engine import Row
from tabulate import tabulate
from typing import List, Any
class AgentTransition(NamedTuple):
"""Agent's transition to take."""
return_values: dict
log: str
EMPTY_RESULT_STR = "NONE" # to show NONE result in front-end.
class SQLDatabase(SQLDatabase):
@staticmethod
def _pretty_format(headers: Any, result: List[Row]) -> str:
dicts = [dict(zip(headers, row)) for row in result]
tab_result = tabulate(tabular_data=dicts, headers="keys", tablefmt="psql")
if tab_result == "":
return EMPTY_RESULT_STR
return tab_result
def run(self, command: str, fetch: str = "all") -> str:
"""Execute a SQL command and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
"""
with self._engine.begin() as connection:
if self._schema is not None:
connection.exec_driver_sql(f"SET search_path TO {self._schema}")
cursor = connection.execute(text(command))
if cursor.returns_rows:
headers = cursor.keys()
if fetch == "all":
result = cursor.fetchall()
elif fetch == "one":
# result = cursor.fetchone()[0] # type: ignore
result = [cursor.fetchone()] # type: ignore
else:
raise ValueError("Fetch parameter must be either 'one' or 'all'")
# pretty format
tab_result = self._pretty_format(headers, result)
return tab_result
return ""
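if __name__ == "__main__":
    # Minimal sketch of the psql-style rendering; the header and rows are made-up sample data.
    print(SQLDatabase._pretty_format(["id", "name"], [(1, "alice"), (2, "bob")]))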
| [] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~adapters~models~__init__.py | from langchain.chat_models.google_palm import ChatGooglePalm
from real_agents.adapters.models.anthropic import ChatAnthropic
from real_agents.adapters.models.openai import ChatOpenAI
from real_agents.adapters.models.azure_openai import AzureChatOpenAI
__all__ = [
"ChatOpenAI",
"ChatAnthropic",
"ChatGooglePalm",
"AzureChatOpenAI",
]
type_to_cls_dict = {
"chat_anthropic": ChatAnthropic,
"chat_google_palm": ChatGooglePalm,
"chat_openai": ChatOpenAI,
"azure_chat_openai": AzureChatOpenAI,
}
| [] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~adapters~agent_helpers~output_parser.py | from __future__ import annotations
from typing import Optional, Union
from pydantic import Extra
from langchain.schema import (
AgentAction,
AgentFinish,
)
from real_agents.adapters.agent_helpers.agent import AgentOutputParser
from real_agents.adapters.schema import AgentTransition
class ConversationOutputParser(AgentOutputParser):
class Config:
"""Configuration for this pydantic object."""
extra = Extra.allow
arbitrary_types_allowed = True
def get_format_instructions(self, app_name="copilot") -> str:
from real_agents.data_agent.copilot_prompt import FORMAT_INSTRUCTIONS as COPILOT_FORMAT_INSTRUCTIONS
from real_agents.plugins_agent.plugin_prompt import FORMAT_INSTRUCTIONS as PLUGINS_FORMAT_INSTRUCTIONS
from real_agents.web_agent.webot_prompt import FORMAT_INSTRUCTIONS as WEBOT_FORMAT_INSTRUCTIONS
if app_name == "copilot":
return COPILOT_FORMAT_INSTRUCTIONS
elif app_name == "webot":
return WEBOT_FORMAT_INSTRUCTIONS
elif app_name == "plugins":
return PLUGINS_FORMAT_INSTRUCTIONS
else:
raise ValueError(f"Unknown app_name {app_name}")
def parse(self, text: str) -> Union[AgentTransition, AgentAction, AgentFinish]:
cleaned_output = text.strip()
import re
def _extract_explanation(json_string: str) -> Optional[str]:
if "```" in json_string:
return json_string.split("```")[0]
else:
return None
def _extract_value(json_string: str, key: str) -> str:
pattern = re.compile(rf'"?{key}"?\s*:\s*("((?:[^"\\]|\\.)*)"|(\b[^,\s]*\b))', re.MULTILINE)
match = pattern.search(json_string)
if match:
return match.group(1).replace('\\"', '"').replace("\\\\", "\\").strip('"').strip("'")
raise ValueError(f"Could not find {key} in {json_string}")
try:
_action = _extract_value(cleaned_output, "action")
_action_input = _extract_value(cleaned_output, "action_input")
if _action == "Final Answer":
return AgentFinish({"output": _action_input}, cleaned_output)
# Transition sentence should only be used not final answer.
_explanation = _extract_explanation(cleaned_output)
return AgentAction(_action, _action_input, cleaned_output)
except Exception:
if cleaned_output.startswith("Action:"):
lines = cleaned_output.splitlines()
action = lines[1].strip()
import textwrap
action_input = textwrap.dedent("\n".join(lines[2:])).strip()
return AgentAction(action, action_input, cleaned_output)
return AgentFinish({"output": cleaned_output}, cleaned_output)
@property
def _type(self) -> str:
return "conversational_chat"
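if __name__ == "__main__":
    # Minimal sketch: parsing a typical model reply (the reply text is a made-up example
    # of the JSON-style format requested by the format instructions).
    parser = ConversationOutputParser()
    reply = '{"action": "Final Answer", "action_input": "There are 42 rows in the table."}'
    print(parser.parse(reply))  # -> AgentFinish with output "There are 42 rows in the table."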
| [] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~adapters~callbacks~streaming_stdout.py | """Callback Handler streams to stdout on new llm token."""
import sys
from typing import Any, Dict, List, Union
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, LLMResult
class StreamingStdOutCallbackHandler(BaseCallbackHandler):
"""Callback handler for streaming. Only works with LLMs that support streaming."""
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> None:
"""Run when LLM starts running."""
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run on new LLM token. Only available when streaming is enabled."""
sys.stdout.write(token)
sys.stdout.flush()
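    # Minimal usage sketch: each streamed token is written to stdout immediately, e.g.
    #   handler = StreamingStdOutCallbackHandler()
    #   for tok in ["Hello", ", ", "world", "!\n"]:
    #       handler.on_llm_new_token(tok)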
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
def on_llm_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
"""Run when LLM errors."""
def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain starts running."""
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
def on_chain_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
"""Run when chain errors."""
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **kwargs: Any) -> None:
"""Run when tool starts running."""
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
pass
def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running."""
def on_tool_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
"""Run when tool errors."""
def on_text(self, text: str, **kwargs: Any) -> None:
"""Run on arbitrary text."""
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run on agent end."""
| [] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~adapters~models~azure_openai.py | """Azure OpenAI chat wrapper."""
from __future__ import annotations
import logging
from typing import Any, Dict, Mapping
from pydantic import root_validator
from real_agents.adapters.models.openai import ChatOpenAI
from langchain.schema import ChatResult
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class AzureChatOpenAI(ChatOpenAI):
"""Wrapper around Azure OpenAI Chat Completion API. To use this class you
must have a deployed model on Azure OpenAI. Use `deployment_name` in the
constructor to refer to the "Model deployment name" in the Azure portal.
In addition, you should have the ``openai`` python package installed, and the
following environment variables set or passed to the constructor in lower case:
- ``OPENAI_API_TYPE`` (default: ``azure``)
- ``OPENAI_API_KEY``
- ``OPENAI_API_BASE``
- ``OPENAI_API_VERSION``
For example, if you have `gpt-35-turbo` deployed, with the deployment name
`35-turbo-dev`, the constructor should look like:
.. code-block:: python
AzureChatOpenAI(
deployment_name="35-turbo-dev",
openai_api_version="2023-03-15-preview",
)
Be aware the API version may change.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
"""
deployment_name: str = ""
openai_api_type: str = "azure"
openai_api_base: str = ""
openai_api_version: str = ""
openai_api_key: str = ""
openai_organization: str = ""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values,
"openai_api_key",
"OPENAI_API_KEY",
)
openai_api_base = get_from_dict_or_env(
values,
"openai_api_base",
"OPENAI_API_BASE",
)
openai_api_version = get_from_dict_or_env(
values,
"openai_api_version",
"OPENAI_API_VERSION",
)
openai_api_type = get_from_dict_or_env(
values,
"openai_api_type",
"OPENAI_API_TYPE",
)
openai_organization = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
try:
import openai
openai.api_type = openai_api_type
openai.api_base = openai_api_base
openai.api_version = openai_api_version
openai.api_key = openai_api_key
if openai_organization:
openai.organization = openai_organization
except ImportError:
raise ValueError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
try:
values["client"] = openai.ChatCompletion
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
raise ValueError("n must be 1 when streaming.")
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return {
**super()._default_params,
"engine": self.deployment_name,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**self._default_params}
@property
def _llm_type(self) -> str:
return "azure-openai-chat"
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
for res in response["choices"]:
if res.get("finish_reason", None) == "content_filter":
raise ValueError(
"Azure has not provided the response due to a content"
" filter being triggered"
)
return super()._create_chat_result(response)
| [] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~plugins_agent~executors~plugin_executor.py | """Executor that manages plugin calling."""
from __future__ import annotations
from typing import Any, Callable, Dict, Union
from pydantic import BaseModel, Extra
from langchain.base_language import BaseLanguageModel
from real_agents.adapters.data_model import SpecModel
from real_agents.plugins_agent import APICallingChain
from real_agents.plugins_agent.plugins.utils import load_plugin_elements_by_name
from real_agents.adapters.data_model.utils import indent_multiline_string
class PluginExecutor(BaseModel):
"""Executor to call plugins that handle the spec showing, endpoint calling and output modeling."""
name: str
description: str
spec_model: SpecModel
meta_info: Dict[str, Any]
endpoint2caller: Dict[str, Callable]
endpoint2output_model: Dict[str, Callable]
api_key: str = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def full_description(self, add_extra_description_from_plugin=True):
description = (
self.description + "\nOpenAPI information:\n" + indent_multiline_string(
self.spec_model.prepare_spec())
if add_extra_description_from_plugin
else self.description
)
return description
@classmethod
def from_plugin_name(cls, plugin_name: str, ) -> PluginExecutor:
plugin_info = load_plugin_elements_by_name(plugin_name)
return cls(
name=plugin_info["name"],
description=plugin_info["description"],
spec_model=plugin_info["spec_model"],
meta_info=plugin_info["meta_info"],
endpoint2caller=plugin_info["endpoint2caller"],
endpoint2output_model=plugin_info["endpoint2output_model"],
)
def run(
self,
user_intent: str,
llm: BaseLanguageModel,
) -> Union[str, Dict[str, Any]]:
inputs = {"input_str": user_intent}
method = APICallingChain.from_llm_and_plugin(
llm,
self.meta_info,
self.spec_model,
self.endpoint2caller,
self.endpoint2output_model,
self.api_key,
)
output = method(inputs)
return output
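# Illustrative usage (plugin name assumed):
#   executor = PluginExecutor.from_plugin_name("klarna")
#   observation = executor.run("find running shoes under $100", llm=llm)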
def load_personnel_info(self):
new_endpoint2caller, new_endpoints2output_model = self.spec_model.load_personnel_info(
api_key=self.api_key)
self.endpoint2caller = self.endpoint2caller | new_endpoint2caller
self.endpoint2output_model = self.endpoint2output_model | new_endpoints2output_model
| [] |
2024-01-10 | xlang-ai/OpenAgents | backend~api~chat_webot.py | from time import sleep
import copy
import redis
import json
import pickle
import traceback
from flask import Response, request, stream_with_context
from typing import Dict, Union
import os
from langchain.schema import HumanMessage, SystemMessage
from backend.api.language_model import get_llm
from backend.main import app, message_id_register, message_pool, logger
from backend.utils.streaming import single_round_chat_with_agent_streaming
from backend.schemas import OVERLOAD, NEED_CONTINUE_MODEL
from backend.schemas import DEFAULT_USER_ID
from real_agents.adapters.llm import BaseLanguageModel
from real_agents.adapters.agent_helpers import AgentExecutor, Tool
from real_agents.adapters.callbacks.agent_streaming import \
AgentStreamingStdOutCallbackHandler
from real_agents.adapters.models import ChatOpenAI
from real_agents.adapters.memory import ConversationReActBufferMemory
from real_agents.adapters.data_model import DataModel, JsonDataModel
from real_agents.adapters.interactive_executor import initialize_webot_agent
from real_agents.web_agent import WebBrowsingExecutor, WebotExecutor
r = redis.Redis(host=os.getenv("REDIS_SERVER"), port=6379, db=0) # adjust host/port/db as needed
# webot and webot_status are stored in Redis because the two global variables cannot be modified and shared reliably across processes
# FIXME: webot is currently stored without message_id or chat_id info, so it can only be used for one chat at a time
# FIXME: webot_status is stored per chat_id; if the status is not reset after a message ends abnormally (e.g. the message is interrupted), it will be wrongly reused for the next chat
def get_webot_from_redis(user_id: str, chat_id: str, ) -> WebBrowsingExecutor:
data = r.get(f'webot_{user_id}_{chat_id}')
if data is not None:
webot = pickle.loads(data)
else:
# initialize a webot with a None instruction if the webot does not exist yet
webot = WebBrowsingExecutor(None)
save_webot_to_redis(user_id, chat_id, webot)
return webot
def save_webot_to_redis(user_id: str, chat_id: str, webot: WebBrowsingExecutor, ):
r.set(f'webot_{user_id}_{chat_id}', pickle.dumps(webot))
def get_webot_status_from_redis(user_id: str, chat_id: str):
webot_status_json = r.get(f'webot_status_{user_id}_{chat_id}')
if webot_status_json is not None:
webot_status = json.loads(webot_status_json)
return webot_status
else:
return {}
def save_webot_status_to_redis(user_id: str, chat_id: str, webot_status: Dict):
r.set(f'webot_status_{user_id}_{chat_id}', json.dumps(webot_status))
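# webot_status has the shape {"webot_status": "idle" | "running", "url": <start url or None>} (see reset_webot_status below)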
def reset_webot(user_id: str, chat_id: str):
webot = WebBrowsingExecutor(None)
save_webot_to_redis(user_id, chat_id, webot)
def reset_webot_status(user_id: str, chat_id: str):
webot_status = {"webot_status": "idle", "url": None}
save_webot_status_to_redis(user_id, chat_id, webot_status)
# this function has been deprecated
def get_plan(instruction: str, start_url: str, chat_llm: ChatOpenAI):
# fixme: Move this into a separate chain or executors to decompose the LLMs
system_message = f"""
You are a planner to assist another browser automation assistant.
Here is the instruction for the other assistant:
```
You MUST take one of the following actions. NEVER EVER EVER make up actions that do not exist:
1. click(element): Clicks on an element
2. setValue(element, value: string): Focuses on and sets the value of an input element
3. finish(): Indicates the task is finished
4. fail(): Indicates that you are unable to complete the task
You will be given a task to perform and the current state of the DOM. You will also be given previous actions that you have taken. You may retry a failed action up to one time.
This is an example of an action:
<Thought>I should click the add to cart button</Thought>
<Action>click(223)</Action>
You MUST always include the <Thought> and <Action> open/close tags or else your response will be marked as invalid.
Rules you MUST follow:
1. You must only take one step at a time. You cannot take multiple actions in a single response.
2. You should not plan an action whose only purpose is to present the result to the user; you only need to perform the available actions. If the info on the current page is enough for the user to solve the problem, you should finish.
```
Now your responsibility is to give a step-by-step plan according to user's instruction. This plan will be given to the assistant as a reference when it is performing tasks.
""".strip()
human_message = f"""
The user requests the following task:
{instruction}
Now you are at {start_url}
Provide a plan to do this (you can use a pseudo description like the one below to describe an item).
Here is an example case:
request: Go to google calendar to schedule a meeting
current url: "https://google.com"
example plan:
1. setValue(searchBar, "google calendar")
2. click(search)
3. click(the item with title of google calendar)
4.1 if user has logged in
do nothing
4.2 if user hasn't logged in
do login
5. click(create event button)
6. setValue(event title input bar, "meeting")
7. click(save event button)
8. finish()
""".strip()
messages = [SystemMessage(content=system_message),
HumanMessage(content=human_message)]
response = chat_llm(messages).content
return response
def create_webot_interaction_executor(
llm: BaseLanguageModel,
llm_name: str,
user_id: str,
chat_id: str
) -> AgentExecutor:
"""Creates an agent executor for interaction.
Args:
llm: The language model to use.
llm_name: The name of the language model.
user_id: The user id string.
chat_id: The chat id string.
Returns:
An agent executor.
"""
# Initialize memory
memory = ConversationReActBufferMemory(memory_key="chat_history",
return_messages=True, max_token_limit=10000)
class RunWebot:
def __init__(self, webot: WebotExecutor, llm: BaseLanguageModel, user_id: str,
chat_id: str):
self.llm = llm
self.webot = webot
self.user_id = user_id
self.chat_id = chat_id
def run(self, term: str) -> Union[str, Dict, DataModel]:
try:
user_id = self.user_id
chat_id = self.chat_id
reset_webot(user_id=user_id, chat_id=chat_id)
reset_webot_status(user_id=user_id, chat_id=chat_id)
raw_observation = self.webot.run(user_intent=term, llm=self.llm)
instruction, start_url = raw_observation["instruction"], \
raw_observation["start_url"]
webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id)
webot.instruction = instruction
# webot.plan = get_plan(instruction, start_url)
webot.plan = ""
save_webot_to_redis(user_id=user_id, chat_id=chat_id, webot=webot)
webot_status = {
"webot_status": "running",
"url": start_url
}
save_webot_status_to_redis(user_id=user_id, chat_id=chat_id,
webot_status=webot_status)
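# Poll Redis until the webot is marked finished, interrupted, failed, or errored (the browsing side updates this state in Redis).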
while True:
webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id)
if webot.finish or webot.interrupt or webot.error or webot.fail:
break
else:
sleep(0.5)
save_webot_status_to_redis(user_id=user_id, chat_id=chat_id,
webot_status={"webot_status": "idle",
"url": None})
webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id)
webot.instruction = None
save_webot_to_redis(user_id=user_id, chat_id=chat_id, webot=webot)
if webot.finish:
webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id)
action_history = webot.action_history
last_page = webot.pages_viewed[-1]
observation = JsonDataModel.from_raw_data(
{
"success": True,
"result": json.dumps({"action_history": action_history,
"last_page": last_page}, indent=4),
"intermediate_steps": json.dumps(
{"instruction": instruction, "start_url": start_url},
indent=4)
}
)
return observation
if webot.fail:
observation = JsonDataModel.from_raw_data(
{
"success": True,
"result": "The webot failed to execute the instruction.",
"intermediate_steps": json.dumps(
{"instruction": instruction, "start_url": start_url},
indent=4)
}
)
return observation
if webot.interrupt:
observation = JsonDataModel.from_raw_data(
{
"success": False,
"result": "The web browsing is interrupted by user.",
"intermediate_steps": json.dumps(
{"instruction": instruction, "start_url": start_url},
indent=4)
}
)
return observation
if webot.error:
observation = JsonDataModel.from_raw_data(
{
"success": False,
"result": "Error occurs during web browsing.",
"intermediate_steps": json.dumps(
{"instruction": instruction, "start_url": start_url},
indent=4)
}
)
return observation
except Exception as e:
print(traceback.format_exc())
observation = JsonDataModel.from_raw_data(
{
"success": False,
"result": f"Failed in web browsing with the input: {term}, please try again later.",
"intermediate_steps": json.dumps({"error": str(e)})
}
)
return observation
webot = WebotExecutor.from_webot()
llm = copy.deepcopy(llm)
run_webot = RunWebot(webot, llm, chat_id=chat_id, user_id=user_id)
tools = [Tool(name=webot.name, func=run_webot.run, description=webot.description)]
continue_model = llm_name if llm_name in NEED_CONTINUE_MODEL else None
interaction_executor = initialize_webot_agent(
tools, llm, continue_model, memory=memory, verbose=True
)
return interaction_executor
@app.route("/api/chat_xlang_webot", methods=["POST"])
def chat_xlang_webot() -> Dict:
"""Returns the chat response of web agent."""
try:
# Get request parameters
request_json = request.get_json()
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
chat_id = request_json["chat_id"]
user_intent = request_json["user_intent"]
parent_message_id = request_json["parent_message_id"]
llm_name = request_json["llm_name"]
temperature = request_json.get("temperature", 0.4)
stop_words = ["[RESPONSE_BEGIN]", "TOOL RESPONSE"]
kwargs = {
"temperature": temperature,
"stop": stop_words,
}
# Get language model
llm = get_llm(llm_name, **kwargs)
logger.bind(user_id=user_id, chat_id=chat_id, api="/chat",
msg_head="Request json").debug(request_json)
human_message_id = message_id_register.add_variable(user_intent)
ai_message_id = message_id_register.add_variable("")
stream_handler = AgentStreamingStdOutCallbackHandler()
# Build executor and run chat
# reset webot and status
reset_webot(user_id=user_id, chat_id=chat_id)
reset_webot_status(user_id=user_id, chat_id=chat_id)
interaction_executor = create_webot_interaction_executor(
llm=llm,
llm_name=llm_name,
chat_id=chat_id,
user_id=user_id
)
activated_message_list = message_pool.get_activated_message_list(user_id,
chat_id,
list(),
parent_message_id)
message_pool.load_agent_memory_from_list(interaction_executor.memory,
activated_message_list)
return stream_with_context(
Response(
single_round_chat_with_agent_streaming(
interaction_executor=interaction_executor,
user_intent=user_intent,
human_message_id=human_message_id,
ai_message_id=ai_message_id,
user_id=user_id,
chat_id=chat_id,
message_list=activated_message_list,
parent_message_id=parent_message_id,
stream_handler=stream_handler,
llm_name=llm_name,
app_type="webot",
),
content_type="application/json",
)
)
except Exception as e:
traceback.print_exc()
return Response(response=None,
status=f"{OVERLOAD} backend is currently overloaded")
| [] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~data_agent~executors~kaggle_data_loading_executor.py | import json
import os
import re
import shutil
import uuid
from typing import Any, Dict, List, Tuple
import requests
from bs4 import BeautifulSoup
from loguru import logger
from kaggle.api.kaggle_api_extended import KaggleApi
from langchain.base_language import BaseLanguageModel
from langchain import PromptTemplate
from real_agents.adapters.llm import LLMChain
class KaggleDataLoadingExecutor:
KAGGLE_TEMPLATE = """
Determine whether the user input aims to (1) connect to a specific kaggle dataset that the user mentions its kaggle path
(2) search for relevant kaggle datasets given the information the user provides.
You need to output the action wrapped in <action></action>, the action space is ['connect', 'search']. You also need
to output the keywords wrapped in <keywords></keywords>. For 'search', the keywords MUST be ONE search term/word to be
searched by kaggle api. Note keywords CAN'T be too specific or contain trivial word(e.g., dataset), make sure there are various search results. For
'connect', the keywords are the kaggle dataset path.
Input: {input}
Begin."
"""
def run(
self,
user_intent: str,
llm: BaseLanguageModel,
search_top_k: int = 4,
) -> Dict[str, Any]:
logger.bind(msg_head="KaggleDataLoader inputs").trace(user_intent)
kaggle_template = PromptTemplate(
input_variables=["input"],
template=self.KAGGLE_TEMPLATE,
)
method = LLMChain(llm=llm, prompt=kaggle_template)
result = method.run({"input": user_intent})
logger.bind(msg_head="LLM result").trace(result)
kaggle_action, keywords = self._parse_output(result)
logger.bind(msg_head="Kaggle action").trace(kaggle_action)
logger.bind(msg_head="Kaggle keywords").trace(keywords)
"""Use export to manage the Kaggle API key for now."""
api = KaggleApi()
api.authenticate()
if kaggle_action == "connect":
kaggle_output_info = keywords
elif kaggle_action == "search":
kaggle_output_info = self._search_kaggle(api, keywords, search_top_k)
else:
# Regard the rest as "search" action now
kaggle_action = "search"
kaggle_output_info = self._search_kaggle(api, keywords, search_top_k)
return {"kaggle_action": kaggle_action, "kaggle_output_info": kaggle_output_info}
def _search_kaggle(self, api: KaggleApi, keywords: str, search_top_k: int) -> List[Dict]:
"""Search kaggle datasets given the keywords."""
# Search for datasets
datasets = []
for page in range(1, 10):
try:
searched_datasets = api.dataset_list(search=keywords, page=page, max_size=20000, file_type="csv")
logger.bind(msg_head="Kaggle search result").trace(searched_datasets)
datasets.extend(searched_datasets)
if len(datasets) >= search_top_k:
datasets = datasets[:search_top_k]
break
if len(searched_datasets) < 20:
# Default page_size is 20, less than 20 means no more datasets can be searched
break
except Exception:
break
# Get url, cover image and some meta data for each dataset
if len(datasets) == 0:
# No datasets found
datasets = api.dataset_list(max_size=20000, page=1, file_type="csv")[:search_top_k]
output_info = self._get_dataset_meta_info(api, datasets)
return output_info
def _get_dataset_meta_info(self, api: KaggleApi, datasets: List) -> List[Dict]:
"""Get dataset key meta-data to be shown to the user."""
output_info = []
for dataset in datasets:
dataset_hash_id = str(uuid.uuid4())
dataset_tmp_dir = os.path.join(".kaggle_meta/", dataset_hash_id)
os.makedirs(dataset_tmp_dir, exist_ok=True)
api.dataset_metadata(dataset.ref, path=dataset_tmp_dir)
with open(os.path.join(dataset_tmp_dir, "dataset-metadata.json")) as f:
dataset_metadata = json.load(f)
shutil.rmtree(os.path.join(".kaggle_meta/", dataset_hash_id))
dataset_url = "https://www.kaggle.com/datasets/" + dataset.ref
# Crawling the dataset page to get the dataset image
dataset_cover_image_url = self._crawl_dataset_cover_image(dataset_url)
logger.bind(msg_head="Dataset cover image url").trace(dataset_cover_image_url)
output_metadata = {
"id": dataset_metadata["id"],
"id_no": dataset_metadata["id_no"],
"title": dataset_metadata["title"],
"subtitle": dataset_metadata["subtitle"],
"total_views": dataset_metadata["totalViews"],
"total_votes": dataset_metadata["totalVotes"],
"total_downloads": dataset_metadata["totalDownloads"],
"url": dataset_url,
"cover_image_url": dataset_cover_image_url,
}
output_info.append(output_metadata)
return output_info
def _crawl_dataset_cover_image(
self, url: str, default_image_path="https://images.datacamp.com/image/upload/v1647430873/kaggle_logo_icon_168474_4eb653edb6.png"
) -> str:
"""Crawl the kaggle dataset cover image from the dataset url."""
# Get the HTML content of the webpage
response = requests.get(url)
# Parse the HTML with BeautifulSoup
soup = BeautifulSoup(response.text, "html.parser")
# Find the image element
try:
kaggle_component_element = soup.find("script", {"class": "kaggle-component"})
match = re.search(r'"coverImageUrl":\s*"([^"]*)"', kaggle_component_element.string)
image_url = match.group(1)
except Exception:
import traceback
traceback.print_exc()
image_url = default_image_path
return image_url
def _parse_output(self, content: str) -> Tuple[str, str]:
"""Parse the output of the LLM to get the kaggle action and keywords."""
# Using 'html.parser' to parse the content
soup = BeautifulSoup(content, "html.parser")
# Parsing the tag and summary contents
try:
action = soup.find("action").text
except Exception:
action = ""
try:
keywords = soup.find("keywords").text
except Exception:
keywords = ""
return action, keywords
| [
"\n\nDetermine whether the user input aims to (1) connect to a specific kaggle dataset that the user mentions its kaggle path\n(2) search for relevant kaggle datasets given the information the user provides.\n\nYou need to output the action wrapped in <action></action>, the action space is ['connect', 'search']. You also need\nto output the keywords wrapped in <keywords></keywords>. For 'search', the keywords MUST be ONE search term/word to be\nsearched by kaggle api. Note keywords CAN'T be too specific or contain trivial word(e.g., dataset), make sure there are various search results. For\n'connect', the keywords are the kaggle dataset path.\n\nInput: {input}\n\nBegin.\"\n",
"input",
"connect"
] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~data_agent~copilot.py | """An agent designed to hold a conversation in addition to using tools."""
from __future__ import annotations
from typing import Any, List, Optional, Sequence, Tuple, Union
from typing_extensions import override
from pydantic import Field
from langchain.agents.agent import AgentOutputParser
from langchain.agents.utils import validate_tools_single_input
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, Callbacks
from langchain.chains import LLMChain
from langchain.schema import AgentAction, AgentFinish, HumanMessage, AIMessage, BaseMessage, BaseOutputParser
from langchain.tools.base import BaseTool
from real_agents.adapters.agent_helpers.agent import Agent
from real_agents.adapters.agent_helpers.output_parser import ConversationOutputParser
from real_agents.data_agent.copilot_prompt import PREFIX, SUFFIX, TEMPLATE_TOOL_RESPONSE, fake_continue_prompt
from real_agents.adapters.data_model import DataModel, MessageDataModel
from langchain.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
)
class ExceptionTool(BaseTool):
name = "_Exception"
description = "Exception tool"
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
return query
async def _arun(
self,
query: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
return query
class ConversationalChatAgent(Agent):
"""An agent designed to hold a conversation in addition to using data tools."""
output_parser: ConversationOutputParser = Field(default_factory=ConversationOutputParser)
template_tool_response: str = TEMPLATE_TOOL_RESPONSE
continue_model: Optional[str] = None
@classmethod
def _get_default_output_parser(cls, **kwargs: Any) -> ConversationOutputParser:
return ConversationOutputParser()
@property
def _agent_type(self) -> str:
raise NotImplementedError
@property
def observation_prefix(self) -> str:
"""Prefix to append the observation with."""
return "Observation: "
@property
def llm_prefix(self) -> str:
"""Prefix to append the llm call with."""
return "Thought:"
@classmethod
def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
super()._validate_tools(tools)
validate_tools_single_input(cls.__name__, tools)
@classmethod
def create_prompt(
cls,
tools: Sequence[BaseTool],
system_message: str = PREFIX,
human_message: str = SUFFIX,
input_variables: Optional[List[str]] = None,
output_parser: Optional[BaseOutputParser] = None,
) -> BasePromptTemplate:
# tools
tool_strings = "\n".join([f"> {tool.name}: {tool.description}" for tool in tools])
tool_names = ", ".join([tool.name for tool in tools])
_output_parser = output_parser or cls._get_default_output_parser()
# format instructions for system message
format_instructions = _output_parser.get_format_instructions()
format_instructions = format_instructions.format(tool_names=tool_names)
# system message
system_message = system_message + f"{tool_strings}\n\n{format_instructions}"
# human input
final_prompt = human_message
if input_variables is None:
input_variables = ["input", "chat_history", "agent_scratchpad"]
messages = [
SystemMessagePromptTemplate.from_template(system_message),
MessagesPlaceholder(variable_name="chat_history"),
HumanMessagePromptTemplate.from_template(final_prompt),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
return ChatPromptTemplate(input_variables=input_variables, messages=messages)
@override
def _construct_scratchpad(self, intermediate_steps: List[Tuple[AgentAction, str]]) -> List[BaseMessage]:
"""Construct the scratchpad that lets the agent continue its thought process."""
thoughts: List[BaseMessage] = []
# Try to only use AI message for scratchpad
content = []
for idx, (action, full_observation) in enumerate(intermediate_steps):
content.append(MessageDataModel.extract_action_for_llm(action.log))
observation = full_observation
if isinstance(full_observation, DataModel):
llm_raw_observation = full_observation.get_llm_side_data()
observation = MessageDataModel.extract_tool_response_for_llm(llm_raw_observation)
tool_response = self.template_tool_response.format(
observation=str(observation), tool_names=self.allowed_tools
)
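# Only the most recent observation is wrapped in the tool-response template; earlier observations are appended as plain context.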
if idx == len(intermediate_steps) - 1:
content.append(tool_response)
else:
content.append(observation)
content_str = "\n".join(content)
thoughts.append(AIMessage(content=content_str))
if self.continue_model is not None and len(intermediate_steps) != 0:
thoughts.append(HumanMessage(content=fake_continue_prompt[self.continue_model]))
return thoughts
@override
def plan(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
callbacks: Callbacks = None,
**kwargs: Any,
) -> Union[AgentAction, AgentFinish]:
"""Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date,
along with observations
callbacks: Callbacks to run.
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
"""
full_inputs = self.get_full_inputs(intermediate_steps, **kwargs)
system_prompt = self.llm_chain.prompt.messages[0].format().content
system_prompt_tokens = MessageDataModel._count_tokens(system_prompt)
max_tokens = 8000
max_gen_tokens = 1000
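# Assumed budget: keep the prompt within an ~8k-token context window, reserving room for the system prompt and up to 1000 generated tokens.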
# FIXME: need more accurate token limit calculation
full_inputs = MessageDataModel.truncate_chat_history(
full_inputs, max_token=max_tokens - system_prompt_tokens - max_gen_tokens
)
full_output = self.llm_chain.predict(callbacks=callbacks, **full_inputs)
return self.output_parser.parse(full_output)
@classmethod
def from_llm_and_tools(
cls,
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
callbacks: Callbacks = None,
output_parser: Optional[AgentOutputParser] = None,
system_message: str = PREFIX,
human_message: str = SUFFIX,
input_variables: Optional[List[str]] = None,
**kwargs: Any,
) -> Agent:
"""Construct an agent from an LLM and tools."""
cls._validate_tools(tools)
_output_parser = output_parser or cls._get_default_output_parser()
prompt = cls.create_prompt(
tools,
system_message=system_message,
human_message=human_message,
input_variables=input_variables,
output_parser=_output_parser,
)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
)
tool_names = [tool.name for tool in tools]
return cls(
llm_chain=llm_chain,
allowed_tools=tool_names,
output_parser=_output_parser,
**kwargs,
)
| [
"system_messagefb864210-55d5-49f3-a738-3bf0259683edPLACEHOLDER\n\nPLACEHOLDER",
"Exception tool"
] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~plugins_agent~plugins~tool_selector.py | """Implementation of the Tool Selector that automates the selection of tools for the question (or sub-question)."""
import os
import pickle
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from tqdm import tqdm
from typing import Any
from real_agents.adapters.data_model import SpecModel
from langchain.embeddings.huggingface import HuggingFaceInstructEmbeddings
DEFAULT_TOOL_INSTRUCTION = "Represent the tool description for retrieval:"
DEFAULT_QUERY_INSTRUCTION = "Represent the question for retrieving tools that can be used to solve the question:"
PLUGIN_SPEC_FILE = "openapi.yaml"
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
EMBEDDING_CACHE_PATH = os.path.join(CURRENT_PATH, "..", "..", "..", "backend", "static", "tool_embeddings")
if not os.path.exists(EMBEDDING_CACHE_PATH):
os.makedirs(EMBEDDING_CACHE_PATH)
class ToolSelector:
"""
This class is used to select the appropriate tool list for the question.
"""
# add valid mode here if needed
valid_modes = ["embedding"]
"""
Example:
.. code-block:: python
mode_args = {"embedding": HuggingFaceInstructEmbeddings, "model_name": "hkunlp/instructor-large", "embed_instruction": "Represent the tool description for retrieval:", "query_instruction": "Represent the question for retrieving tools that can be used to solve the question:"}
tool_selector = ToolSelector(mode="embedding", mode_args=mode_args)
model_name = "hkunlp/instructor-large"
model_kwargs = {'device': 'cpu'}
hf = HuggingFaceInstructEmbeddings(
model_name=model_name, model_kwargs=model_kwargs
)
"""
user_id: str = None
chat_id: str = None
def __init__(self, tools_list: list = [], mode: str = "embedding", mode_args=None, api_key_pool: Any = None):
"""
Initialize the tool selector.
"""
if mode_args is None:
mode_args = {}
if mode not in self.valid_modes:
raise ValueError(f"Invalid mode '{mode}'. Valid modes are {self.valid_modes}")
self.tool_paths = [
plugin_file_path
for plugin_file_path in os.listdir(CURRENT_PATH)
if ".py" not in plugin_file_path
and plugin_file_path != "_scripts"
and plugin_file_path != "__pycache__"
and plugin_file_path != "README.md"
and plugin_file_path != "descriptions.json"
]
self.tool_list = tools_list
self.mode = mode
self.api_key_pool = api_key_pool
if mode == "embedding":
self._init_embedding(mode_args)
else:
raise ValueError(f"Unhandled mode '{mode}'.")
def _init_embedding(self, mode_args: dict):
embedding = mode_args.get("embedding", HuggingFaceInstructEmbeddings)
if embedding == HuggingFaceInstructEmbeddings:
model_name = mode_args.get("model_name", "hkunlp/instructor-large")
embed_instruction = mode_args.get("embed_instruction", DEFAULT_TOOL_INSTRUCTION)
query_instruction = mode_args.get("query_instruction", DEFAULT_QUERY_INSTRUCTION)
self.embedding = HuggingFaceInstructEmbeddings(
model_name=model_name, embed_instruction=embed_instruction, query_instruction=query_instruction
)
def get_tool_descriptions(self) -> list:
"""
Get the tool descriptions.
"""
descriptions = []
tool_paths = self.tool_paths
yaml_paths = [os.path.join(CURRENT_PATH, tool_name, PLUGIN_SPEC_FILE) for tool_name in tool_paths]
for yaml_path, plugin_file_path in tqdm(zip(yaml_paths, tool_paths), total=len(yaml_paths)):
if os.path.isdir(os.path.join(CURRENT_PATH, plugin_file_path)):
retrieved = False
try:
spec_model = SpecModel(yaml_path)
retrieved = True
except:
print("Error loading yaml", yaml_path)
if not retrieved:
description = "No description."
else:
description = (
spec_model.full_spec["info"]["description"] if "description" in spec_model.full_spec[
"info"] else "No description."
)
descriptions.append(description)
return descriptions
def get_api_key_from_tool_name(self, tool_name: str) -> str:
"""
Get the API key from the tool name.
"""
user_id = self.user_id
api_key_info = self.api_key_pool.get_pool_info_with_id(user_id, default_value=[])
if len([i for i in api_key_info if i["tool_name"] == tool_name]) != 0:
api_key = [i for i in api_key_info if i["tool_name"] == tool_name][0]["api_key"]
else:
api_key = None
return api_key
def check_plugin_valid(self, tool_path: str) -> bool:
"""
Check if the plugin is valid. Return False if the plugin requires an API key that the user has not provided, or if the plugin is not found.
"""
plugins = self.tool_list
# check if plugin exists and get the plugin if it exists
if len([i for i in plugins if i["name"].lower() == tool_path.lower()]) != 0:
plugin = [i for i in plugins if i["name"].lower() == tool_path.lower()][0]
else:
plugin = None
print(f"Plugin {tool_path} not found.")
# check if plugin requires an API key and if the user has provided one
if plugin is not None:
if plugin["require_api_key"] and self.get_api_key_from_tool_name(tool_path) == None:
return False
else:
return True
else:
return False
def load_query_from_message_list(self, message_list: list[dict[str, str]], user_intent: str) -> str:
"""
Load the query from the message list.
"""
"""
Example:
message_list = [{'message_type': 'human_message', 'message_content': 'buy nike shoes', 'message_id': 362, 'parent_message_id': -1}, {'message_type': 'ai_message', 'message_content': '', 'message_id': 363, 'parent_message_id': 362}]
"""
# concatenate all history messages into one single query
# message_list holds the chat history, so we append the user intent (the current message) to the end of it
query = ""
for message in message_list:
# Only concatenate human messages: tools are retrieved based on user intent, and AI messages can be long, which would skew the embedding
if "message_content" in message.keys() and "message_type" in message.keys() and message[
"message_type"] == 'human_message':
query += (message["message_content"] + " ")
else:
continue
query += user_intent
return query
def select_tools(self, query: str = "", top_k: int = 8):
"""
Select the top k tools based on the similarity between the query and the tool description.
"""
if query == "":
raise ValueError("Query cannot be empty.")
if self.mode not in self.valid_modes:
raise ValueError(f"Invalid mode '{self.mode}'. Valid modes are {self.valid_modes}")
if self.mode == "embedding":
return self._select_tools_embedding(query, top_k)
else:
raise ValueError(f"Unhandled mode '{self.mode}'.")
def _select_tools_embedding(self, query: str, top_k: int) -> list[str]:
embedding = self.embedding
# check if the embedding is InstructorEmbeddings
if isinstance(self.embedding, HuggingFaceInstructEmbeddings):
tool_embeddings = []
for name, description in zip(self.tool_paths, self.get_tool_descriptions()):
# Define file path for the cached embedding
tool_embedding_file = EMBEDDING_CACHE_PATH + "/" + name + ".pkl"
# Check if tool embedding is already cached
if os.path.isfile(tool_embedding_file):
with open(tool_embedding_file, "rb") as f:
tool_embedding = pickle.load(f)
# no cached embedding, compute and cache it
else:
tool_embedding = embedding.embed_documents([description])
with open(tool_embedding_file, "wb") as f:
pickle.dump(tool_embedding, f)
tool_embeddings.extend(tool_embedding)
query_embeddings = [embedding.embed_query(query)]
similarities = cosine_similarity(query_embeddings, tool_embeddings)
# eliminate invalid plugins
for idx, tool_path in enumerate(self.tool_paths):
if not self.check_plugin_valid(tool_path):
similarities[0][idx] = -1
# get indices of top k similarities
top_k_indices = np.argsort(similarities.flatten())[-top_k:]
top_k_indices = top_k_indices.tolist()
# return upper case tool names since tool id is the upper case of its name
return [tool_name.upper() for idx, tool_name in enumerate(self.tool_paths) if idx in top_k_indices]
| [] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~adapters~callbacks~agent_streaming.py | """Callback Handler streams to stdout on new llm token."""
from typing import Any, Dict, List, Union
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from real_agents.adapters.data_model import DataModel
class JSON_PDA:
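# JSON_PDA is a character-level pushdown automaton: it tracks backticks, braces and quotes so the
# streaming handler can classify each streamed character as plain text, a JSON key, or a JSON value.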
def __init__(self):
self.stack = []
self.state = "start"
self.json = {}
self.current_key = ""
self.current_value = ""
self.escape_next = False
def transition(self, char):
if self.escape_next:
# Add the escaped character to the current key or value and return
if self.state == "open_key_quote":
self.current_key += char
elif self.state == "open_value_quote" or self.state == "open_value_quote_brace":
self.current_value += char
self.escape_next = False
return
if char == "\\":
# The next character is an escaped character
self.escape_next = True
return
if self.state == "start":
if char == "{":
self.stack.append("{")
self.state = "open_brace"
elif char == "`":
self.state = "open_one_backtick"
self.stack.append("`")
elif self.state == "open_one_backtick":
if char == "`":
if self.stack[-1] == "`":
self.state = "open_two_backticks"
self.stack.append("`")
else:
while self.stack.pop() != "`":
pass
self.state = "start"
else:
self.stack.append(char)
elif self.state == "open_two_backticks":
if char == "`":
if self.stack[-1] == "`":
self.state = "after_backtick"
self.stack.append("`")
else:
while self.stack.pop() != "`":
pass
self.state = "start"
else:
self.stack.append(char)
elif self.state == "after_backtick":
if char == "\n":
self.state = "after_backtick_newline"
elif self.state == "after_backtick_newline":
if char == "{":
self.stack.append("{")
self.state = "open_brace"
elif char == "\n":
self.state = "after_backtick_newline"
else:
self.state = "in_block"
elif self.state == "in_block":
if char == "`":
self.stack.pop()
if len(self.stack) == 0:
self.state = "start"
elif self.state == "open_brace" or self.state == "comma":
if char == '"':
self.stack.append('"')
self.state = "open_key_quote"
self.current_key = ""
elif self.state == "open_key_quote" or self.state == "open_value_quote":
if char != '"':
if self.state == "open_key_quote":
self.current_key += char
else:
self.current_value += char
else:
self.stack.pop()
if self.state == "open_key_quote":
self.state = "close_key_quote"
else:
self.state = "close_value_quote"
elif self.state == "open_value_quote_brace":
if char == "{":
self.stack.append("{")
elif char == "}":
self.stack.pop()
if self.stack[-1] == "{" and self.stack[-2] != "{":
self.state = "close_value_quote"
self.current_value += char
elif self.state == "close_key_quote":
if char == ":":
self.state = "after_key"
elif self.state == "after_key":
if char == '"':
self.stack.append('"')
self.state = "open_value_quote"
self.current_value = ""
elif char == "{":
self.stack.append("{")
self.state = "open_value_quote_brace"
self.current_value = "{"
elif self.state == "close_value_quote":
self.json[self.current_key] = self.current_value
if char == ",":
self.state = "after_value"
elif char == "}":
self.stack.pop()
if len(self.stack) == 0:
self.state = "start"
elif len(self.stack) == 3:
self.state = "close_brace"
elif self.state == "after_value":
if char == '"':
self.stack.append('"')
self.state = "open_key_quote"
elif self.state == "close_brace":
if char == "`":
self.stack.pop()
if len(self.stack) == 0:
self.state = "start"
class AgentStreamingStdOutCallbackHandler(StreamingStdOutCallbackHandler):
is_end = False
generated_tokens: list = []
for_display: list = []
# Automaton for detecting JSON action blocks
pda = JSON_PDA()
llm_call_id = 0
_in_json = False
_in_key = False
_in_value = False
_direct_display = True
_normal_json = False
json_key: str = ""
json_tmp_stack: list = []
action_key_appear = False
@property
def always_verbose(self) -> bool:
"""Whether to call verbose callbacks even if verbose is False."""
return True
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> None:
self.is_end = False
self.generated_tokens = []
self.pda = JSON_PDA()
self.llm_call_id += 1
self._in_json = False
self._in_key = False
self._in_value = False
self._direct_display = True
self._normal_json = False
self.json_key = ""
self.json_tmp_stack = []
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""
Run on new LLM token. Only available when streaming is enabled.
The tokens that we can decide their types ('plain', 'identifier', 'key', 'action', 'action_input') are stored in `self.for_display`.
"""
self.generated_tokens.append(token)
# Automaton that monitors JSON blocks
for char in token:
self.pda.transition(char)
# Handle the logic of sentences and json blocks
_type = "plain"
if self.pda.state in ["open_brace", "open_one_backtick"]:
self._in_json = True
self._direct_display = False
self._normal_json = False
self.action_key_appear = False
if self._in_json and not self._normal_json:
_type = "identifier"
if self.pda.state == "in_block":
_type = "plain"
self._normal_json = True
if self.pda.state == "open_key_quote":
if self._in_key:
self.json_key += char
_type = "key"
self._in_key = True
else:
self._in_key = False
if self.pda.state == "open_value_quote" or self.pda.state == "open_value_quote_brace":
if self._in_value:
_type = self.json_key
self._in_value = True
else:
if self._in_value:
self.json_key = ""
self._in_value = False
if self.pda.state == "close_key_quote":
# Normal json block
if self.json_key not in ["action", "action_input"]:
for char_item in self.json_tmp_stack:
self.for_display.append(
{"text": char_item["text"], "type": "plain", "llm_call_id": self.llm_call_id}
)
self.json_tmp_stack = []
self.for_display.append({"text": char, "type": "plain", "llm_call_id": self.llm_call_id})
self._normal_json = True
continue
else:
if self.json_key == "action":
self.action_key_appear = True
elif self.json_key == "action_input" and self.action_key_appear:
# Action json block
for char_item in self.json_tmp_stack:
char_item["llm_call_id"] = self.llm_call_id
self.for_display.append(char_item)
self.json_tmp_stack = []
self._direct_display = True
else:
for char_item in self.json_tmp_stack:
self.for_display.append(
{"text": char_item["text"], "type": "plain", "llm_call_id": self.llm_call_id}
)
self.json_tmp_stack = []
self._direct_display = True
if self.pda.state == "start":
self._in_json = False
self.for_display.append(
{"text": char, "type": _type, "llm_call_id": self.llm_call_id}
) if self._direct_display else self.json_tmp_stack.append(
{"text": char, "type": _type, "llm_call_id": self.llm_call_id}
)
def on_llm_end(self, response, **kwargs: Any) -> None:
"""Run when LLM ends running."""
self.is_end = True
for char_item in self.json_tmp_stack:
self.for_display.append({"text": char_item["text"], "type": "plain", "llm_call_id": self.llm_call_id})
def on_tool_end(self, output: Union[DataModel, str], **kwargs: Any) -> None:
"""Run on tool end to add observation data model."""
self.for_display.append({"text": output, "type": "block", "llm_call_id": self.llm_call_id})
| [] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~adapters~callbacks~executor_streaming.py | from typing import Any
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
class ExecutorStreamingChainHandler(StreamingStdOutCallbackHandler):
is_end: bool = False
_all = []
@property
def always_verbose(self) -> bool:
"""Whether to call verbose callbacks even if verbose is False."""
return True
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
""""""
self._all.append(token)
| [] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~adapters~executors~question_suggestion~user_profile.py | from __future__ import annotations
from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from real_agents.adapters.executors.question_suggestion.base import QuestionSuggestionChainBase
from real_agents.adapters.executors.question_suggestion.prompts import QUESTION_SUGGESTION_PROMPT_USER_PROFILE
class QuestionSuggestionChainUserProfile(QuestionSuggestionChainBase):
@classmethod
def from_prompt(cls, llm: BaseLanguageModel) -> QuestionSuggestionChainUserProfile:
"""Load from user profile prompt."""
llm_chain = LLMChain(llm=llm, prompt=QUESTION_SUGGESTION_PROMPT_USER_PROFILE)
return cls(llm_chain=llm_chain)
| [] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~web_agent~executors~web_browsing_executor.py | """
Implementation of the WebBrowsingExecutor.
WebBrowsingExecutor takes start_url and instruction as input, iteratively perform the actions on the web, and return the result.
"""
from typing import Any, Dict, List
from langchain.base_language import BaseLanguageModel
from real_agents.web_agent.web_browsing.react.base import ReActWebotChain
from real_agents.web_agent.web_browsing.end2end.base import WebotChain
from real_agents.adapters.data_model.html import HTMLDataModel
# This executor is for the browser extension usage, not for the chat interface.
# For the chat interface webot executor, refer to xlang/real_agents/web_agent/executors/web_browsing_executor.py
class WebBrowsingExecutor:
"""
WebBrowsingExecutor takes a start URL and an instruction as input, iteratively performs actions on the web, and returns the result.
"""
def __init__(self, instruction: str, plan: str = "", mode: str = "react") -> None:
"""Initialize the executor"""
self.instruction: str = instruction
self.mode: str = mode
if self.mode == "react":
self.thoughts_taken: List[str] = []
self.actions_taken: List[str] = []
self.pages_viewed: List[Any] = []
self.plan: str = plan
@property
def finish(self):
return True if len(self.actions_taken) > 0 and "finish" in self.actions_taken[-1] else False
@property
def interrupt(self):
return True if len(self.actions_taken) > 0 and "interrupt" in self.actions_taken[-1] else False
@property
def error(self):
return True if len(self.actions_taken) > 0 and "error" in self.actions_taken[-1] else False
@property
def fail(self):
return True if len(self.actions_taken) > 0 and "fail" in self.actions_taken[-1] else False
@property
def action_history(self):
if self.mode == "basic":
action_history = "Action: "
for action in self.actions_taken:
action_history += action + " -> "
return action_history
elif self.mode == "react":
action_history = ""
for thought, action in zip(self.thoughts_taken, self.actions_taken):
action_history += thought + " -> " + action + " -> "
return action_history
else:
raise ValueError(f"The mode {self.mode} is not supported")
def run(
self,
page_info: Any,
llm: BaseLanguageModel,
) -> Dict[str, Any]:
model = HTMLDataModel.from_raw_data(raw_data=page_info)
processed_html = model.get_llm_side_data()
if self.mode == "basic":
method = WebotChain.from_llm(llm)
self.pages_viewed.append(processed_html)
action_element = method(
{"user_query": self.instruction, "previous_actions": self.actions_taken, "page_info": processed_html}
)
elif self.mode == "react":
method = ReActWebotChain.from_llm(llm)
self.pages_viewed.append(processed_html)
print("self.plan:", self.plan)
# example: {'success': True, 'message' = 'success', 'thought': "I should first set the value in the search field to '...'", 'action': 'setValue(93, "...")', 'parsedAction': {'name': 'setValue', 'args': {'elementId': 93, 'value': '...'}}}
webot_chain_return = method(
{
"user_query": self.instruction,
"plan": self.plan,
"previous_actions": self.actions_taken,
"previous_thoughts": self.thoughts_taken,
"page_info": processed_html,
}
)
else:
raise ValueError(f"The mode {self.mode} is not supported")
# "I should first set the value in the search field to '...'"
self.thoughts_taken.append(webot_chain_return["thought"])
# setValue(93, "...")
self.actions_taken.append(webot_chain_return["action"])
print("actions_taken:", self.actions_taken)
return webot_chain_return
| [] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~adapters~callbacks~manager.py | from __future__ import annotations
import asyncio
import functools
import logging
import os
import warnings
from contextlib import contextmanager
from contextvars import ContextVar
from typing import Any, Dict, Generator, List, Optional, Type, TypeVar, Union, cast
from uuid import UUID, uuid4
import langchain
from langchain.callbacks.base import (
BaseCallbackHandler,
BaseCallbackManager,
ChainManagerMixin,
LLMManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain.callbacks.openai_info import OpenAICallbackHandler
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.tracers.langchain import LangChainTracer
from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1
from langchain.callbacks.tracers.schemas import TracerSession
from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler
from langchain.schema import (
AgentAction,
AgentFinish,
BaseMessage,
LLMResult,
get_buffer_string,
)
logger = logging.getLogger(__name__)
Callbacks = Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]]
openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar("openai_callback", default=None)
tracing_callback_var: ContextVar[Optional[LangChainTracerV1]] = ContextVar( # noqa: E501
"tracing_callback", default=None
)
tracing_v2_callback_var: ContextVar[Optional[LangChainTracer]] = ContextVar( # noqa: E501
"tracing_callback_v2", default=None
)
def _get_debug() -> bool:
return langchain.debug
@contextmanager
def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]:
"""Get OpenAI callback handler in a context manager."""
cb = OpenAICallbackHandler()
openai_callback_var.set(cb)
yield cb
openai_callback_var.set(None)
@contextmanager
def tracing_enabled(
session_name: str = "default",
) -> Generator[TracerSessionV1, None, None]:
"""Get Tracer in a context manager."""
cb = LangChainTracerV1()
session = cast(TracerSessionV1, cb.load_session(session_name))
tracing_callback_var.set(cb)
yield session
tracing_callback_var.set(None)
@contextmanager
def tracing_v2_enabled(
session_name: Optional[str] = None,
*,
example_id: Optional[Union[str, UUID]] = None,
tenant_id: Optional[str] = None,
session_extra: Optional[Dict[str, Any]] = None,
) -> Generator[TracerSession, None, None]:
"""Get the experimental tracer handler in a context manager."""
# Issue a warning that this is experimental
warnings.warn(
"The experimental tracing v2 is in development. " "This is not yet stable and may change in the future."
)
if isinstance(example_id, str):
example_id = UUID(example_id)
cb = LangChainTracer(
tenant_id=tenant_id,
session_name=session_name,
example_id=example_id,
session_extra=session_extra,
)
session = cb.ensure_session()
tracing_v2_callback_var.set(cb)
yield session
tracing_v2_callback_var.set(None)
def _handle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for CallbackManager."""
message_strings: Optional[List[str]] = None
for handler in handlers:
try:
if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
getattr(handler, event_name)(*args, **kwargs)
except NotImplementedError as e:
if event_name == "on_chat_model_start":
if message_strings is None:
message_strings = [get_buffer_string(m) for m in args[1]]
_handle_event(
[handler],
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
logger.warning(f"Error in {event_name} callback: {e}")
except Exception as e:
logging.warning(f"Error in {event_name} callback: {e}")
async def _ahandle_event_for_handler(
handler: BaseCallbackHandler,
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
try:
if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
event = getattr(handler, event_name)
if asyncio.iscoroutinefunction(event):
await event(*args, **kwargs)
else:
await asyncio.get_event_loop().run_in_executor(None, functools.partial(event, *args, **kwargs))
except NotImplementedError as e:
if event_name == "on_chat_model_start":
message_strings = [get_buffer_string(m) for m in args[1]]
await _ahandle_event_for_handler(
handler,
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
logger.warning(f"Error in {event_name} callback: {e}")
except Exception as e:
logger.warning(f"Error in {event_name} callback: {e}")
async def _ahandle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for AsyncCallbackManager."""
await asyncio.gather(
*(
_ahandle_event_for_handler(handler, event_name, ignore_condition_name, *args, **kwargs)
for handler in handlers
)
)
BRM = TypeVar("BRM", bound="BaseRunManager")
class BaseRunManager(RunManagerMixin):
"""Base class for run manager (a bound callback manager)."""
def __init__(
self,
run_id: UUID,
handlers: List[BaseCallbackHandler],
inheritable_handlers: List[BaseCallbackHandler],
parent_run_id: Optional[UUID] = None,
) -> None:
"""Initialize run manager."""
self.run_id = run_id
self.handlers = handlers
self.inheritable_handlers = inheritable_handlers
self.parent_run_id = parent_run_id
@classmethod
def get_noop_manager(cls: Type[BRM]) -> BRM:
"""Return a manager that doesn't perform any operations."""
return cls(uuid4(), [], [])
class RunManager(BaseRunManager):
"""Sync Run Manager."""
def on_text(
self,
text: str,
**kwargs: Any,
) -> Any:
"""Run when text is received."""
_handle_event(
self.handlers,
"on_text",
None,
text,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class AsyncRunManager(BaseRunManager):
"""Async Run Manager."""
async def on_text(
self,
text: str,
**kwargs: Any,
) -> Any:
"""Run when text is received."""
await _ahandle_event(
self.handlers,
"on_text",
None,
text,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class CallbackManagerForLLMRun(RunManager, LLMManagerMixin):
"""Callback manager for LLM run."""
def on_llm_new_token(
self,
token: str,
**kwargs: Any,
) -> None:
"""Run when LLM generates a new token."""
_handle_event(
self.handlers,
"on_llm_new_token",
"ignore_llm",
token=token,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
_handle_event(
self.handlers,
"on_llm_end",
"ignore_llm",
response,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_llm_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when LLM errors."""
_handle_event(
self.handlers,
"on_llm_error",
"ignore_llm",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin):
"""Async callback manager for LLM run."""
async def on_llm_new_token(
self,
token: str,
**kwargs: Any,
) -> None:
"""Run when LLM generates a new token."""
await _ahandle_event(
self.handlers,
"on_llm_new_token",
"ignore_llm",
token,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
await _ahandle_event(
self.handlers,
"on_llm_end",
"ignore_llm",
response,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_llm_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when LLM errors."""
await _ahandle_event(
self.handlers,
"on_llm_error",
"ignore_llm",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class CallbackManagerForChainRun(RunManager, ChainManagerMixin):
"""Callback manager for chain run."""
def get_child(self) -> CallbackManager:
"""Get a child callback manager."""
manager = CallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
return manager
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
_handle_event(
self.handlers,
"on_chain_end",
"ignore_chain",
outputs,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_chain_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when chain errors."""
_handle_event(
self.handlers,
"on_chain_error",
"ignore_chain",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run when agent action is received."""
_handle_event(
self.handlers,
"on_agent_action",
"ignore_agent",
action,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run when agent finish is received."""
_handle_event(
self.handlers,
"on_agent_finish",
"ignore_agent",
finish,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class AsyncCallbackManagerForChainRun(AsyncRunManager, ChainManagerMixin):
"""Async callback manager for chain run."""
def get_child(self) -> AsyncCallbackManager:
"""Get a child callback manager."""
manager = AsyncCallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
return manager
async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
await _ahandle_event(
self.handlers,
"on_chain_end",
"ignore_chain",
outputs,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_chain_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when chain errors."""
await _ahandle_event(
self.handlers,
"on_chain_error",
"ignore_chain",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run when agent action is received."""
await _ahandle_event(
self.handlers,
"on_agent_action",
"ignore_agent",
action,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run when agent finish is received."""
await _ahandle_event(
self.handlers,
"on_agent_finish",
"ignore_agent",
finish,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class CallbackManagerForToolRun(RunManager, ToolManagerMixin):
"""Callback manager for tool run."""
def get_child(self) -> CallbackManager:
"""Get a child callback manager."""
manager = CallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
return manager
def on_tool_end(
self,
output: str,
**kwargs: Any,
) -> None:
"""Run when tool ends running."""
_handle_event(
self.handlers,
"on_tool_end",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_tool_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when tool errors."""
_handle_event(
self.handlers,
"on_tool_error",
"ignore_agent",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_tool_end_data_model(
    self,
    output: Any,
    **kwargs: Any,
) -> None:
    """Dispatch the on_tool_end_data_model event with the tool's output data model."""
_handle_event(
self.handlers,
"on_tool_end_data_model",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class AsyncCallbackManagerForToolRun(AsyncRunManager, ToolManagerMixin):
"""Async callback manager for tool run."""
def get_child(self) -> AsyncCallbackManager:
"""Get a child callback manager."""
manager = AsyncCallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
return manager
async def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running."""
await _ahandle_event(
self.handlers,
"on_tool_end",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_tool_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when tool errors."""
await _ahandle_event(
self.handlers,
"on_tool_error",
"ignore_agent",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class CallbackManager(BaseCallbackManager):
"""Callback manager that can be used to handle callbacks from langchain."""
def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForLLMRun:
"""Run when LLM starts running."""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_llm_start",
"ignore_llm",
serialized,
prompts,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return CallbackManagerForLLMRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id)
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForLLMRun:
"""Run when LLM starts running."""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_chat_model_start",
"ignore_chat_model",
serialized,
messages,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
# Re-use the LLM Run Manager since the outputs are treated
# the same for now
return CallbackManagerForLLMRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id)
def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Dict[str, Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForChainRun:
"""Run when chain starts running."""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return CallbackManagerForChainRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id)
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForToolRun:
"""Run when tool starts running."""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return CallbackManagerForToolRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
) -> CallbackManager:
"""Configure the callback manager."""
return _configure(cls, inheritable_callbacks, local_callbacks, verbose)
class AsyncCallbackManager(BaseCallbackManager):
"""Async callback manager that can be used to handle callbacks from LangChain."""
@property
def is_async(self) -> bool:
"""Return whether the handler is async."""
return True
async def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForLLMRun:
"""Run when LLM starts running."""
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_llm_start",
"ignore_llm",
serialized,
prompts,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return AsyncCallbackManagerForLLMRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id)
async def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_chat_model_start",
"ignore_chat_model",
serialized,
messages,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return AsyncCallbackManagerForLLMRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id)
async def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Dict[str, Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForChainRun:
"""Run when chain starts running."""
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return AsyncCallbackManagerForChainRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id)
async def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForToolRun:
"""Run when tool starts running."""
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return AsyncCallbackManagerForToolRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
) -> AsyncCallbackManager:
"""Configure the callback manager."""
return _configure(cls, inheritable_callbacks, local_callbacks, verbose)
T = TypeVar("T", CallbackManager, AsyncCallbackManager)
def _configure(
callback_manager_cls: Type[T],
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
) -> T:
"""Configure the callback manager."""
callback_manager = callback_manager_cls([])
if inheritable_callbacks or local_callbacks:
if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None:
inheritable_callbacks_ = inheritable_callbacks or []
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks_.copy(),
inheritable_handlers=inheritable_callbacks_.copy(),
)
else:
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks.handlers,
inheritable_handlers=inheritable_callbacks.inheritable_handlers,
parent_run_id=inheritable_callbacks.parent_run_id,
)
local_handlers_ = (
local_callbacks
if isinstance(local_callbacks, list)
else (local_callbacks.handlers if local_callbacks else [])
)
for handler in local_handlers_:
callback_manager.add_handler(handler, False)
tracer = tracing_callback_var.get()
open_ai = openai_callback_var.get()
tracing_enabled_ = (
os.environ.get("LANGCHAIN_TRACING") is not None
or tracer is not None
or os.environ.get("LANGCHAIN_HANDLER") is not None
)
tracer_v2 = tracing_v2_callback_var.get()
tracing_v2_enabled_ = os.environ.get("LANGCHAIN_TRACING_V2") is not None or tracer_v2 is not None
tracer_session = os.environ.get("LANGCHAIN_SESSION")
debug = _get_debug()
if tracer_session is None:
tracer_session = "default"
if verbose or debug or tracing_enabled_ or tracing_v2_enabled_ or open_ai is not None:
if (
    verbose
    and not debug
    and not any(isinstance(handler, StdOutCallbackHandler) for handler in callback_manager.handlers)
):
    callback_manager.add_handler(StdOutCallbackHandler(), False)
if debug and not any(isinstance(handler, ConsoleCallbackHandler) for handler in callback_manager.handlers):
callback_manager.add_handler(ConsoleCallbackHandler(), True)
if tracing_enabled_ and not any(
isinstance(handler, LangChainTracerV1) for handler in callback_manager.handlers
):
if tracer:
callback_manager.add_handler(tracer, True)
else:
handler = LangChainTracerV1()
handler.load_session(tracer_session)
callback_manager.add_handler(handler, True)
if tracing_v2_enabled_ and not any(
isinstance(handler, LangChainTracer) for handler in callback_manager.handlers
):
if tracer_v2:
callback_manager.add_handler(tracer_v2, True)
else:
try:
handler = LangChainTracer(session_name=tracer_session)
handler.ensure_session()
callback_manager.add_handler(handler, True)
except Exception as e:
logger.debug("Unable to load requested LangChainTracer", e)
if open_ai is not None and not any(
isinstance(handler, OpenAICallbackHandler) for handler in callback_manager.handlers
):
callback_manager.add_handler(open_ai, True)
return callback_manager
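# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal example of how the configure() classmethods above are typically used.
# `_ExampleHandler` is a hypothetical handler defined only for this sketch; callbacks
# passed as inheritable become available to child run managers via get_child().
def _example_configure_usage() -> None:
    class _ExampleHandler(BaseCallbackHandler):
        def on_text(self, text: str, **kwargs: Any) -> Any:
            print(f"got text: {text}")

    manager = CallbackManager.configure(
        inheritable_callbacks=[_ExampleHandler()],
        local_callbacks=None,
        verbose=True,  # also attaches a StdOutCallbackHandler
    )
    run_manager = manager.on_chain_start({"name": "example_chain"}, {"input": "hi"})
    run_manager.on_text("hello from the chain")
    run_manager.on_chain_end({"output": "done"})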
| [] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~data_agent~executors~code_generation_executor.py | from typing import Any, Dict, List, Literal, Optional, Union
from langchain.base_language import BaseLanguageModel
from real_agents.adapters.data_model import DatabaseDataModel, TableDataModel, ImageDataModel
from real_agents.adapters.memory import ReadOnlySharedStringMemory
from real_agents.adapters.schema import SQLDatabase
from real_agents.data_agent.python.base import PythonChain
from real_agents.data_agent.sql.base import SQLDatabaseChain
class CodeGenerationExecutor:
"""Code Generation Executor.
Example:
.. code-block:: python
from real_agents.adapters.executors import CodeGenerationExecutor
executor = CodeGenerationExecutor(programming_language="sql")
executor.run(
user_intent="What is the name of the first employee?",
grounding_source=SQLDatabase.from_uri(...)
)
"""
def __init__(
self,
programming_language: Literal["sql", "python"],
usage: Union[None, str] = None,
example_selector: Any = None,
memory: Optional[ReadOnlySharedStringMemory] = None,
) -> None:
"""Initialize the executor.
Args:
programming_language: Programming language to generate.
example_selector: Example selector to select few-shot in-context exemplars.
"""
self._programming_language = programming_language
self._usage = usage
self._example_selector = example_selector
self._memory = memory
@property
def programming_language(self) -> str:
"""Get programming language."""
return self._programming_language
def run(
self,
user_intent: str,
llm: BaseLanguageModel,
grounding_source: Optional[Union[List[TableDataModel], DatabaseDataModel, ImageDataModel]] = None,
user_id: Optional[str] = None,
chat_id: Optional[str] = None,
code_execution_mode: str = "local",
jupyter_kernel_pool: Any = None,
return_intermediate_steps: bool = True,
return_direct: bool = True,
verbose: bool = True,
) -> Dict[str, Any]:
"""Run the executor.
Args:
    user_intent: User intent to execute.
    llm: Language model to use.
    grounding_source: Grounding source(s) to execute the program on; a list of table data models, a database data model, or an image data model.
    user_id: Identifier of the requesting user.
    chat_id: Identifier of the current chat session.
    code_execution_mode: Where generated Python code is executed (defaults to "local").
    jupyter_kernel_pool: Pool of Jupyter kernels used when executing generated code, if any.
    return_intermediate_steps: Whether to return the intermediate steps, e.g., the generated program.
    return_direct: Whether to return the result of program execution directly.
    verbose: Whether to print the logging.
Returns:
Result dictionary of code generation
"""
def _concat_grounding_source() -> str:
assert isinstance(grounding_source, list)
table_schema = ""
for gs in grounding_source:
table_schema += f"{gs.get_llm_side_data()}\n"
return table_schema
if self._programming_language == "sql":
db = grounding_source.raw_data
assert isinstance(db, SQLDatabase)
method = SQLDatabaseChain(
llm=llm,
database=db,
example_selector=self._example_selector,
memory=self._memory,
return_direct=return_direct,
return_intermediate_steps=return_intermediate_steps,
verbose=verbose,
)
_input = {"user_intent": user_intent}
result = method(_input)
elif self._programming_language == "python":
if self._usage is None:
# General python code generation for data analysis
method = PythonChain.from_python_prompt(
llm,
return_intermediate_steps=return_intermediate_steps,
verbose=True,
memory=self._memory,
user_id=user_id,
chat_id=chat_id,
code_execution_mode=code_execution_mode,
jupyter_kernel_pool=jupyter_kernel_pool,
)
# Get each source_item (table, db, files...) from the grounding_source
_input = {"question": user_intent, "data_info": _concat_grounding_source()}
result = method(_input)
elif self._usage == "echarts":
# Python code generation for echarts interactive chart
method = PythonChain.from_echarts_prompt(
llm,
return_intermediate_steps=return_intermediate_steps,
verbose=True,
memory=self._memory,
user_id=user_id,
chat_id=chat_id,
code_execution_mode=code_execution_mode,
jupyter_kernel_pool=jupyter_kernel_pool,
)
_input = {"question": user_intent, "data_info": _concat_grounding_source()}
result = method(_input)
else:
raise ValueError(f"Usage {self._usage} not supported yet.")
else:
raise ValueError(f"Programming language {self._programming_language} not supported.")
return result
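# --- Illustrative usage sketch (comments only; not part of the original module) ---
# The class docstring shows the "sql" branch; the sketch below shows the "python"
# branch. `llm` and `table` are placeholders for a configured language model and a
# TableDataModel built elsewhere.
#
# executor = CodeGenerationExecutor(programming_language="python")
# result = executor.run(
#     user_intent="Plot the monthly revenue as a bar chart.",
#     llm=llm,                   # placeholder
#     grounding_source=[table],  # placeholder: the python branch expects a list
#     code_execution_mode="local",
# )
# print(result)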
| [] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~adapters~interactive_executor.py | from __future__ import annotations
from typing import Any, Optional, Sequence
from langchain.base_language import BaseLanguageModel
from langchain.tools.base import BaseTool
from real_agents.adapters.agent_helpers import AgentExecutor
from real_agents.data_agent.copilot import ConversationalChatAgent
from real_agents.plugins_agent.plugin import ConversationalPluginChatAgent
from real_agents.web_agent.webot import ConversationalWebotChatAgent
def initialize_agent(
tools: Sequence[BaseTool],
llm: BaseLanguageModel,
continue_model: Optional[str] = None,
agent_kwargs: Optional[dict] = None,
return_intermediate_steps: Optional[bool] = True,
**kwargs: Any,
) -> AgentExecutor:
"""Load an agent executor given tools and LLM.
Args:
tools: List of tools this agent has access to.
llm: Language model to use as the agent.
        continue_model: Optional model name used to select a follow-up "continue" prompt for the agent.
        agent_kwargs: Additional keyword arguments to pass to the underlying agent.
        return_intermediate_steps: Whether to return intermediate steps in the agent output.
        **kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor
"""
agent_kwargs = agent_kwargs or {}
agent_obj = ConversationalChatAgent.from_llm_and_tools(
llm=llm, tools=tools, continue_model=continue_model, **agent_kwargs
)
agent_executor = AgentExecutor.from_agent_and_tools(
agent=agent_obj,
tools=tools,
return_intermediate_steps=return_intermediate_steps,
**kwargs,
)
return agent_executor
def initialize_plugin_agent(
tools: Sequence[BaseTool],
llm: BaseLanguageModel,
continue_model: Optional[str] = None,
agent_kwargs: Optional[dict] = None,
return_intermediate_steps: Optional[bool] = True,
**kwargs: Any,
) -> AgentExecutor:
"""Load an agent executor given tools and LLM.
Args:
tools: List of tools this agent has access to.
llm: Language model to use as the agent.
        continue_model: Optional model name used to select a follow-up "continue" prompt for the agent.
        agent_kwargs: Additional keyword arguments to pass to the underlying agent.
        return_intermediate_steps: Whether to return intermediate steps in the agent output.
        **kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor
"""
agent_kwargs = agent_kwargs or {}
agent_obj = ConversationalPluginChatAgent.from_llm_and_tools(
llm=llm, tools=tools, continue_model=continue_model, **agent_kwargs
)
agent_executor = AgentExecutor.from_agent_and_tools(
agent=agent_obj,
tools=tools,
return_intermediate_steps=return_intermediate_steps,
**kwargs,
)
return agent_executor
def initialize_webot_agent(
tools: Sequence[BaseTool],
llm: BaseLanguageModel,
continue_model: Optional[str] = None,
agent_kwargs: Optional[dict] = None,
return_intermediate_steps: Optional[bool] = True,
**kwargs: Any,
) -> AgentExecutor:
"""Load an agent executor given tools and LLM.
Args:
tools: List of tools this agent has access to.
llm: Language model to use as the agent.
        continue_model: Optional model name used to select a follow-up "continue" prompt for the agent.
        agent_kwargs: Additional keyword arguments to pass to the underlying agent.
        return_intermediate_steps: Whether to return intermediate steps in the agent output.
        **kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor
"""
agent_kwargs = agent_kwargs or {}
agent_obj = ConversationalWebotChatAgent.from_llm_and_tools(
llm=llm, tools=tools, continue_model=continue_model, **agent_kwargs
)
agent_executor = AgentExecutor.from_agent_and_tools(
agent=agent_obj,
tools=tools,
return_intermediate_steps=return_intermediate_steps,
**kwargs,
)
return agent_executor
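# --- Illustrative usage sketch (comments only; not part of the original module) ---
# All three helpers follow the same pattern; shown for the data-agent variant.
# `llm` and `tools` are placeholders, and depending on the agent's prompt the
# executor may require extra inputs (e.g. chat history) beyond "input".
#
# agent_executor = initialize_agent(
#     tools=tools,  # placeholder: Sequence[BaseTool]
#     llm=llm,      # placeholder: BaseLanguageModel
#     return_intermediate_steps=True,
# )
# output = agent_executor({"input": "Summarize the uploaded table."})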
| [] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~web_agent~webot.py | """An agent designed to hold a conversation in addition to using tools. (Specially designed for plugins model)"""
from __future__ import annotations
from typing import Any, List, Optional, Sequence, Tuple, Union
from pydantic import Extra, Field
from typing_extensions import override
from langchain.agents.agent import AgentOutputParser
from langchain.agents.utils import validate_tools_single_input
from langchain.base_language import BaseLanguageModel
from langchain.chains import LLMChain
from langchain.schema import (
AgentAction,
AgentFinish,
AIMessage,
BaseMessage,
BaseOutputParser,
HumanMessage
)
from langchain.callbacks.manager import (
Callbacks
)
from langchain.tools.base import BaseTool
from langchain.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
)
from real_agents.adapters.agent_helpers.agent import Agent
from real_agents.adapters.agent_helpers.output_parser import ConversationOutputParser
from real_agents.web_agent.webot_prompt import (
PREFIX,
SUFFIX,
TEMPLATE_TOOL_RESPONSE,
fake_continue_prompt
)
from real_agents.adapters.data_model import DataModel, MessageDataModel
from real_agents.data_agent.copilot import ConversationalChatAgent
class ConversationalWebotChatAgent(ConversationalChatAgent):  # FIXME: changing the base class to Agent leads to a bug, but why?
"""An agent designed to hold a conversation in addition to using plugin tool."""
output_parser: ConversationOutputParser = Field(
    # default_factory expects a callable, so pass the parser class rather than an instance
    default_factory=ConversationOutputParser
)
template_tool_response: str = TEMPLATE_TOOL_RESPONSE
continue_model: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.allow
arbitrary_types_allowed = True
@classmethod
def _get_default_output_parser(
cls, **kwargs: Any
) -> ConversationOutputParser:
return ConversationOutputParser()
@property
def _agent_type(self) -> str:
raise NotImplementedError
@property
def observation_prefix(self) -> str:
"""Prefix to append the observation with."""
return "Observation: "
@property
def llm_prefix(self) -> str:
"""Prefix to append the llm call with."""
return "Thought:"
@classmethod
def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
super()._validate_tools(tools)
validate_tools_single_input(cls.__name__, tools)
@classmethod
def create_prompt(
cls,
tools: Sequence[BaseTool],
system_message: str = PREFIX,
human_message: str = SUFFIX,
input_variables: Optional[List[str]] = None,
output_parser: Optional[BaseOutputParser] = None,
) -> BasePromptTemplate:
tool_strings = "\n".join([f"Name: {tool.name}\nDescription: {tool.description}" for tool in tools])
tool_names = ", ".join([tool.name for tool in tools])
_output_parser = output_parser or cls._get_default_output_parser()
format_instructions = _output_parser.get_format_instructions("webot")
format_instructions = format_instructions.format(tool_names=tool_names)
# system message
system_message = system_message + f"{tool_strings}\n\n{format_instructions}"
# human input
final_prompt = human_message
if input_variables is None:
input_variables = ["input", "chat_history", "agent_scratchpad"]
messages = [
SystemMessagePromptTemplate.from_template(system_message),
MessagesPlaceholder(variable_name="chat_history"),
HumanMessagePromptTemplate.from_template(final_prompt),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
return ChatPromptTemplate(input_variables=input_variables, messages=messages)
def _construct_scratchpad(self, intermediate_steps: List[Tuple[AgentAction, str]]) -> List[BaseMessage]:
"""Construct the scratchpad that lets the agent continue its thought process."""
thoughts: List[BaseMessage] = []
# Try to only use AI message for scratchpad
content = []
for idx, (action, full_observation) in enumerate(intermediate_steps):
content.append(MessageDataModel.extract_action_for_llm(action.log))
observation = full_observation
if isinstance(full_observation, DataModel):
llm_raw_observation = full_observation.get_llm_side_data()
observation = MessageDataModel.extract_tool_response_for_llm(llm_raw_observation, tool_style="plugin")
if idx == len(intermediate_steps) - 1:
    tool_response = self.template_tool_response.format(
        observation=str(observation), tool_names=self.allowed_tools
    )
    content.append(tool_response)
else:
    content.append(observation)
content_str = "\n".join(content)
thoughts.append(AIMessage(content=content_str))
if self.continue_model is not None and len(intermediate_steps) != 0:
thoughts.append(HumanMessage(content=fake_continue_prompt[self.continue_model]))
return thoughts
@override
def plan(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
**kwargs: Any,
) -> Union[AgentAction, AgentFinish]:
"""Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date,
along with observations
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
"""
full_inputs = self.get_full_inputs(intermediate_steps, **kwargs)
system_prompt = self.llm_chain.prompt.messages[0].format().content
system_prompt_tokens = MessageDataModel._count_tokens(
system_prompt
)
max_tokens = 8000
max_gen_tokens = 1000
# FIXME: need more accurate token limit calculation
full_inputs = MessageDataModel.truncate_chat_history(full_inputs, max_token=max_tokens - system_prompt_tokens - max_gen_tokens)
full_output = self.llm_chain.predict(**full_inputs)
return self.output_parser.parse(full_output)
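    # Token-budget illustration (comment only): with max_tokens = 8000 and
    # max_gen_tokens = 1000, a system prompt of roughly 1500 tokens would leave
    # about 8000 - 1500 - 1000 = 5500 tokens for the chat history, and
    # truncate_chat_history above drops the oldest messages until it fits.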
@classmethod
def from_llm_and_tools(
cls,
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
callbacks: Callbacks = None,
output_parser: Optional[AgentOutputParser] = None,
system_message: str = PREFIX,
human_message: str = SUFFIX,
input_variables: Optional[List[str]] = None,
**kwargs: Any,
) -> Agent:
"""Construct an agent from an LLM and tools."""
cls._validate_tools(tools)
_output_parser = output_parser or cls._get_default_output_parser()
prompt = cls.create_prompt(
tools,
system_message=system_message,
human_message=human_message,
input_variables=input_variables,
output_parser=_output_parser,
)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
)
tool_names = [tool.name for tool in tools]
return cls(
llm_chain=llm_chain,
allowed_tools=tool_names,
output_parser=_output_parser,
**kwargs,
)
| [
"system_message8c26ecf6-eed0-4f86-ab43-cd899bdc38d0PLACEHOLDER\n\nPLACEHOLDER"
] |
2024-01-10 | xlang-ai/OpenAgents | real_agents~adapters~data_model~message.py | import re
import textwrap
from typing import List, Dict, Any, Optional
from langchain.schema import BaseMessage
import tiktoken
# format of agent action
ACTION_FORMAT = """```json
{{
"action": "{_action}",
"action_input": "{_action_input}",
}}
```"""
# format of tool call(code) & tool output(response)
TOOL_FORMAT = {
"code": """<code>
{_intermediate_steps}
</code>
<output>
{_result}
</output>
""",
"plugin": """<plugin_call>
{_intermediate_steps}
</plugin_call>
<output>
{_result}
</output>
""",
}
# format to wrap tool call + tool output together
TOOL_RESPONSE_FORMAT = """[RESPONSE_BEGIN]
{_response}
[RESPONSE_END]
"""
class MessageDataModel:
"""A data model for Message Management, general purpose."""
@staticmethod
def _count_tokens(test_string: str) -> int:
"""copy of langchain _get_num_token_default_method"""
enc = tiktoken.get_encoding("cl100k_base")
tokens = len(enc.encode(test_string))
return tokens
@classmethod
def _get_num_tokens_from_messages(cls, buffer: List[BaseMessage]) -> int:
return sum([cls._count_tokens(m.content) for m in buffer])
@classmethod
def truncate_text(cls, raw_text: str, max_token: Optional[int] = 250, trunc_ratio: float = 0.5) -> str:
"""heuristic truncation for single long string & code"""
tokens = cls._count_tokens(raw_text)
if max_token is None or tokens <= max_token:
return raw_text
# keep roughly trunc_ratio * max_token tokens from the start and the same budget from the end
half_tokens = int(max_token * trunc_ratio)
lines = raw_text.strip().split("\n")
lines = [" ".join(line.split(" ")[:100]) for line in lines]
total_lines = len(lines)
# first half
left = 0
right = total_lines // 2
while left < right:
mid = (left + right) >> 1
text = "\n".join(lines[0:mid])
token = cls._count_tokens(text)
if token > half_tokens:
right = mid
else:
left = mid + 1
first_half = "\n".join(lines[0:right])
# last half
left = total_lines // 2 + 1
right = total_lines - 1
while left < right:
mid = (left + right) >> 1
text = "\n".join(lines[mid:])
token = cls._count_tokens(text)
if token > half_tokens:
right = mid
else:
left = mid + 1
second_half = "\n".join(lines[left:])
if first_half != "" or second_half != "":
return f"{first_half}\n...\n[too long to show]\n...\n{second_half}"
else:
# Both halves came back empty (every candidate split already exceeds the budget),
# so fall back to the last 100 characters as the truncated result.
return f"...\n[too long to show]\n...\n{raw_text[-100:]}"
@classmethod
def truncate_chat_history(cls, full_inputs: Dict[str, Any], max_token: int = 2500) -> Dict[str, Any]:
_input = full_inputs["input"]
agent_scratchpad = full_inputs["agent_scratchpad"]
agent_scratchpad = "\n".join([_.content for _ in agent_scratchpad])
_input_tokens = cls._count_tokens(_input)
_scratchpad_tokens = cls._count_tokens(agent_scratchpad)
left_tokens = max_token - _scratchpad_tokens - _input_tokens
chat_history = full_inputs["chat_history"]
curr_buffer_length = cls._get_num_tokens_from_messages(chat_history)
while len(chat_history) != 0 and curr_buffer_length > left_tokens:
chat_history.pop(0)
curr_buffer_length = cls._get_num_tokens_from_messages(chat_history)
full_inputs["chat_history"] = chat_history
return full_inputs
@staticmethod
def _extract_value(json_string: str, key: str) -> str:
pattern = re.compile(rf'"?{key}"?\s*:\s*("((?:[^"\\]|\\.)*)"|(\b[^,\s]*\b))', re.MULTILINE)
match = pattern.search(json_string)
if match:
result = match.group(1).replace('\\"', '"').replace("\\\\", "\\").strip('"').strip("'").strip()
# result = f"\"{result}\""
return result
raise ValueError(f"Could not find {key} in {json_string}")
@staticmethod
def _extract_response(
chat_history: str,
begin_marker: str = "[RESPONSE_BEGIN]",
end_marker: str = "[RESPONSE_END]",
ai_msg_marker: str = "AI:",
):
code_blocks = chat_history.split(ai_msg_marker)
pattern = r"\[RESPONSE_BEGIN\](.*?)\[RESPONSE_END\]"
cleaned_output = []
for code_block in code_blocks:
matches = re.findall(pattern, code_block, re.DOTALL)
if matches:
cleaned_output.append(matches[0].strip())
return "\n".join(cleaned_output)
@classmethod
def extract_action_for_llm(cls, text, max_token: int = 500) -> str:
"""Since Action should be fully inputted into an Agent, so we do not perform truncation here."""
action_format = ACTION_FORMAT
cleaned_output = text.strip()
try:
_action = cls._extract_value(cleaned_output, "action")
_action_input = cls._extract_value(cleaned_output, "action_input")
return action_format.format(_action=_action, _action_input=_action_input)
except Exception:
if cleaned_output.startswith("Action:"):
lines = cleaned_output.splitlines()
_action = lines[1].strip()
_action_input = textwrap.dedent("\n".join(lines[2:])).strip()
return action_format.format(_action=_action, _action_input=_action_input)
else:
_action_input = cleaned_output
return action_format.format(_action="Final Answer", _action_input=_action_input)
@classmethod
def extract_tool_response_for_llm(cls, text, tool_style: str = "code", max_token: Optional[int] = 250) -> str:
wrap_format = TOOL_RESPONSE_FORMAT
tool_observation_format = TOOL_FORMAT[tool_style]
cleaned_output = text.strip()
if tool_style == "plugin":
max_token = None
try:
_result = cls.truncate_text(cls._extract_value(cleaned_output, "result"), max_token)
_intermediate_steps = cls.truncate_text(
cls._extract_value(cleaned_output, "intermediate_steps"), max_token
)
_intermediate_steps = _intermediate_steps.replace("\\n", "\n").strip("\n")
_result = _result.replace("\\n", "\n").strip("\n")
_response = tool_observation_format.format(_intermediate_steps=_intermediate_steps, _result=_result)
return wrap_format.format(_response=_response)
except Exception:
if cleaned_output.startswith("Final Answer:"):
lines = cleaned_output.splitlines()
_response = textwrap.dedent("\n".join(lines[2:])).strip()
_response = cls.truncate_text(_response, max_token)
return wrap_format.format(_response=_response)
_response = cls.truncate_text(cleaned_output, max_token)
return wrap_format.format(_response=_response)
@classmethod
def extract_code_for_python_tool(cls, text: str, max_token: int = 2500, trunc_ratio: float = 0.2) -> str:
whole_code = MessageDataModel._extract_response(text)
trunc_code = cls.truncate_text(whole_code, max_token=max_token, trunc_ratio=trunc_ratio)
return trunc_code
@classmethod
def extract_code_for_sql_tool(cls, text: str, max_token: int = 2500, trunc_ratio: float = 0.2) -> str:
whole_code = MessageDataModel._extract_response(text)
trunc_code = cls.truncate_text(whole_code, max_token=max_token, trunc_ratio=trunc_ratio)
return trunc_code
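# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Shows how the truncation helpers behave; exact numbers depend on the
# cl100k_base tokenizer, so the counts below are indicative only.
def _example_truncation() -> None:
    long_text = "\n".join(f"line {i}: some tool output" for i in range(500))
    short = MessageDataModel.truncate_text(long_text, max_token=100)
    # Roughly the first ~50 and last ~50 tokens are kept, separated by an
    # explicit "[too long to show]" marker.
    print(MessageDataModel._count_tokens(long_text), "->", MessageDataModel._count_tokens(short))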
| [] |
2024-01-10 | Louvivien/OCRandGPT-3 | backend.py | """Flask backend for the PDF summarizer.
This module extracts text from user-selected pages of an uploaded PDF and
summarizes it with the OpenAI completion API.
"""
import sys
import fitz
from flask import Flask, render_template, request
import tempfile
import openai
sys.path.append('templates/frontend/module')
app = Flask(__name__, template_folder='templates')
# Set your OpenAI API key
openai.api_key = "xxxxxx"
@app.route('/', methods=['GET', 'POST'])
def extract_pdf_text():
"""
Extracts text from specific pages of a PDF file and returns it as a string.
Args:
pdf_file (FileStorage): The uploaded PDF file.
page_numbers (list): A list of integers representing the page numbers to extract.
Returns:
str: The extracted text.
"""
if request.method == 'POST':
# Get the uploaded PDF file and page numbers from the form data
pdf_file = request.files['file']
page_numbers = request.form['page_numbers']
# Split the page numbers by comma and convert them to integers
page_numbers = [int(x) for x in page_numbers.split(',')]
# Save the PDF file to a temporary location on the filesystem
with tempfile.TemporaryDirectory() as temp_dir:
pdf_path = f"{temp_dir}/temp.pdf"
pdf_file.save(pdf_path)
# Open the PDF file using PyMuPDF
pdf_doc = fitz.Document(pdf_path)
# Initialize an empty list to store the extracted text
extracted_text = []
# Iterate through the specified page numbers
for page_number in page_numbers:
# Make sure the page number is within the range of the PDF
if page_number > 0 and page_number <= pdf_doc.page_count:
# Load the page from the PDF document
page = pdf_doc.load_page(page_number - 1)
# Extract the text from the page
text = page.get_text()
# Add the extracted text to the list
extracted_text.append(text)
else:
# Add an error message to the list if the page number is out of range
extracted_text.append(f"Page {page_number} is not a valid page number.")
# Send the extracted text to ChatGPT and get the response
model = "text-davinci-003"
prompt = "summarize".join(extracted_text)
response = openai.Completion.create(
engine=model,
prompt=prompt,
max_tokens=150,
n=1,
stop=None,
temperature=0.5,
presence_penalty=1.0,
top_p=1.0,
)
chatbot_response = response.choices[0].text
# Render the HTML template with the extracted text and ChatGPT response
return render_template('frontend.html', extracted_text=extracted_text, chatbot_response=chatbot_response)
else:
# Render the HTML template for the GET request
return render_template('frontend.html')
if __name__ == '__main__':
app.run(debug=True)
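# --- Illustrative client call (comments only; not part of the original module) ---
# A minimal sketch of posting a PDF to this endpoint with the `requests` library;
# "report.pdf" and the page list are placeholders.
#
# import requests
# with open("report.pdf", "rb") as f:
#     resp = requests.post(
#         "http://127.0.0.1:5000/",
#         files={"file": f},
#         data={"page_numbers": "1,3"},
#     )
# print(resp.status_code)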
| [] |
2024-01-10 | kurtseifried/ChatGPT-API | src~revChatGPT~V2.py | """
Official API for ChatGPT
"""
import asyncio
import json
import os
import sys
import httpx
import requests
import tiktoken
from OpenAIAuth.OpenAIAuth import OpenAIAuth
ENCODER = tiktoken.get_encoding("gpt2")
def get_max_tokens(prompt: str) -> int:
"""
Get the max tokens for a prompt
"""
return 4000 - len(ENCODER.encode(prompt))
class Message:
"""
A single exchange between the user and the bot
"""
def __init__(self, text: str, author: str) -> None:
self.text: str = text
self.author: str = author
class Conversation:
"""
A single conversation
"""
def __init__(self) -> None:
self.messages: list[Message] = []
CONVERSATION_BUFFER: int = int(os.environ.get("CONVERSATION_BUFFER") or 1500)
class Conversations:
"""
Conversation handler
"""
def __init__(self) -> None:
self.conversations: dict[str, Conversation] = {}
def add_message(self, message: Message, conversation_id: str) -> None:
"""
Adds a message to a conversation
"""
if conversation_id not in self.conversations:
self.conversations[conversation_id] = Conversation()
self.conversations[conversation_id].messages.append(message)
def get(self, conversation_id: str) -> str:
"""
Builds a conversation string from a conversation id
"""
if conversation_id not in self.conversations:
return ""
# Build conversation string from messages and check if it's too long
conversation = ""
for message in self.conversations[conversation_id].messages:
conversation += f"{message.author}: {message.text}<|im_sep|>\n\n"
if len(ENCODER.encode(conversation)) > 4000 - CONVERSATION_BUFFER:
self.purge_history(conversation_id)
return self.get(conversation_id)
return conversation
def purge_history(self, conversation_id: str, num: int = 1):
"""
Remove oldest messages from a conversation
"""
if conversation_id not in self.conversations:
return
self.conversations[conversation_id].messages = self.conversations[
conversation_id
].messages[num:]
def rollback(self, conversation_id: str, num: int = 1):
"""
Remove latest messages from a conversation
"""
if conversation_id not in self.conversations:
return
self.conversations[conversation_id].messages = self.conversations[
conversation_id
].messages[:-num]
def remove(self, conversation_id: str) -> None:
"""
Removes a conversation
"""
if conversation_id in self.conversations:
del self.conversations[conversation_id]
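# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Chatbot.ask() uses this store as follows: messages are appended per conversation id
# and rendered into one prompt string with "<|im_sep|>" separators, and purge_history()
# drops the oldest messages once the rendered string nears the 4000-token budget.
def _example_conversations_usage() -> None:
    store = Conversations()
    store.add_message(Message("Hello", "User"), "demo")
    store.add_message(Message("Hi there!", "ChatGPT"), "demo")
    print(store.get("demo"))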
BASE_PROMPT = (
os.environ.get("BASE_PROMPT")
or """You are ChatGPT, a large language model by OpenAI. Respond conversationally\n\n\n"""
)
PROXY_URL = os.environ.get("PROXY_URL") or "https://chat.duti.tech"
class Chatbot:
"""
Handles everything seamlessly
"""
def __init__(
self,
email: str,
password: str,
paid: bool = False,
proxy=None,
insecure: bool = False,
session_token: str = None,
) -> None:
self.proxy = proxy
self.email: str = email
self.password: str = password
self.session_token = session_token
self.insecure: bool = insecure
self.api_key: str
self.paid: bool = paid
self.conversations = Conversations()
self.login(email, password, proxy, insecure, session_token)
async def ask(self, prompt: str, conversation_id: str = None) -> dict:
"""
Gets a response from the API
"""
if conversation_id is None:
conversation_id = "default"
self.conversations.add_message(
Message(prompt, "User"),
conversation_id=conversation_id,
)
conversation: str = self.conversations.get(conversation_id)
# Build request body
body = self.__get_config()
body["prompt"] = BASE_PROMPT + conversation + "ChatGPT: "
body["max_tokens"] = get_max_tokens(conversation)
async with httpx.AsyncClient(proxies=self.proxy if self.proxy else None).stream(
method="POST",
url=PROXY_URL + "/completions",
content=json.dumps(body),
headers={"Authorization": f"Bearer {self.api_key}"},
timeout=1080,
) as response:
full_result = ""
async for line in response.aiter_lines():
if response.status_code == 429:
print("error: " + "Too many requests")
raise Exception("Too many requests")
elif response.status_code == 523:
print("error: " + "Origin is unreachable. Ensure that you are authenticated and are using the correct pricing model.")
raise Exception("Origin is unreachable. Ensure that you are authenticated and are using the correct pricing model.")
elif response.status_code == 503:
print("error: " + "OpenAI error!")
raise Exception("OpenAI error!")
elif response.status_code != 200:
print("error: " + "Unknown error")
raise Exception("Unknown error")
line = line.strip()
if line == "\n" or line == "":
continue
if line == "data: [DONE]":
break
try:
# Remove "data: " from the start of the line
data = json.loads(line[6:])
if data is None:
continue
full_result += data["choices"][0]["text"].replace("<|im_end|>", "")
if "choices" not in data:
continue
yield data
except json.JSONDecodeError:
continue
self.conversations.add_message(
Message(full_result, "ChatGPT"),
conversation_id=conversation_id,
)
def __get_config(self) -> dict:
return {
"temperature": float(os.environ.get("TEMPERATURE") or 0.5),
"top_p": float(os.environ.get("TOP_P") or 1),
"stop": ["<|im_end|>", "<|im_sep|>"],
"presence_penalty": float(os.environ.get("PRESENCE_PENALTY") or 1.0),
"paid": self.paid,
"stream": True,
}
def login(self, email, password, proxy, insecure, session_token) -> None:
"""
Login to the API
"""
if not insecure:
auth = OpenAIAuth(email_address=email, password=password, proxy=proxy)
if session_token:
auth.session_token = session_token
auth.get_access_token()
self.api_key = auth.access_token
if self.api_key is None:
self.session_token = None
self.login(email, password, proxy, insecure, None)
return
auth.begin()
self.session_token = auth.session_token
self.api_key = auth.access_token
else:
auth_request = requests.post(
PROXY_URL + "/auth",
json={"email": email, "password": password},
timeout=10,
)
self.api_key = auth_request.json()["accessToken"]
def get_input(prompt):
"""
Multi-line input
"""
# Display the prompt
print(prompt, end="")
# Initialize an empty list to store the input lines
lines = []
# Read lines of input until the user enters an empty line
while True:
line = input()
if line == "":
break
lines.append(line)
# Join the lines, separated by newlines, and store the result
user_input = "\n".join(lines)
# Return the input
return user_input
async def main():
"""
Testing main function
"""
import argparse
print(
"""
ChatGPT - A command-line interface to OpenAI's ChatGPT (https://chat.openai.com/chat)
Repo: github.com/acheong08/ChatGPT
""",
)
parser = argparse.ArgumentParser()
parser.add_argument(
"-e",
"--email",
help="Your OpenAI email address",
required=False,
)
parser.add_argument(
"-p",
"--password",
help="Your OpenAI password",
required=False,
)
parser.add_argument(
"--paid",
help="Use the paid API",
action="store_true",
)
parser.add_argument(
"--proxy",
help="Use a proxy",
required=False,
type=str,
default=None,
)
parser.add_argument(
"--insecure-auth",
help="Use an insecure authentication method to bypass OpenAI's geo-blocking",
action="store_true",
)
parser.add_argument(
"--session_token",
help="Alternative to email and password authentication. Use this if you have Google/Microsoft account.",
required=False,
)
args = parser.parse_args()
if (args.email is None or args.password is None) and args.session_token is None:
print("error: " + "Please provide your email and password")
return
print("Logging in...")
chatbot = Chatbot(
args.email,
args.password,
paid=args.paid,
proxy=args.proxy,
insecure=args.insecure_auth,
session_token=args.session_token,
)
print("Logged in\n")
print("Type '!help' to show a full list of commands")
print("Press enter twice to submit your question.\n")
def commands(command: str) -> bool:
if command == "!help":
print(
"""
!help - Show this help message
!reset - Clear the current conversation
!rollback <int> - Remove the latest <int> messages from the conversation
!exit - Exit the program
""",
)
elif command == "!reset":
chatbot.conversations.remove("default")
print("Conversation cleared")
elif command.startswith("!rollback"):
try:
num = int(command.split(" ")[1])
chatbot.conversations.rollback("default", num)
print(f"Removed {num} messages from the conversation")
except IndexError:
print("Please specify the number of messages to remove")
except ValueError:
print("Please specify a valid number of messages to remove")
elif command == "!exit":
print("Exiting...")
sys.exit(0)
else:
return False
return True
try:
while True:
prompt = get_input("\nYou:\n")
if prompt.startswith("!"):
if commands(prompt):
continue
print("ChatGPT:")
async for line in chatbot.ask(prompt=prompt):
result = line["choices"][0]["text"].replace("<|im_end|>", "")
print(result, end="")
sys.stdout.flush()
print()
except KeyboardInterrupt:
print("Exiting...")
sys.exit(0)
if __name__ == "__main__":
asyncio.run(main())
| [
"You are ChatGPT, a large language model by OpenAI. Respond conversationally\n\n\n",
"ChatGPT",
"\nYou:\n",
"User",
"BASE_PROMPT"
] |
2024-01-10 | kurtseifried/ChatGPT-API | src~revChatGPT~V1.py | """
Standard ChatGPT
"""
import json
import logging
import sys
import uuid
from os import environ
from os import getenv
from os.path import exists
import requests
from OpenAIAuth.OpenAIAuth import OpenAIAuth
# Disable all logging
logging.basicConfig(level=logging.ERROR)
BASE_URL = environ.get("CHATGPT_BASE_URL") or "https://chatgpt.duti.tech/"
class Error(Exception):
"""Base class for exceptions in this module."""
source: str
message: str
code: int
class Chatbot:
"""
Chatbot class for ChatGPT
"""
def __init__(
self,
config,
conversation_id=None,
parent_id=None,
) -> None:
self.config = config
self.session = requests.Session()
if "proxy" in config:
if isinstance(config["proxy"], str) is False:
raise Exception("Proxy must be a string!")
proxies = {
"http": config["proxy"],
"https": config["proxy"],
}
self.session.proxies.update(proxies)
if "verbose" in config:
if type(config["verbose"]) != bool:
raise Exception("Verbose must be a boolean!")
self.verbose = config["verbose"]
else:
self.verbose = False
self.conversation_id = conversation_id
self.parent_id = parent_id
self.conversation_mapping = {}
self.conversation_id_prev_queue = []
self.parent_id_prev_queue = []
if "email" not in config:
raise Exception("Email not found in config!")
if "password" not in config:
raise Exception("Password not found in config!")
self.__login()
def __refresh_headers(self, access_token):
self.session.headers.clear()
self.session.headers.update(
{
"Accept": "text/event-stream",
"Authorization": f"Bearer {access_token}",
"Content-Type": "application/json",
"X-Openai-Assistant-App-Id": "",
"Connection": "close",
"Accept-Language": "en-US,en;q=0.9",
"Referer": "https://chat.openai.com/chat",
},
)
def __login(self):
auth = OpenAIAuth(
email_address=self.config.get("email"),
password=self.config.get("password"),
proxy=self.config.get("proxy"),
)
auth.begin()
access_token = auth.get_access_token()
self.__refresh_headers(access_token)
def ask(
self,
prompt,
conversation_id=None,
parent_id=None,
# gen_title=True,
):
"""
Ask a question to the chatbot
:param prompt: String
:param conversation_id: UUID
:param parent_id: UUID
:param gen_title: Boolean
"""
if conversation_id is not None and parent_id is None:
self.__map_conversations()
if conversation_id is None:
conversation_id = self.conversation_id
if parent_id is None:
parent_id = (
self.parent_id
if conversation_id == self.conversation_id
else self.conversation_mapping[conversation_id]
)
# new_conv = conversation_id is None
data = {
"action": "next",
"messages": [
{
"id": str(uuid.uuid4()),
"role": "user",
"content": {"content_type": "text", "parts": [prompt]},
},
],
"conversation_id": conversation_id,
"parent_message_id": parent_id or str(uuid.uuid4()),
"model": "text-davinci-002-render"
if not self.config.get("paid")
else "text-davinci-002-render-paid",
}
# new_conv = data["conversation_id"] is None
self.conversation_id_prev_queue.append(
data["conversation_id"],
) # for rollback
self.parent_id_prev_queue.append(data["parent_message_id"])
response = self.session.post(
url=BASE_URL + "backend-api/conversation",
data=json.dumps(data),
timeout=360,
stream=True,
)
self.__check_response(response)
compounded_resp = ""
for line in response.iter_lines():
# Decode the raw bytes instead of relying on str()'s b'...' representation
line = line.decode("utf-8")
if line == "" or line is None:
continue
if "data: " in line:
line = line[6:]
if line == "[DONE]":
break
# Try parse JSON
try:
line = json.loads(line)
except json.decoder.JSONDecodeError:
continue
if not self.__check_fields(line):
continue
message = line["message"]["content"]["parts"][0][len(compounded_resp) :]
compounded_resp += message
conversation_id = line["conversation_id"]
parent_id = line["message"]["id"]
yield {
"message": message,
"conversation_id": conversation_id,
"parent_id": parent_id,
}
# if gen_title and new_conv:
# self.__gen_title(
# self.conversation_id,
# parent_id,
# )
def __check_fields(self, data: dict) -> bool:
try:
data["message"]["content"]
except TypeError:
return False
except KeyError:
return False
return True
def __check_response(self, response):
if response.status_code != 200:
print(response.text)
error = Error()
error.source = "OpenAI"
error.code = response.status_code
error.message = response.text
raise error
def get_conversations(self, offset=0, limit=20):
"""
Get conversations
:param offset: Integer
:param limit: Integer
"""
url = BASE_URL + f"backend-api/conversations?offset={offset}&limit={limit}"
response = self.session.get(url)
self.__check_response(response)
data = json.loads(response.text)
return data["items"]
def get_msg_history(self, convo_id):
"""
Get message history
:param id: UUID of conversation
"""
url = BASE_URL + f"backend-api/conversation/{convo_id}"
response = self.session.get(url)
self.__check_response(response)
data = json.loads(response.text)
return data
# def __gen_title(self, convo_id, message_id):
# """
# Generate title for conversation
# """
# url = BASE_URL + f"backend-api/conversation/gen_title/{convo_id}"
# response = self.session.post(
# url,
# data=json.dumps(
# {"message_id": message_id, "model": "text-davinci-002-render"},
# ),
# )
# self.__check_response(response)
def change_title(self, convo_id, title):
"""
Change title of conversation
:param id: UUID of conversation
:param title: String
"""
url = BASE_URL + f"backend-api/conversation/{convo_id}"
response = self.session.patch(url, data=json.dumps({"title": title}))
self.__check_response(response)
def delete_conversation(self, convo_id):
"""
Delete conversation
:param id: UUID of conversation
"""
url = BASE_URL + f"backend-api/conversation/{convo_id}"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
def clear_conversations(self):
"""
Delete all conversations
"""
url = BASE_URL + "backend-api/conversations"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
def __map_conversations(self):
conversations = self.get_conversations()
histories = [self.get_msg_history(x["id"]) for x in conversations]
for x, y in zip(conversations, histories):
self.conversation_mapping[x["id"]] = y["current_node"]
def reset_chat(self) -> None:
"""
Reset the conversation ID and parent ID.
:return: None
"""
self.conversation_id = None
self.parent_id = str(uuid.uuid4())
def rollback_conversation(self, num=1) -> None:
"""
Rollback the conversation.
:param num: The number of messages to rollback
:return: None
"""
for _ in range(num):
self.conversation_id = self.conversation_id_prev_queue.pop()
self.parent_id = self.parent_id_prev_queue.pop()
def get_input(prompt):
"""
Multiline input function.
"""
# Display the prompt
print(prompt, end="")
# Initialize an empty list to store the input lines
lines = []
# Read lines of input until the user enters an empty line
while True:
line = input()
if line == "":
break
lines.append(line)
# Join the lines, separated by newlines, and store the result
user_input = "\n".join(lines)
# Return the input
return user_input
def configure():
"""
Looks for a config file in the following locations:
"""
config_files = ["config.json"]
xdg_config_home = getenv("XDG_CONFIG_HOME")
if xdg_config_home:
config_files.append(f"{xdg_config_home}/revChatGPT/config.json")
user_home = getenv("HOME")
if user_home:
config_files.append(f"{user_home}/.config/revChatGPT/config.json")
config_file = next((f for f in config_files if exists(f)), None)
if config_file:
with open(config_file, encoding="utf-8") as f:
config = json.load(f)
else:
print("No config file found.")
raise Exception("No config file found.")
return config
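# --- Illustrative config.json (comments only; not part of the original module) ---
# A minimal example of the file configure() looks for; the credentials are
# placeholders and "paid" is optional:
#
# {
#   "email": "you@example.com",
#   "password": "your-password",
#   "paid": false
# }
#
# An optional "proxy" key may also be added as a string, e.g. "http://localhost:8080".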
def main(config):
"""
Main function for the chatGPT program.
"""
print("Logging in...")
chatbot = Chatbot(config)
while True:
prompt = get_input("\nYou:\n")
if prompt.startswith("!"):
if prompt == "!help":
print(
"""
!help - Show this message
!reset - Forget the current conversation
!config - Show the current configuration
!rollback x - Rollback the conversation (x being the number of messages to rollback)
!exit - Exit this program
""",
)
continue
elif prompt == "!reset":
chatbot.reset_chat()
print("Chat session successfully reset.")
continue
elif prompt == "!config":
print(json.dumps(chatbot.config, indent=4))
continue
elif prompt.startswith("!rollback"):
# Default to 1 rollback if no number is specified
try:
rollback = int(prompt.split(" ")[1])
except IndexError:
rollback = 1
chatbot.rollback_conversation(rollback)
print(f"Rolled back {rollback} messages.")
continue
elif prompt.startswith("!setconversation"):
try:
chatbot.config["conversation"] = prompt.split(" ")[1]
print("Conversation has been changed")
except IndexError:
print("Please include conversation UUID in command")
continue
elif prompt == "!exit":
break
print("Chatbot: ")
for data in chatbot.ask(
prompt,
conversation_id=chatbot.config.get("conversation"),
parent_id=chatbot.config.get("parent_id"),
):
print(data["message"], end="")
sys.stdout.flush()
print()
# print(message["message"])
if __name__ == "__main__":
print(
"""
ChatGPT - A command-line interface to OpenAI's ChatGPT (https://chat.openai.com/chat)
Repo: github.com/acheong08/ChatGPT
""",
)
print("Type '!help' to show a full list of commands")
print("Press enter twice to submit your question.\n")
main(configure())
| [
"text",
"\nYou:\n",
"content_type"
] |
2024-01-10 | dlt-hub/dlt-pipeline-pdf-invoice-tracking | invoice_tracking_pipeline~invoice_tracking.py | import dlt
import os
from typing import Dict, List, Optional
from google_drive_connector import download_pdf_from_google_drive, get_pdf_uris
from langchain.document_loaders import UnstructuredPDFLoader
from langchain.indexes import VectorstoreIndexCreator
# Add your own folder id (from Google Drive URL) (or use config.toml)
folder_id=None
def safely_query_index(index, query) -> Optional[str]:
    try:
        return index.query(query).strip()
    except Exception:
        # Return None rather than an empty list so every extracted field stays string-typed
        return None
def process_one_pdf_to_structured(path_to_pdf:str) -> Dict:
loader = UnstructuredPDFLoader(path_to_pdf)
index = VectorstoreIndexCreator().from_loaders([loader])
return {
"file_name": path_to_pdf.split("/")[-1],
"recipient_company_name": safely_query_index(index, "Who is the recipient of the invoice? Just return the name"),
"invoice_amount": safely_query_index(index, "What is the total amount of the invoice? Just return the amount as decimal number, no currency or text"),
"invoice_date": safely_query_index(index, "What is the date of the invoice? Just return the date"),
"invoice_number": safely_query_index(index, "What is the invoice number? Just return the number"),
"service_description": safely_query_index(index, "What is the description of the service that this invoice is for? Just return the description"),
}
def process_all_pdfs_to_structured(path_to_pdfs: str) -> List[Dict]:
for file in os.listdir(path_to_pdfs):
if file.endswith(".pdf"):
yield process_one_pdf_to_structured(os.path.join(path_to_pdfs,file))
return []
def download_and_process_one_pdf(file_id, file_name, local_folder_to_store_pdfs:str="./data/invoices", delete_after_extraction=True):
download_pdf_from_google_drive(file_id, file_name, local_folder_to_store_pdfs)
structured_data = process_one_pdf_to_structured(os.path.join(local_folder_to_store_pdfs, file_name))
if delete_after_extraction:
os.remove(os.path.join(local_folder_to_store_pdfs, file_name))
yield structured_data
@dlt.source
def invoice_tracking_source(drive_folder_id=dlt.config.value, delete_after_extraction=True):
return invoice_tracking_resources(drive_folder_id, delete_after_extraction)
@dlt.resource(write_disposition="append")
def invoice_tracking_resources(drive_folder_id, delete_after_extraction):
uris = get_pdf_uris(drive_folder_id)
for file_name, file_id in uris.items():
yield download_and_process_one_pdf(file_id, file_name, delete_after_extraction=delete_after_extraction)
if __name__ == "__main__":
pipeline = dlt.pipeline(pipeline_name="invoice_tracking", destination="duckdb", dataset_name="invoice_tracking_data")
# print(list(invoice_tracking_source()))
load_info = pipeline.run(invoice_tracking_source())
print(load_info)
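# --- Illustrative configuration (comments only; not part of the original module) ---
# `drive_folder_id` is resolved through dlt.config.value. One simple way to supply
# it is a top-level entry in .dlt/config.toml (the value below is a placeholder;
# dlt also accepts more specific section layouts):
#
# drive_folder_id = "1AbC...xyz"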
| [] |
2024-01-10 | danny-avila/LibreGPT | models~index_model.py | import functools
import os
import random
import tempfile
import traceback
import asyncio
from collections import defaultdict
import aiohttp
import discord
import aiofiles
import openai
import tiktoken
from functools import partial
from typing import List, Optional, Union
from pathlib import Path
from datetime import date
from discord import InteractionResponse, Interaction
from discord.ext import pages
from langchain import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAIChat
from langchain.memory import ConversationBufferMemory
from llama_index.callbacks import CallbackManager, TokenCountingHandler
from llama_index.node_parser import SimpleNodeParser
from llama_index.schema import NodeRelationship
from llama_index.indices.query.query_transform import StepDecomposeQueryTransform
from llama_index.langchain_helpers.agents import (
IndexToolConfig,
LlamaToolkit,
create_llama_chat_agent,
)
from llama_index.prompts.chat_prompts import CHAT_REFINE_PROMPT
from llama_index.readers import YoutubeTranscriptReader
from llama_index.readers.schema.base import Document
from llama_index.langchain_helpers.text_splitter import TokenTextSplitter
from llama_index.retrievers import VectorIndexRetriever, TreeSelectLeafRetriever
from llama_index.query_engine import RetrieverQueryEngine, MultiStepQueryEngine
from llama_index import (
GPTVectorStoreIndex,
SimpleDirectoryReader,
QuestionAnswerPrompt,
BeautifulSoupWebReader,
GPTTreeIndex,
GoogleDocsReader,
MockLLMPredictor,
OpenAIEmbedding,
GithubRepositoryReader,
MockEmbedding,
download_loader,
LLMPredictor,
ServiceContext,
StorageContext,
load_index_from_storage,
get_response_synthesizer,
)
from llama_index.schema import TextNode
from llama_index.storage.docstore.types import RefDocInfo
from llama_index.readers.web import DEFAULT_WEBSITE_EXTRACTOR
from llama_index.composability import ComposableGraph
from models.embed_statics_model import EmbedStatics
from models.openai_model import Models
from models.check_model import UrlCheck
from services.environment_service import EnvService
SHORT_TO_LONG_CACHE = {}
MAX_DEEP_COMPOSE_PRICE = EnvService.get_max_deep_compose_price()
EpubReader = download_loader("EpubReader")
MarkdownReader = download_loader("MarkdownReader")
RemoteReader = download_loader("RemoteReader")
RemoteDepthReader = download_loader("RemoteDepthReader")
embedding_model = OpenAIEmbedding()
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model("text-davinci-003").encode,
verbose=False,
)
node_parser = SimpleNodeParser.from_defaults(
text_splitter=TokenTextSplitter(chunk_size=256, chunk_overlap=20)
)
callback_manager = CallbackManager([token_counter])
service_context = ServiceContext.from_defaults(
embed_model=embedding_model,
callback_manager=callback_manager,
node_parser=node_parser,
)
def get_and_query(
user_id,
index_storage,
query,
response_mode,
nodes,
child_branch_factor,
service_context,
multistep,
):
index: [GPTVectorStoreIndex, GPTTreeIndex] = index_storage[
user_id
].get_index_or_throw()
if isinstance(index, GPTTreeIndex):
retriever = TreeSelectLeafRetriever(
index=index,
child_branch_factor=child_branch_factor,
service_context=service_context,
)
else:
retriever = VectorIndexRetriever(
index=index, similarity_top_k=nodes, service_context=service_context
)
response_synthesizer = get_response_synthesizer(
response_mode=response_mode,
use_async=True,
refine_template=CHAT_REFINE_PROMPT,
service_context=service_context,
)
query_engine = RetrieverQueryEngine(
retriever=retriever, response_synthesizer=response_synthesizer
)
multistep_query_engine = MultiStepQueryEngine(
query_engine=query_engine,
query_transform=StepDecomposeQueryTransform(multistep),
index_summary="Provides information about everything you need to know about this topic, use this to answer the question.",
)
if multistep:
response = multistep_query_engine.query(query)
else:
response = query_engine.query(query)
return response
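# Note on get_and_query: the retriever is chosen from the stored index type (tree
# indexes use TreeSelectLeafRetriever with the given child_branch_factor, vector
# indexes use top-k similarity retrieval), the result is wrapped in a
# RetrieverQueryEngine, and the MultiStepQueryEngine wrapper is only used when a
# multistep predictor is passed in.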
class IndexData:
def __init__(self):
self.queryable_index = None
self.individual_indexes = []
# A safety check for the future
def get_index_or_throw(self):
if not self.queryable():
raise Exception(
"An index access was attempted before an index was created. This is a programmer error, please report this to the maintainers."
)
return self.queryable_index
def queryable(self):
return self.queryable_index is not None
def has_indexes(self, user_id):
try:
return (
len(os.listdir(EnvService.find_shared_file(f"indexes/{user_id}"))) > 0
)
except Exception:
return False
def has_search_indexes(self, user_id):
try:
return (
len(
os.listdir(EnvService.find_shared_file(f"indexes/{user_id}_search"))
)
> 0
)
except Exception:
return False
def add_index(self, index, user_id, file_name):
self.individual_indexes.append(index)
self.queryable_index = index
# Create a folder called "indexes/{USER_ID}" if it doesn't exist already
Path(f"{EnvService.save_path()}/indexes/{user_id}").mkdir(
parents=True, exist_ok=True
)
# Save the index to file under the user id
file = f"{date.today().month}_{date.today().day}_{file_name}"
# If file is > 93 in length, cut it off to 93
if len(file) > 93:
file = file[:93]
index.storage_context.persist(
persist_dir=EnvService.save_path()
/ "indexes"
/ f"{str(user_id)}"
/ f"{file}"
)
def reset_indexes(self, user_id):
self.individual_indexes = []
self.queryable_index = None
# Delete the user indexes
try:
# First, clear all the files inside it
for file in os.listdir(EnvService.find_shared_file(f"indexes/{user_id}")):
try:
os.remove(EnvService.find_shared_file(f"indexes/{user_id}/{file}"))
except:
traceback.print_exc()
for file in os.listdir(
EnvService.find_shared_file(f"indexes/{user_id}_search")
):
try:
os.remove(
EnvService.find_shared_file(f"indexes/{user_id}_search/{file}")
)
except:
traceback.print_exc()
except Exception:
traceback.print_exc()
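# IndexData keeps per-user state: each newly built index is appended to
# individual_indexes, the most recent one becomes queryable_index, and persisted
# copies are written under indexes/{user_id} (search indexes under
# indexes/{user_id}_search).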
class Index_handler:
embedding_model = OpenAIEmbedding()
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model("text-davinci-003").encode,
verbose=False,
)
node_parser = SimpleNodeParser.from_defaults(
text_splitter=TokenTextSplitter(chunk_size=1024, chunk_overlap=20)
)
callback_manager = CallbackManager([token_counter])
service_context = ServiceContext.from_defaults(
embed_model=embedding_model,
callback_manager=callback_manager,
node_parser=node_parser,
)
def __init__(self, bot, usage_service):
self.bot = bot
self.openai_key = os.getenv("OPENAI_TOKEN")
self.index_storage = defaultdict(IndexData)
self.loop = asyncio.get_running_loop()
self.usage_service = usage_service
self.qaprompt = QuestionAnswerPrompt(
"Context information is below. The text '<|endofstatement|>' is used to separate chat entries and make it easier for you to understand the context\n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"Never say '<|endofstatement|>'\n"
"Given the context information and not prior knowledge, "
"answer the question: {query_str}\n"
)
self.EMBED_CUTOFF = 2000
self.index_chat_chains = {}
async def rename_index(self, ctx, original_path, rename_path):
"""Command handler to rename a user index"""
index_file = EnvService.find_shared_file(original_path)
if not index_file:
return False
# Rename the file at f"indexes/{ctx.user.id}/{user_index}" to f"indexes/{ctx.user.id}/{new_name}" using Pathlib
try:
Path(original_path).rename(rename_path)
return True
except Exception as e:
traceback.print_exc()
return False
async def get_is_in_index_chat(self, ctx):
return ctx.channel.id in self.index_chat_chains
async def execute_index_chat_message(self, ctx, message):
if ctx.channel.id not in self.index_chat_chains:
return None
if message.lower() in ["stop", "end", "quit", "exit"]:
await ctx.reply("Ending chat session.")
self.index_chat_chains.pop(ctx.channel.id)
# close the thread
thread = await self.bot.fetch_channel(ctx.channel.id)
await thread.edit(name="Closed-GPT")
await thread.edit(archived=True)
return "Ended chat session."
agent_output = await self.loop.run_in_executor(
None, partial(self.index_chat_chains[ctx.channel.id].run, message)
)
return agent_output
async def start_index_chat(self, ctx, search, user, model):
if search:
index_file = EnvService.find_shared_file(
f"indexes/{ctx.user.id}_search/{search}"
)
elif user:
index_file = EnvService.find_shared_file(f"indexes/{ctx.user.id}/{user}")
assert index_file is not None
preparation_message = await ctx.channel.send(
embed=EmbedStatics.get_index_chat_preparation_message()
)
index = await self.loop.run_in_executor(
None, partial(self.index_load_file, index_file)
)
summary_response = await self.loop.run_in_executor(
None,
partial(
index.as_query_engine().query, "What is a summary of this document?"
),
)
query_engine = index.as_query_engine(similarity_top_k=3)
tool_config = IndexToolConfig(
query_engine=query_engine,
name=f"Vector Index",
description=f"useful for when you want to answer queries about the external data you're connected to. The data you're connected to is: {summary_response}",
tool_kwargs={"return_direct": True},
)
toolkit = LlamaToolkit(
index_configs=[tool_config],
)
memory = ConversationBufferMemory(memory_key="chat_history")
llm = ChatOpenAI(model=model, temperature=0)
agent_chain = create_llama_chat_agent(toolkit, llm, memory=memory, verbose=True)
embed_title = f"{ctx.user.name}'s data-connected conversation with GPT"
# Get only the last part after the last / of the index_file
try:
index_file_name = str(index_file).split("/")[-1]
except:
index_file_name = index_file
message_embed = discord.Embed(
title=embed_title,
description=f"The agent is connected to the data index named {index_file_name}\nModel: {model}",
color=0x00995B,
)
message_embed.set_thumbnail(url="https://i.imgur.com/7V6apMT.png")
message_embed.set_footer(
text="Data Chat", icon_url="https://i.imgur.com/7V6apMT.png"
)
message_thread = await ctx.send(embed=message_embed)
thread = await message_thread.create_thread(
name=ctx.user.name + "'s data-connected conversation with GPT",
auto_archive_duration=60,
)
await ctx.respond("Conversation started.")
try:
await preparation_message.delete()
except:
pass
self.index_chat_chains[thread.id] = agent_chain
async def paginate_embed(self, response_text):
"""Given a response text make embed pages and return a list of the pages."""
response_text = [
response_text[i : i + self.EMBED_CUTOFF]
for i in range(0, len(response_text), self.EMBED_CUTOFF)
]
pages = []
first = False
# Send each chunk as a message
for count, chunk in enumerate(response_text, start=1):
if not first:
page = discord.Embed(
title=f"Index Query Results",
description=chunk,
)
first = True
else:
page = discord.Embed(
title=f"Page {count}",
description=chunk,
)
pages.append(page)
return pages
def index_file(
self, file_path, service_context, suffix=None
) -> GPTVectorStoreIndex:
if suffix and suffix == ".md":
loader = MarkdownReader()
document = loader.load_data(file_path)
elif suffix and suffix == ".epub":
epub_loader = EpubReader()
document = epub_loader.load_data(file_path)
else:
document = SimpleDirectoryReader(input_files=[file_path]).load_data()
index = GPTVectorStoreIndex.from_documents(
document, service_context=service_context, use_async=True
)
return index
def index_gdoc(self, doc_id, service_context) -> GPTVectorStoreIndex:
document = GoogleDocsReader().load_data(doc_id)
index = GPTVectorStoreIndex.from_documents(
document, service_context=service_context, use_async=True
)
return index
def index_youtube_transcript(self, link, service_context):
try:
documents = YoutubeTranscriptReader().load_data(ytlinks=[link])
except Exception as e:
raise ValueError(f"The youtube transcript couldn't be loaded: {e}")
index = GPTVectorStoreIndex.from_documents(
documents,
service_context=service_context,
use_async=True,
)
return index
def index_github_repository(self, link, service_context):
# Extract the "owner" and the "repo" name from the github link.
owner = link.split("/")[3]
repo = link.split("/")[4]
try:
documents = GithubRepositoryReader(owner=owner, repo=repo).load_data(
branch="main"
)
except KeyError:
documents = GithubRepositoryReader(owner=owner, repo=repo).load_data(
branch="master"
)
index = GPTVectorStoreIndex.from_documents(
documents,
service_context=service_context,
use_async=True,
)
return index
def index_load_file(self, file_path) -> [GPTVectorStoreIndex, ComposableGraph]:
storage_context = StorageContext.from_defaults(persist_dir=file_path)
index = load_index_from_storage(storage_context)
return index
def index_discord(self, document, service_context) -> GPTVectorStoreIndex:
index = GPTVectorStoreIndex.from_documents(
document,
service_context=service_context,
use_async=True,
)
return index
async def index_pdf(self, url) -> list[Document]:
# Download the PDF at the url and save it to a tempfile
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
if response.status == 200:
data = await response.read()
f = tempfile.NamedTemporaryFile(suffix=".pdf", delete=False)
f.write(data)
f.close()
else:
return "An error occurred while downloading the PDF."
        # Load the downloaded PDF from the temporary file
        documents = SimpleDirectoryReader(input_files=[f.name]).load_data()
        # Delete the temporary file now that its contents have been loaded
        os.remove(f.name)
        return documents
async def index_webpage(self, url, service_context) -> GPTVectorStoreIndex:
# First try to connect to the URL to see if we can even reach it.
try:
async with aiohttp.ClientSession() as session:
async with session.get(url, timeout=5) as response:
                    # Bail out if the URL did not respond with a success status
if response.status not in [200, 203, 202, 204]:
raise ValueError(
"Invalid URL or could not connect to the provided URL."
)
else:
# Detect if the link is a PDF, if it is, we load it differently
if response.headers["Content-Type"] == "application/pdf":
documents = await self.index_pdf(url)
index = await self.loop.run_in_executor(
None,
functools.partial(
GPTVectorStoreIndex.from_documents,
documents=documents,
service_context=service_context,
use_async=True,
),
)
return index
except:
traceback.print_exc()
raise ValueError("Could not load webpage")
documents = BeautifulSoupWebReader(
website_extractor=DEFAULT_WEBSITE_EXTRACTOR
).load_data(urls=[url])
# index = GPTVectorStoreIndex(documents, embed_model=embed_model, use_async=True)
index = await self.loop.run_in_executor(
None,
functools.partial(
GPTVectorStoreIndex.from_documents,
documents=documents,
service_context=service_context,
use_async=True,
),
)
return index
def reset_indexes(self, user_id):
self.index_storage[user_id].reset_indexes(user_id)
async def set_file_index(
self, ctx: discord.ApplicationContext, file: discord.Attachment, user_api_key
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
type_to_suffix_mappings = {
"text/plain": ".txt",
"text/csv": ".csv",
"application/pdf": ".pdf",
"application/json": ".json",
"image/png": ".png",
"image/": ".jpg",
"ms-powerpoint": ".ppt",
"presentationml.presentation": ".pptx",
"ms-excel": ".xls",
"spreadsheetml.sheet": ".xlsx",
"msword": ".doc",
"wordprocessingml.document": ".docx",
"audio/": ".mp3",
"video/": ".mp4",
"epub": ".epub",
"markdown": ".md",
"html": ".html",
}
        # For when the content type doesn't get picked up by Discord.
secondary_mappings = {
".epub": ".epub",
}
try:
# First, initially set the suffix to the suffix of the attachment
suffix = None
if file.content_type:
# Apply the suffix mappings to the file
for key, value in type_to_suffix_mappings.items():
if key in file.content_type:
suffix = value
break
if not suffix:
await ctx.send("This file type is not supported.")
return
else:
for key, value in secondary_mappings.items():
if key in file.filename:
suffix = value
break
if not suffix:
await ctx.send(
"Could not determine the file type of the attachment, attempting a dirty index.."
)
return
# Send indexing message
response = await ctx.respond(
embed=EmbedStatics.build_index_progress_embed()
)
async with aiofiles.tempfile.TemporaryDirectory() as temp_path:
async with aiofiles.tempfile.NamedTemporaryFile(
suffix=suffix, dir=temp_path, delete=False
) as temp_file:
await file.save(temp_file.name)
index = await self.loop.run_in_executor(
None,
partial(
self.index_file,
Path(temp_file.name),
service_context,
suffix,
),
)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
try:
price = await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
)
except:
traceback.print_exc()
price = "Unknown"
file_name = file.filename
self.index_storage[ctx.user.id].add_index(index, ctx.user.id, file_name)
await response.edit(
embed=EmbedStatics.get_index_set_success_embed(str(price))
)
except Exception as e:
await ctx.channel.send(
embed=EmbedStatics.get_index_set_failure_embed(str(e))
)
traceback.print_exc()
async def set_link_index_recurse(
self, ctx: discord.ApplicationContext, link: str, depth, user_api_key
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
response = await ctx.respond(embed=EmbedStatics.build_index_progress_embed())
try:
# Pre-emptively connect and get the content-type of the response
try:
async with aiohttp.ClientSession() as session:
async with session.get(link, timeout=2) as _response:
print(_response.status)
if _response.status == 200:
content_type = _response.headers.get("content-type")
else:
await response.edit(
embed=EmbedStatics.get_index_set_failure_embed(
"Invalid URL or could not connect to the provided URL."
)
)
return
except Exception as e:
traceback.print_exc()
await response.edit(
embed=EmbedStatics.get_index_set_failure_embed(
"Invalid URL or could not connect to the provided URL. "
+ str(e)
)
)
return
# Check if the link contains youtube in it
loader = RemoteDepthReader(depth=depth)
documents = await self.loop.run_in_executor(
None, partial(loader.load_data, [link])
)
index = await self.loop.run_in_executor(
None,
functools.partial(
GPTVectorStoreIndex,
documents=documents,
service_context=service_context,
use_async=True,
),
)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
try:
price = await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
)
except:
traceback.print_exc()
price = "Unknown"
# Make the url look nice, remove https, useless stuff, random characters
file_name = (
link.replace("https://", "")
.replace("http://", "")
.replace("www.", "")
.replace("/", "_")
.replace("?", "_")
.replace("&", "_")
.replace("=", "_")
.replace("-", "_")
.replace(".", "_")
)
self.index_storage[ctx.user.id].add_index(index, ctx.user.id, file_name)
except ValueError as e:
await response.edit(embed=EmbedStatics.get_index_set_failure_embed(str(e)))
traceback.print_exc()
return
except Exception as e:
await response.edit(embed=EmbedStatics.get_index_set_failure_embed(str(e)))
traceback.print_exc()
return
await response.edit(embed=EmbedStatics.get_index_set_success_embed(price))
async def set_link_index(
self, ctx: discord.ApplicationContext, link: str, user_api_key
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
response = await ctx.respond(embed=EmbedStatics.build_index_progress_embed())
try:
# Pre-emptively connect and get the content-type of the response
try:
async with aiohttp.ClientSession() as session:
async with session.get(link, timeout=2) as _response:
print(_response.status)
if _response.status == 200:
content_type = _response.headers.get("content-type")
else:
await response.edit(
embed=EmbedStatics.get_index_set_failure_embed(
"Invalid URL or could not connect to the provided URL."
)
)
return
except Exception as e:
traceback.print_exc()
await response.edit(
embed=EmbedStatics.get_index_set_failure_embed(
"Invalid URL or could not connect to the provided URL. "
+ str(e)
)
)
return
# Check if the link contains youtube in it
if await UrlCheck.check_youtube_link(link):
index = await self.loop.run_in_executor(
None, partial(self.index_youtube_transcript, link, service_context)
)
elif "github" in link:
index = await self.loop.run_in_executor(
None, partial(self.index_github_repository, link, service_context)
)
else:
index = await self.index_webpage(link, service_context)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
try:
price = await self.usage_service.get_price(
                    token_counter.total_embedding_token_count, "embedding"
)
except:
traceback.print_exc()
price = "Unknown"
# Make the url look nice, remove https, useless stuff, random characters
file_name = (
link.replace("https://", "")
.replace("http://", "")
.replace("www.", "")
.replace("/", "_")
.replace("?", "_")
.replace("&", "_")
.replace("=", "_")
.replace("-", "_")
.replace(".", "_")
)
self.index_storage[ctx.user.id].add_index(index, ctx.user.id, file_name)
except ValueError as e:
await response.edit(embed=EmbedStatics.get_index_set_failure_embed(str(e)))
traceback.print_exc()
return
except Exception as e:
await response.edit(embed=EmbedStatics.get_index_set_failure_embed(str(e)))
traceback.print_exc()
return
await response.edit(embed=EmbedStatics.get_index_set_success_embed(price))
async def set_discord_index(
self,
ctx: discord.ApplicationContext,
channel: discord.TextChannel,
user_api_key,
message_limit: int = 2500,
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
try:
document = await self.load_data(
channel_ids=[channel.id], limit=message_limit, oldest_first=False
)
index = await self.loop.run_in_executor(
None, partial(self.index_discord, document, service_context)
)
try:
price = await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
)
except Exception:
traceback.print_exc()
price = "Unknown"
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
self.index_storage[ctx.user.id].add_index(index, ctx.user.id, channel.name)
await ctx.respond(embed=EmbedStatics.get_index_set_success_embed(price))
except Exception as e:
await ctx.respond(embed=EmbedStatics.get_index_set_failure_embed(str(e)))
traceback.print_exc()
async def load_index(
self, ctx: discord.ApplicationContext, index, server, search, user_api_key
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
try:
if server:
index_file = EnvService.find_shared_file(
f"indexes/{ctx.guild.id}/{index}"
)
elif search:
index_file = EnvService.find_shared_file(
f"indexes/{ctx.user.id}_search/{index}"
)
else:
index_file = EnvService.find_shared_file(
f"indexes/{ctx.user.id}/{index}"
)
index = await self.loop.run_in_executor(
None, partial(self.index_load_file, index_file)
)
self.index_storage[ctx.user.id].queryable_index = index
await ctx.respond(embed=EmbedStatics.get_index_load_success_embed())
except Exception as e:
traceback.print_exc()
await ctx.respond(embed=EmbedStatics.get_index_load_failure_embed(str(e)))
async def index_to_docs(
self, old_index, chunk_size: int = 4000, chunk_overlap: int = 200
) -> List[Document]:
documents = []
docstore = old_index.docstore
ref_docs = old_index.ref_doc_info
for document in ref_docs.values():
text = ""
for node in document.node_ids:
node = docstore.get_node(node)
text += f"{node.text} "
text_splitter = TokenTextSplitter(
separator=" ", chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
text_chunks = text_splitter.split_text(text)
for chunk_text in text_chunks:
new_doc = Document(text=chunk_text, extra_info=document.metadata)
documents.append(new_doc)
return documents
async def compose_indexes(self, user_id, indexes, name, deep_compose):
# Load all the indexes first
index_objects = []
for _index in indexes:
try:
index_file = EnvService.find_shared_file(f"indexes/{user_id}/{_index}")
except ValueError:
index_file = EnvService.find_shared_file(
f"indexes/{user_id}_search/{_index}"
)
index = await self.loop.run_in_executor(
None, partial(self.index_load_file, index_file)
)
index_objects.append(index)
llm_predictor = LLMPredictor(
llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")
)
# For each index object, add its documents to a GPTTreeIndex
if deep_compose:
documents = []
for _index in index_objects:
documents.extend(await self.index_to_docs(_index, 256, 20))
embedding_model = OpenAIEmbedding()
llm_predictor_mock = MockLLMPredictor()
embedding_model_mock = MockEmbedding(1536)
token_counter_mock = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model("text-davinci-003").encode,
verbose=False,
)
callback_manager_mock = CallbackManager([token_counter_mock])
service_context_mock = ServiceContext.from_defaults(
llm_predictor=llm_predictor_mock,
embed_model=embedding_model_mock,
callback_manager=callback_manager_mock,
)
# Run the mock call first
await self.loop.run_in_executor(
None,
partial(
GPTTreeIndex.from_documents,
documents=documents,
service_context=service_context_mock,
),
)
total_usage_price = await self.usage_service.get_price(
token_counter_mock.total_llm_token_count,
"turbo", # TODO Enable again when tree indexes are fixed
) + await self.usage_service.get_price(
token_counter_mock.total_embedding_token_count, "embedding"
)
print("The total composition price is: ", total_usage_price)
if total_usage_price > MAX_DEEP_COMPOSE_PRICE:
raise ValueError(
"Doing this deep search would be prohibitively expensive. Please try a narrower search scope."
)
tree_index = await self.loop.run_in_executor(
None,
partial(
GPTTreeIndex.from_documents,
documents=documents,
service_context=self.service_context,
use_async=True,
),
)
await self.usage_service.update_usage(
self.token_counter.total_llm_token_count, "turbo"
)
await self.usage_service.update_usage(
self.token_counter.total_embedding_token_count, "embedding"
)
# Now we have a list of tree indexes, we can compose them
if not name:
name = f"{date.today().month}_{date.today().day}_composed_deep_index"
# Save the composed index
tree_index.storage_context.persist(
persist_dir=EnvService.save_path() / "indexes" / str(user_id) / name
)
self.index_storage[user_id].queryable_index = tree_index
return total_usage_price
else:
documents = []
for _index in index_objects:
documents.extend(await self.index_to_docs(_index))
simple_index = await self.loop.run_in_executor(
None,
partial(
GPTVectorStoreIndex.from_documents,
documents=documents,
service_context=service_context,
use_async=True,
),
)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
if not name:
name = f"{date.today().month}_{date.today().day}_composed_index"
# Save the composed index
simple_index.storage_context.persist(
persist_dir=EnvService.save_path() / "indexes" / str(user_id) / name
)
self.index_storage[user_id].queryable_index = simple_index
try:
price = await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
)
except:
price = "Unknown"
return price
async def backup_discord(
self, ctx: discord.ApplicationContext, user_api_key, message_limit
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
try:
channel_ids: List[int] = []
for c in ctx.guild.text_channels:
channel_ids.append(c.id)
document = await self.load_data(
channel_ids=channel_ids, limit=message_limit, oldest_first=False
)
index = await self.loop.run_in_executor(
None, partial(self.index_discord, document, service_context)
)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
try:
price = await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
)
except Exception:
traceback.print_exc()
price = "Unknown"
Path(EnvService.save_path() / "indexes" / str(ctx.guild.id)).mkdir(
parents=True, exist_ok=True
)
index.storage_context.persist(
persist_dir=EnvService.save_path()
/ "indexes"
/ str(ctx.guild.id)
/ f"{ctx.guild.name.replace(' ', '-')}_{date.today().month}_{date.today().day}"
)
await ctx.respond(embed=EmbedStatics.get_index_set_success_embed(price))
except Exception as e:
await ctx.respond(embed=EmbedStatics.get_index_set_failure_embed((str(e))))
traceback.print_exc()
async def query(
self,
ctx: discord.ApplicationContext,
query: str,
response_mode,
nodes,
user_api_key,
child_branch_factor,
model,
multistep,
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name=model))
ctx_response = await ctx.respond(
embed=EmbedStatics.build_index_query_progress_embed(query)
)
try:
token_counter.reset_counts()
response = await self.loop.run_in_executor(
None,
partial(
get_and_query,
ctx.user.id,
self.index_storage,
query,
response_mode,
nodes,
child_branch_factor,
service_context=service_context,
multistep=llm_predictor if multistep else None,
),
)
print("The last token usage was ", token_counter.total_llm_token_count)
await self.usage_service.update_usage(
token_counter.total_llm_token_count,
await self.usage_service.get_cost_name(model),
)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
try:
total_price = round(
await self.usage_service.get_price(
token_counter.total_llm_token_count,
await self.usage_service.get_cost_name(model),
)
+ await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
),
6,
)
except:
total_price = "Unknown"
query_response_message = f"**Query:**\n\n`{query.strip()}`\n\n**Query response:**\n\n{response.response.strip()}"
query_response_message = query_response_message.replace(
"<|endofstatement|>", ""
)
embed_pages = await self.paginate_embed(query_response_message)
paginator = pages.Paginator(
pages=embed_pages,
timeout=None,
author_check=False,
)
await ctx_response.edit(
embed=EmbedStatics.build_index_query_success_embed(query, total_price)
)
await paginator.respond(ctx.interaction)
except Exception:
traceback.print_exc()
await ctx_response.edit(
embed=EmbedStatics.get_index_query_failure_embed(
"Failed to send query. You may not have an index set, load an index with /index load"
)
)
# Extracted functions from DiscordReader
async def read_channel(
self, channel_id: int, limit: Optional[int], oldest_first: bool
    ) -> tuple[str, str]:
"""Async read channel."""
messages: List[discord.Message] = []
try:
channel = self.bot.get_channel(channel_id)
print(f"Added {channel.name} from {channel.guild.name}")
# only work for text channels for now
if not isinstance(channel, discord.TextChannel):
raise ValueError(
f"Channel {channel_id} is not a text channel. "
"Only text channels are supported for now."
)
# thread_dict maps thread_id to thread
thread_dict = {}
for thread in channel.threads:
thread_dict[thread.id] = thread
async for msg in channel.history(limit=limit, oldest_first=oldest_first):
if msg.author.bot:
pass
else:
messages.append(msg)
if msg.id in thread_dict:
thread = thread_dict[msg.id]
async for thread_msg in thread.history(
limit=limit, oldest_first=oldest_first
):
messages.append(thread_msg)
except Exception as e:
print("Encountered error: " + str(e))
channel = self.bot.get_channel(channel_id)
msg_txt_list = [
f"user:{m.author.display_name}, content:{m.content}" for m in messages
]
return ("<|endofstatement|>\n\n".join(msg_txt_list), channel.name)
async def load_data(
self,
channel_ids: List[int],
limit: Optional[int] = None,
oldest_first: bool = True,
) -> List[Document]:
"""Load data from the input directory.
Args:
channel_ids (List[int]): List of channel ids to read.
limit (Optional[int]): Maximum number of messages to read.
oldest_first (bool): Whether to read oldest messages first.
Defaults to `True`.
Returns:
List[Document]: List of documents.
"""
results: List[Document] = []
for channel_id in channel_ids:
if not isinstance(channel_id, int):
raise ValueError(
f"Channel id {channel_id} must be an integer, "
f"not {type(channel_id)}."
)
(channel_content, channel_name) = await self.read_channel(
channel_id, limit=limit, oldest_first=oldest_first
)
results.append(
Document(
text=channel_content, extra_info={"channel_name": channel_name}
)
)
return results
async def compose(self, ctx: discord.ApplicationContext, name, user_api_key):
# Send the ComposeModal
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
if not self.index_storage[ctx.user.id].has_indexes(ctx.user.id):
await ctx.respond(
embed=EmbedStatics.get_index_compose_failure_embed(
"You must have at least one index to compose."
)
)
return
await ctx.respond(
"Select the index(es) to compose. You can compose multiple indexes together, you can also Deep Compose a single index.",
view=ComposeModal(self, ctx.user.id, name),
ephemeral=True,
)
class ComposeModal(discord.ui.View):
def __init__(self, index_cog, user_id, name=None, deep=None) -> None:
super().__init__()
        # Keep references to the index cog and the requesting user for the compose flow
self.index_cog = index_cog
self.user_id = user_id
self.deep = deep
# Get all the indexes for the user
self.indexes = [
file
for file in os.listdir(
EnvService.find_shared_file(f"indexes/{str(user_id)}/")
)
]
if index_cog.index_storage[user_id].has_search_indexes(user_id):
self.indexes.extend(
[
file
for file in os.listdir(
EnvService.find_shared_file(f"indexes/{str(user_id)}_search/")
)
]
)
print("Found the indexes, they are ", self.indexes)
# Map everything into the short to long cache
for index in self.indexes:
if len(index) > 93:
index_name = index[:93] + "-" + str(random.randint(0000, 9999))
SHORT_TO_LONG_CACHE[index_name] = index
else:
SHORT_TO_LONG_CACHE[index[:99]] = index
# Reverse the SHORT_TO_LONG_CACHE index
LONG_TO_SHORT_CACHE = {v: k for k, v in SHORT_TO_LONG_CACHE.items()}
# A text entry field for the name of the composed index
self.name = name
# A discord UI select menu with all the indexes. Limited to 25 entries. For the label field in the SelectOption,
# cut it off at 100 characters to prevent the message from being too long
self.index_select = discord.ui.Select(
placeholder="Select index(es) to compose",
options=[
discord.SelectOption(
label=LONG_TO_SHORT_CACHE[index], value=LONG_TO_SHORT_CACHE[index]
)
for index in self.indexes
][0:25],
max_values=len(self.indexes) if len(self.indexes) < 25 else 25,
min_values=1,
)
# Add the select menu to the modal
self.add_item(self.index_select)
        # If we have more than 25 entries, add more Select fields as necessary
self.extra_index_selects = []
if len(self.indexes) > 25:
for i in range(25, len(self.indexes), 25):
self.extra_index_selects.append(
discord.ui.Select(
placeholder="Select index(es) to compose",
options=[
discord.SelectOption(
label=LONG_TO_SHORT_CACHE[index],
value=LONG_TO_SHORT_CACHE[index],
)
for index in self.indexes
][i : i + 25],
max_values=len(self.indexes[i : i + 25]),
min_values=1,
)
)
self.add_item(self.extra_index_selects[-1])
# Add an input field for "Deep", a "yes" or "no" option, default no
self.deep_select = discord.ui.Select(
placeholder="Deep Compose",
options=[
discord.SelectOption(label="Yes", value="yes"),
discord.SelectOption(label="No", value="no"),
],
max_values=1,
min_values=1,
)
self.add_item(self.deep_select)
# Add a button to the modal called "Compose"
self.add_item(
discord.ui.Button(
label="Compose", style=discord.ButtonStyle.green, custom_id="compose"
)
)
# The callback for the button
async def interaction_check(self, interaction: discord.Interaction) -> bool:
# Check that the interaction was for custom_id "compose"
if interaction.data["custom_id"] == "compose":
# Check that the user selected at least one index
# The total list of indexes is the union of the values of all the select menus
indexes = self.index_select.values + [
select.values[0] for select in self.extra_index_selects
]
# Remap them from the SHORT_TO_LONG_CACHE
indexes = [SHORT_TO_LONG_CACHE[index] for index in indexes]
if len(indexes) < 1:
await interaction.response.send_message(
embed=EmbedStatics.get_index_compose_failure_embed(
"You must select at least 1 index"
),
ephemeral=True,
)
else:
composing_message = await interaction.response.send_message(
embed=EmbedStatics.get_index_compose_progress_embed(),
ephemeral=True,
)
# Compose the indexes
try:
price = await self.index_cog.compose_indexes(
self.user_id,
indexes,
self.name,
False
if not self.deep_select.values
or self.deep_select.values[0] == "no"
else True,
)
except ValueError as e:
await interaction.followup.send(
str(e), ephemeral=True, delete_after=180
)
return False
except Exception as e:
traceback.print_exc()
await interaction.followup.send(
embed=EmbedStatics.get_index_compose_failure_embed(
"An error occurred while composing the indexes: " + str(e)
),
ephemeral=True,
delete_after=180,
)
return False
await interaction.followup.send(
embed=EmbedStatics.get_index_compose_success_embed(price),
ephemeral=True,
delete_after=180,
)
# Try to direct message the user that their composed index is ready
try:
await self.index_cog.bot.get_user(self.user_id).send(
f"Your composed index is ready! You can load it with /index load now in the server."
)
except discord.Forbidden:
pass
try:
composing_message: Interaction
await composing_message.delete_original_response()
except:
traceback.print_exc()
else:
await interaction.response.defer(ephemeral=True)
| [] |
2024-01-10 | nitesh-jaswal/chatgpt-template-maker | py~api~services~__chatgpt_client.py | from __future__ import annotations
import logging
from api.settings import OpenAIAuthSettings, OpenAIAPISettings
from api.exceptions import OpenAIClientAuthException, OpenAIAPIException , OpenAIErrorKind
from api.models.openai_models import Role, Prompt, PromptBuffer
from api.models.openai_models import OpenAIChatResponse, OpenAIChatRequest, FinishReason
from typing import Dict, List, cast
from .__baseclient import BaseClient
DEFAULT_SYSTEM_PROMPT = Prompt(
role=Role.SYSTEM,
content="Please answer in less than 200 words the response to the following query"
)
def _get_logger() -> logging.Logger:
logger = logging.getLogger()
logging.basicConfig(level=logging.DEBUG)
return logger
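# Usage sketch (hedged -- the settings values below are placeholders, not defaults
# defined by this module). The class attribute `auth` must be assigned an
# OpenAIAuthSettings instance before the first ChatGPTClient is constructed,
# otherwise __create_client raises OpenAIClientAuthException:
#
#     ChatGPTClient.auth = OpenAIAuthSettings(...)       # assumed settings object
#     client = ChatGPTClient(OpenAIAPISettings(...))     # assumed settings object
#     reply = client.add_messages(user_prompts).send_messages()   # user_prompts: List[Prompt]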
class ChatGPTClient:
auth: OpenAIAuthSettings | None = None
def __init__(self, api_settings: OpenAIAPISettings, logger: logging.Logger | None = None):
self.logger = logger if logger else _get_logger()
self.system_prompt = DEFAULT_SYSTEM_PROMPT if api_settings.system_prompt is None else api_settings.system_prompt
self._buffer: PromptBuffer = PromptBuffer(api_settings.max_prompts)
self.api_settings = api_settings
self._client = self.__create_client()
def __create_client(self):
if self.auth is None:
raise OpenAIClientAuthException("Client auth settings not available. Please verify")
return BaseClient(self.auth.api_key.get_secret_value(), self.auth.organization)
def buffer_length(self) -> int:
return len(self._buffer)
    # TODO: Change interface to List[str]; also tokenizer
def add_messages(self, messages: List[Prompt]) -> ChatGPTClient:
self._buffer.extend(messages)
return self
def send_messages(self) -> Prompt:
"""`None` or `raises OpenAIAPIException(ErrorKind.Enum)`"""
request = OpenAIChatRequest(
model="gpt-3.5-turbo",
messages=self._buffer.to_list(),
max_tokens=10,
logit_bias={"50256": -100},
user="asodioasijd"
) # type: ignore
raw: OpenAIChatResponse = self._client.send(request)
if len(raw.choices) < 1:
raise OpenAIAPIException(OpenAIErrorKind.EMPTY, "No chat responses were received")
last_choice = raw.choices[-1]
if last_choice.message.role != Role.ASSISTANT:
raise OpenAIAPIException(OpenAIErrorKind.INTERNAL_ERROR, f"Incorrect role in chat response. Expected `Assistant` role, received `{last_choice.message.role}`")
match last_choice:
case _: return last_choice.message
@property
def buffer_list(self) -> List[Prompt]:
        return cast(List[Prompt], self._buffer[:]) | [
"Please answer in less than 200 words the response to the following query"
] |
2024-01-10 | nitesh-jaswal/chatgpt-template-maker | py~api~services~__baseclient.py | import openai
import json
from typing import Dict
from api.models.openai_models import OpenAIChatRequest, OpenAIChatResponse
from api.models.openai_models.__response import Usage
class BaseClient:
def __init__(self, api_key: str, organization: str):
openai.organization = organization
openai.api_key = api_key
def send(self, request: OpenAIChatRequest) -> OpenAIChatResponse:
request_dict: Dict[str, str] = json.loads(request.json())
print(request_dict)
response = openai.ChatCompletion.create(**request_dict)
return OpenAIChatResponse.parse_obj(response)
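# Note: BaseClient binds the OpenAI credentials to the module-level openai client at
# construction time and round-trips the pydantic request/response models through
# openai.ChatCompletion.create.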
| [] |
2024-01-10 | LuckCow/ModularIntellect | src~agents~knowledgebase_query_agent.py | import pickle
import os
from langchain import OpenAI, LLMChain, FAISS
from langchain.callbacks import StdOutCallbackHandler
from langchain.callbacks.manager import CallbackManager
from langchain.chains import ChatVectorDBChain
from langchain.chains.question_answering import load_qa_chain
from langchain.document_loaders import DirectoryLoader, TextLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import BaseLLM
from langchain.text_splitter import RecursiveCharacterTextSplitter
from src.components.itask import ITask
from src.services.chainlang_agent_service import BaseChainLangAgent
from langchain.chains.chat_vector_db.prompts import (CONDENSE_QUESTION_PROMPT,
QA_PROMPT)
from src.visualization.vector_search import visualize_vector_search
from src.web.socketio_callbackmanager import SocketIOCallbackHandler
def ingest_docs(knowledge_path: str, storage_path: str):
"""Get documents from repository."""
loader = DirectoryLoader(knowledge_path, loader_cls=TextLoader, loader_kwargs={"encoding": "utf-8"},
recursive=True, silent_errors=True)
raw_documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
)
documents = text_splitter.split_documents(raw_documents)
embeddings = OpenAIEmbeddings(allowed_special=["<|endoftext|>"])
vectorstore = FAISS.from_documents(documents, embeddings)
# Save vectorstore
with open(storage_path, "wb") as f:
pickle.dump(vectorstore, f)
return vectorstore
class SimpleKnowledgeBaseQueryAgent(BaseChainLangAgent):
"""Agent that queries a knowledge base."""
def __init__(self, llm: BaseLLM, knowledge_path: str, storage_path: str="vectorstore.pkl", socketio=None):
# set llm (from dependency injection)
self.llm = llm
# check if storage_path exists
if not os.path.exists(storage_path):
print("Ingesting knowledge to create vectorstore")
self.vectorstore = ingest_docs(knowledge_path, storage_path)
else:
with open(storage_path, "rb") as f:
self.vectorstore = pickle.load(f)
self.socketio = socketio
# initialize chat history
self.chat_history = []
super().__init__()
def _get_chain(self):
"""Create a ChatVectorDBChain for question/answering."""
        # Build the condense-question chain and the combine-docs QA chain,
        # both backed by self.llm, with stdout (and optional Socket.IO) callbacks
manager = CallbackManager([])
manager.set_handler(StdOutCallbackHandler())
if self.socketio:
manager.add_handler(SocketIOCallbackHandler(self.socketio, 'ChatVectorDBChain'))
question_generator = LLMChain(
llm=self.llm, prompt=CONDENSE_QUESTION_PROMPT, callback_manager=manager, verbose=True
)
doc_chain = load_qa_chain(
self.llm, chain_type="stuff", prompt=QA_PROMPT, callback_manager=manager, verbose=True
)
qa = ChatVectorDBChain(
vectorstore=self.vectorstore,
combine_docs_chain=doc_chain,
question_generator=question_generator,
)
return qa
def execute(self, task: ITask):
#visualize_vector_search(self.vectorstore, task)
result = self._chain({"question": task, "chat_history": self.chat_history})
self.chat_history.append((task, result["answer"]))
return result['answer']
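# Minimal usage sketch (the knowledge path and LLM settings are illustrative
# assumptions; the vectorstore pickle is created on first run):
#
#     agent = SimpleKnowledgeBaseQueryAgent(OpenAI(temperature=0), "./docs")
#     print(agent.execute("What does the architecture overview say about agents?"))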
| [] |
2024-01-10 | LuckCow/ModularIntellect | src~memory~neo4j_docstore.py | from typing import Dict, Union
from langchain.docstore.base import AddableMixin, Docstore
from langchain.docstore.document import Document
from neo4j import GraphDatabase
class Neo4jDocstore(Docstore, AddableMixin):
"""Langchain Docstore implementation for a Neo4j database."""
def __init__(self, uri, auth):
"""Initialize with Neo4j driver."""
self.uri = uri
self.auth = auth
self.driver = None
self._connect()
def _connect(self):
"""Connect to the Neo4j database."""
self.driver = GraphDatabase.driver(self.uri, auth=self.auth)
def add(self, texts: Dict[str, Document]) -> None:
"""Add texts to Neo4j database."""
documents = {}
notes = {}
interactions = {}
for doc_id, doc in texts.items():
if doc.metadata.get("node_type"):
doc_type = doc.metadata.pop("node_type")
else:
doc_type = "document"
# classify the node type
if doc_type == "document":
documents[doc_id] = doc
elif doc_type == "note":
notes[doc_id] = doc
elif doc_type == "interaction":
interactions[doc_id] = doc
if documents:
self._add_documents(documents)
if notes:
self._add_notes(notes)
if interactions:
self._add_interactions(interactions)
def _add_notes(self, texts: Dict[str, Document]):
with self.driver.session() as session:
for doc_id, doc in texts.items():
references = doc.metadata.get("references", [])
query = """
MERGE (n:Note {id: $doc_id})
ON CREATE SET n += $properties
WITH n
UNWIND $references AS ref_id
MATCH (ref_node) WHERE ID(ref_node) = toInteger(ref_id)
MERGE (n)-[:REFERENCES]->(ref_node)
"""
properties = {k: v for k, v in doc.metadata.items() if k != "references"}
properties["text"] = doc.page_content
session.run(query, doc_id=doc_id, properties=properties, references=references)
def _add_interactions(self, texts: Dict[str, Document]):
with self.driver.session() as session:
for doc_id, doc in texts.items():
conversation_id = doc.metadata.get("conversation_id", None)
query = """
MERGE (i:Interaction {id: $doc_id})
ON CREATE SET i += $properties
WITH i
MERGE (c:Conversation {id: $conversation_id})
MERGE (i)-[:PART_OF]->(c)
"""
properties = {k: v for k, v in doc.metadata.items() if k != "conversation_id"}
properties["text"] = doc.page_content
session.run(query, doc_id=doc_id, properties=properties, conversation_id=conversation_id)
def _add_documents(self, texts: Dict[str, Document]):
# Group texts by source, add order and previous chunk metadata for neo4j formatting
docsrc_to_chunks = {}
prev_docsrc = None
prev_chunk_id = None
chunk_order = 0
for doc_id, doc in texts.items():
chunk = {"id": doc_id}
doc_src = doc.metadata.get("source", "unspecified source")
chunk_props = {"text": doc.page_content}
chunk_props.update(doc.metadata)
# Reset counter and chunk pointer on new source
if prev_docsrc != doc_src:
prev_chunk_id = None
chunk_order = 0
# Add order and previous chunk metadata
chunk_props["order"] = chunk_order
chunk["prev_id"] = prev_chunk_id
chunk['properties'] = chunk_props
docsrc_to_chunks.setdefault(doc_src, []).append(chunk)
# Update previous data
prev_docsrc = doc_src
chunk_order += 1
prev_chunk_id = chunk["id"]
with self.driver.session() as session:
for doc_src, chunks in docsrc_to_chunks.items():
chunks_query = ("MERGE (d:Document {doc_title: $doc_title})\n"
"WITH d\n"
"UNWIND $chunks AS chunk\n"
"MERGE (c:Chunk {id: chunk.id})\n"
"ON CREATE SET c += chunk.properties\n"
"MERGE (d)<-[:PART_OF]-(c)\n"
"WITH c, chunk\n"
"MATCH (prev:Chunk {id: chunk.prev_id})\n"
"MERGE (prev)-[:CONTINUES]->(c)\n")
session.run(chunks_query, chunks=chunks, doc_title=doc_src)
def search(self, search_id: str) -> Union[str, Document]:
"""
Search for a document in Neo4j database and include connections in metadata
connections are returned in the `connections` field of the metadata and have the following format:
[
{
"type": "CONTINUES",
"connected_id": "UUID8324908",
"direction": "out"
},
...
]
"""
with self.driver.session() as session:
query = """
MATCH (i)
WHERE i.id = $id
OPTIONAL MATCH (i)-[r_out]->(connected_out)
OPTIONAL MATCH (i)<-[r_in]-(connected_in)
RETURN i.text AS text,
collect({type: type(r_out), connected_id: connected_out.id, direction: "out"}) as outgoing_connections,
collect({type: type(r_in), connected_id: connected_in.id, direction: "in"}) as incoming_connections
"""
result = session.run(query, id=search_id)
record = result.single()
if record:
# Omit Part-Of connections
connections = [conn for conn in record["outgoing_connections"] + record["incoming_connections"]
if conn['type'] != "PART_OF"]
metadata = {"id": search_id, "connections": connections}
return Document(page_content=record["text"], metadata=metadata)
else:
raise ValueError(f"ID not found: {search_id}")
# print('Error: ID not found.: ', search_id)
# return Document(page_content="Error not found: " + search_id, metadata={"id": search_id})
def __getstate__(self):
state = self.__dict__.copy()
del state['driver']
return state
def __setstate__(self, state):
self.__dict__.update(state)
# After unpickling, the connection needs to be reestablished
self._connect()
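# Minimal usage sketch (the connection URI and credentials are illustrative
# assumptions):
#
#     store = Neo4jDocstore("bolt://localhost:7687", ("neo4j", "password"))
#     store.add({"doc-1": Document(page_content="hello world", metadata={"source": "a.txt"})})
#     print(store.search("doc-1").metadata["connections"])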
| [] |
2024-01-10 | LuckCow/ModularIntellect | src~utils~mapping_directory_loader.py | """Loading logic for loading documents from a directory."""
import logging
from pathlib import Path
from typing import List, Type, Union
from langchain.docstore.document import Document
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.html_bs import BSHTMLLoader
from langchain.document_loaders.text import TextLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader
FILE_LOADER_TYPE = Union[
Type[UnstructuredFileLoader], Type[TextLoader], Type[BSHTMLLoader]
]
logger = logging.getLogger(__name__)
def _is_visible(p: Path) -> bool:
parts = p.parts
for _p in parts:
if _p.startswith("."):
return False
return True
class MappingDirectoryLoader(BaseLoader):
"""Loading logic for loading documents from a directory."""
def __init__(
self,
path: str,
glob: str = "**/[!.]*",
silent_errors: bool = False,
load_hidden: bool = False,
loader_mapping=None, # function taking in file extension and returning loader class and kwargs
recursive: bool = False,
):
"""Initialize with path to directory and how to glob over it."""
# loader mapping maps file extensions to loader classes and loader kwargs
if loader_mapping is None:
# TODO: expand mapping to include more document loaders
# from https://python.langchain.com/en/latest/modules/indexes/document_loaders.html
def loader_mapping(ext: str):
if ext in (".txt", ".py"):
return TextLoader, {"encoding": "utf-8"}
elif ext in (".html", '.htm'):
return BSHTMLLoader, {}
elif ext == ".pdf":
return PyPDFLoader, {}
else:
return TextLoader, {"encoding": "utf-8"}
self.path = path
self.glob = glob
self.load_hidden = load_hidden
self.loader_mapping = loader_mapping
self.silent_errors = silent_errors
self.recursive = recursive
def load(self) -> List[Document]:
"""Load documents."""
p = Path(self.path)
docs = []
items = p.rglob(self.glob) if self.recursive else p.glob(self.glob)
for i in items:
if i.is_file():
if _is_visible(i.relative_to(p)) or self.load_hidden:
try:
loader_cls, loader_kwargs = self.loader_mapping(i.suffix)
sub_docs = loader_cls(str(i), **loader_kwargs).load()
docs.extend(sub_docs)
except Exception as e:
if self.silent_errors:
logger.warning(e)
else:
raise e
return docs
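# Minimal usage sketch (the directory path is an illustrative assumption):
#
#     loader = MappingDirectoryLoader("./knowledge", recursive=True, silent_errors=True)
#     docs = loader.load()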
| [] |
2024-01-10 | LuckCow/ModularIntellect | src~agents~alignment_auditor.py | from langchain import LLMChain, PromptTemplate
from langchain.callbacks import CallbackManager, StdOutCallbackHandler
from langchain.llms import BaseLLM
from src.components.itask import ITask
from src.services.chainlang_agent_service import BaseChainLangAgent
class AlignmentAuditingAgent(BaseChainLangAgent):
"""Breaks a complex task into subtasks that can be assigned to another agent"""
# Task Creation Prompt
PROMPT = """You are a responsible agent who is overseeing the work of other agents."""
def __init__(self, llm: BaseLLM):
self.llm = llm
        # NOTE: the prompt template above has no {placeholders} yet; input_variables must
        # match them once the prompt is fleshed out.
        self.prompt = PromptTemplate(input_variables=["alignment_goal", "tasks"], template=self.PROMPT)
super().__init__()
def _get_chain(self):
"""Create a ChatVectorDBChain for question/answering."""
# Construct a ChatVectorDBChain with a streaming llm for combine docs
# and a separate, non-streaming llm for question generation
manager = CallbackManager([])
manager.set_handler(StdOutCallbackHandler())
question_generator = LLMChain(
llm=self.llm, prompt=self.prompt, callback_manager=manager, verbose=True
)
return question_generator
def execute(self, task: ITask):
self._chain.predict({"objective": task, "agent_list": self.AGENT_LIST, "past_tasks": ""})
# Example:
"""Please systematically break down this objective into a list of subtasks that can be completed by a single
specialized agent. Each agent can solve a particular type of problem as specified in its description. Each task consists of a
task_description and a task_justification. The task_description is a short description of the task that explains what
the agent should do to complete the task. The task_justification explains how completing the task will contribute to
the overall objective. Note that there are 2 special case agents for tasks that do not fit into the other categories.
The output should be a list of tasks in the format: [task_number]. [assigned_agent] | [task_description] | [task_justification]
Task Agents (Name - Description):
RESEARCHER - Given a topic or question, this agent will search the internet for relevant information and store it for future reference.
CALCULATOR - Given a math question, this agent will calculate the exact result.
PROGRAMMER - Given a programming task, this agent will write the code to complete it.
WRITER - Given a general and simple task, this agent will write out the solution to it using results from previous tasks and memory.
Special Case Agents:
TASK_DECOMPOSER - Given a task that is too complicated for a single agent to complete, this agent will break it down into simpler subtasks that can be processed.
IMPOSSIBLE_TASK - Given a task that cannot be solved by any of the other agents, assign this task to this agent and it will explain why it is impossible.
The objective is: Find the weight and diameter of the planet Jupiter in terms of the weight and length of a 2003 Honda Accord.
Justification of Objective: I want to give a relatable idea of scale for a presentation I am giving.
Subtasks to Complete:"""
# Result:
""" RESEARCHER | Find the weight and dimensions of a 2003 Honda Accord | This information will serve as a reference for comparison with Jupiter's weight and diameter.
RESEARCHER | Find the weight and diameter of the planet Jupiter | This information will be used to compare with the weight and dimensions of a 2003 Honda Accord.
CALCULATOR | Convert Jupiter's weight and diameter into terms of 2003 Honda Accord's weight and length | This will allow for a relatable comparison of Jupiter's weight and diameter.
WRITER | Write a concise explanation of the comparison between Jupiter's weight and diameter and the weight and length of a 2003 Honda Accord | This explanation will help the user convey a relatable idea of scale for their presentation.""" | [
"You are a responsible agent who is overseeing the work of other agents."
] |
2024-01-10 | LuckCow/ModularIntellect | src~visualization~vector_search.py | import numpy as np
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import VectorStore
from sklearn.manifold import TSNE
import plotly.express as px
import pandas as pd
import plotly.subplots as sp
from sklearn.decomposition import PCA
import re
def visualize_vector_search(vectorstore: VectorStore, query: str, embeddings=None):
"""
Visualize the vector search with PCA and t-SNE.
:param vectorstore: LangChain VectorStore
:param query: unembedded str query
:param embeddings: Embedding system (defaults to OpenAI)
:return: None (opens browser window with visualization using plotly)
"""
# visualize search results
# embed task vector
if embeddings is None:
embeddings = OpenAIEmbeddings()
embedded_query = embeddings.embed_query(query)
# document contents (escape chars that break plotly)
doc_contents = [d.page_content[:100] for d in vectorstore.docstore._dict.values()] + [query]
chars_to_remove = "<>/'\"`" # Not sure which chars exactly break it
pattern = "[" + re.escape(chars_to_remove) + "]"
doc_contents = [re.sub(pattern, '', s) for s in doc_contents]
visualize_faiss_index_with_query(vectorstore.index, embedded_query, doc_contents, k=4)
def visualize_faiss_index_with_query(index, query_vector, doc_texts, k=4):
"""
Visualize the vector search with PCA and t-SNE.
:param index: FAISS index
:param query_vector: embedded vector to search for
:param doc_texts: list of document texts for hover text (must be in same order as index)
:param k: number of closest vectors to show
:return: None (opens browser window with visualization using plotly)
"""
# Search for the k closest vectors to the query vector
distances, indices = index.search(np.array([query_vector]), k)
# Retrieve all the vectors from the FAISS index
retrieved_vectors = index.reconstruct_n(0, index.ntotal)
# Apply PCA to reduce dimensionality to 2D
pca = PCA(n_components=2)
all_data_pca = pca.fit_transform(np.vstack([retrieved_vectors, query_vector]))
# Apply t-SNE to reduce dimensionality to 2D
tsne = TSNE(n_components=2, random_state=42)
all_data_tsne = tsne.fit_transform(np.vstack([retrieved_vectors, query_vector]))
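# In both projections the query vector is stacked as the last row, which is why the final index is labeled 'Query' below.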
# Create a DataFrame for the PCA and t-SNE data and assign labels and add text descriptions
data_pca_df = pd.DataFrame(all_data_pca, columns=['PCA 1', 'PCA 2'])
data_tsne_df = pd.DataFrame(all_data_tsne, columns=['t-SNE 1', 't-SNE 2'])
data_pca_df['label'] = data_tsne_df['label'] = 'Other'
data_pca_df.loc[indices[0], 'label'] = data_tsne_df.loc[indices[0], 'label'] = 'Close'
data_pca_df.loc[data_pca_df.index[-1], 'label'] = data_tsne_df.loc[data_tsne_df.index[-1], 'label'] = 'Query'
data_pca_df['doc_text'] = data_tsne_df['doc_text'] = doc_texts
# Create subplots
fig = sp.make_subplots(rows=1, cols=2, subplot_titles=('PCA', 't-SNE'))
# Create PCA scatter plot and add to subplot
pca_scatter = px.scatter(data_frame=data_pca_df, x='PCA 1', y='PCA 2', color='label',
color_discrete_sequence=['blue', 'magenta', 'red'],
hover_data={'doc_text': True, 'label': False, 'PCA 1': False, 'PCA 2': False},
)
for trace in pca_scatter.data:
fig.add_trace(trace, row=1, col=1)
# Create t-SNE scatter plot and add to subplot
tsne_scatter = px.scatter(data_frame=data_tsne_df, x='t-SNE 1', y='t-SNE 2', color='label',
color_discrete_sequence=['blue', 'magenta', 'red'],
hover_data={'doc_text': True, 'label': False, 't-SNE 1': False, 't-SNE 2': False},
)
for trace in tsne_scatter.data:
fig.add_trace(trace, row=1, col=2)
pca_scatter.update_traces(
marker=dict(size=[5 for i in range(len(data_pca_df))],
opacity=[0.5 for i in range(len(data_pca_df))]),
selector=dict(type='scattergl'))
tsne_scatter.update_traces(
marker=dict(size=[5 for i in range(len(data_tsne_df))],
opacity=[0.5 for i in range(len(data_tsne_df))]),
selector=dict(type='scattergl'))
# Update the layout and show the plot
fig.update_layout(title='Dimensionality Reduction Visualization of Vectors from a FAISS Index with Query Vector')
fig.show()
if __name__ == '__main__':
import faiss
# Generate random high-dimensional test vectors
n_vectors = 500
dim = 1882
data = np.random.rand(n_vectors, dim).astype('float32')
# Create a FAISS index
index = faiss.IndexFlatL2(dim)
index.add(data)
# Generate a random query vector
query_vector = np.random.rand(dim).astype('float32')
# Create hover-over labels for the vectors
doc_texts = [f'Document #{i} Contents' for i in range(n_vectors)] + ['Query Contents']
# Visualize the FAISS index with the query vector
visualize_faiss_index_with_query(index, query_vector, doc_texts) | [] |
2024-01-10 | LuckCow/ModularIntellect | src~agents~task_decomposition_agent.py | from langchain import LLMChain, PromptTemplate
from langchain.callbacks import CallbackManager, StdOutCallbackHandler
from langchain.llms import BaseLLM
from src.components.itask import ITask
from src.services.chainlang_agent_service import BaseChainLangAgent
class TaskDecompositionAgent(BaseChainLangAgent):
"""Breaks a complex task into subtasks that can be assigned to another agent"""
# Task Creation Prompt
PROMPT = """Please systematically break down this objective into a list of subtasks that can be completed by a single
specialized agent. Each agent can solve a particular type of problem as specified in its description. Each task consists of a
task_description and a task_justification. The task_description is a short description of the task that explains what
the agent should do to complete the task. The task_justification explains how completing the task will contribute to
the overall objective.
The output should be a list of tasks in the format: [task_number]. [assigned_agent] | [task_description] | [task_justification]
Specialized Agents (Name - Description):
{agent_list}
{past_tasks}
The objective is: {objective}
Subtasks to Complete:"""
# Agent List: list of agents that can complete tasks and descriptions of how they accomplish it
AGENT_LIST = """RESEARCHER - Given a topic or question, this agent will search the internet for relevant information and store it for future reference.
CALCULATOR - Given a math question, this agent will calculate the exact result.
PROGRAMMER - Given a programming task, this agent will write the code to complete it.
WRITER - Given a general and simple task, this agent will write out the solution to it using results from previous tasks and memory.
Special Case Agents:
TASK_DECOMPOSER - Given a task that is too complicated for a single agent to complete, this agent will break it down into simpler subtasks that can be processed.
IMPOSSIBLE_TASK - Given a task that cannot be solved by any of the other agents, assign this task to this agent and it will explain why it is impossible."""
# TODO: maybe add example for few shot learning
def __init__(self, llm: BaseLLM):
self.llm = llm
self.prompt = PromptTemplate(input_variables=["objective", "agent_list", "past_tasks"], template=self.PROMPT)
super().__init__()
def _get_chain(self):
"""Create a ChatVectorDBChain for question/answering."""
# Construct a ChatVectorDBChain with a streaming llm for combine docs
# and a separate, non-streaming llm for question generation
manager = CallbackManager([])
manager.set_handler(StdOutCallbackHandler())
question_generator = LLMChain(
llm=self.llm, prompt=self.prompt, callback_manager=manager, verbose=True
)
return question_generator
def execute(self, task: ITask):
self._chain.predict(objective=task, agent_list=self.AGENT_LIST, past_tasks="")
# Example:
"""Please systematically break down this objective into a list of subtasks that can be completed by a single
specialized agent. Each agent can solve a particular type of problem as specified in its description. Each task consists of a
task_description and a task_justification. The task_description is a short description of the task that explains what
the agent should do to complete the task. The task_justification explains how completing the task will contribute to
the overall objective. Note that there are 2 special case agents for tasks that do not fit into the other categories.
The output should be a list of tasks in the format: [task_number]. [assigned_agent] | [task_description] | [task_justification]
Task Agents (Name - Description):
RESEARCHER - Given a topic or question, this agent will search the internet for relevant information and store it for future reference.
CALCULATOR - Given a math question, this agent will calculate the exact result.
PROGRAMMER - Given a programming task, this agent will write the code to complete it.
WRITER - Given a general and simple task, this agent will write out the solution to it using results from previous tasks and memory.
Special Case Agents:
TASK_DECOMPOSER - Given a task that is too complicated for a single agent to complete, this agent will break it down into simpler subtasks that can be processed.
IMPOSSIBLE_TASK - Given a task that cannot be solved by any of the other agents, assign this task to this agent and it will explain why it is impossible.
The objective is: Find the weight and diameter of the planet Jupiter in terms of the weight and length of a 2003 Honda Accord.
Justification of Objective: I want to give a relatable idea of scale for a presentation I am giving.
Subtasks to Complete:"""
# Result:
""" RESEARCHER | Find the weight and dimensions of a 2003 Honda Accord | This information will serve as a reference for comparison with Jupiter's weight and diameter.
RESEARCHER | Find the weight and diameter of the planet Jupiter | This information will be used to compare with the weight and dimensions of a 2003 Honda Accord.
CALCULATOR | Convert Jupiter's weight and diameter into terms of 2003 Honda Accord's weight and length | This will allow for a relatable comparison of Jupiter's weight and diameter.
WRITER | Write a concise explanation of the comparison between Jupiter's weight and diameter and the weight and length of a 2003 Honda Accord | This explanation will help the user convey a relatable idea of scale for their presentation."""
# multistep prompt test
"""Answer the following questions as best you can, show your work throughout a step by step process.
Use the following format:
Question: the input question you must answer
Thought: list out all of the possible next steps to take
Action: choose the action that will most quickly lead to the goal
Observation: the result of the action
... (this Thought/Action/Observation can repeat N times)
Thought: The problem is now solved
Final Answer: the final state has been reached and the question is answered
Begin! Remember to take as many steps as needed to get to the final answer.
Question: {input}
If you are given a 5 gallon bucket and a 3 gallon bucket, both with no volume markings, and an infinite supply of water, how would you get exactly 2 gallons in one of the buckets?
""" | [
"Please systematically break down this objective into a list of subtasks that can be completed by a single \nspecialized agent. Each agent can solve a particular type of problem as specified in its description. Each task consists of a \ntask_description and a task_justification. The task_description is a short description of the task that explains what\nthe agent should do to complete the task. The task_justification explains how completing the task will contribute to\nthe overall objective.\nThe output should be a list of tasks in the format: [task_number]. [assigned_agent] | [task_description] | [task_justification]\nSpecialized Agents (Name - Description): \n{agent_list}\n{past_tasks}\nThe objective is: {objective}\nSubtasks to Complete:"
] |
2024-01-10 | LuckCow/ModularIntellect | src~web~socketio_callbackmanager.py | import logging
from typing import Any, Dict, List, Union
from abc import ABC, abstractmethod
from threading import Event
from flask import jsonify
from flask_socketio import SocketIO
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import LLMResult, AgentAction, AgentFinish
from src.web.chain_state import ChainState
logger = logging.getLogger(__name__)
class SocketIOCallbackHandler(BaseCallbackHandler):
def __init__(self, socketio: SocketIO, room: str):
self.socketio = socketio
self.room = room
self.chain_state = ChainState()
def chain_execution_state(self):
#print('chain_execution_state: ', self.chain_state.chain_blocks)
return jsonify(
chainBlocks=[{
'title': block.title,
'inputs': block.inputs,
'outputs': block.outputs
} for block in self.chain_state.chain_blocks],
currentBlockIndex=self.chain_state.current_block_index,
)
def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any) -> Any:
logging.info('on_chain_start: serialized: %s, inputs: %s', serialized, inputs)
self.chain_state.add_chain_block('Chain Title Placeholder', inputs)
self.socketio.emit('chain_start', {'serialized': serialized, 'inputs': inputs})#, room=self.room)
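# Note: the `room` argument is commented out here, so this event is broadcast to every connected client rather than a single room.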
# Create a socket event listener for input updates
@self.socketio.on("input_update")
def handle_input_update(data):
blockIndex = data["blockIndex"]
key = data["key"]
value = data["value"]
# Janky pass by reference
inputs[key] = value
# Create an Event to wait for user confirmation
chain_start_confirm_event = Event()
# Create callback function to continue execution
@self.socketio.on('chain_start_confirm')
def chain_start_confirm_callback():
logging.info('chain_start_confirm_callback')
chain_start_confirm_event.set()
# Wait for the event to be set by the frontend's confirmation
chain_start_confirm_event.wait()
# Remove the event listener to avoid memory leaks
#self.socketio.off('chain_start_confirm', chain_start_confirm_callback, room=self.room)
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> Any:
logging.info('on_chain_end: outputs: %s', outputs)
self.chain_state.set_chain_block_outputs(outputs)
self.socketio.emit('chain_end', {'outputs': outputs})#, room=self.room)
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> Any:
self.socketio.emit('llm_start', {'serialized': serialized, 'prompts': prompts}, room=self.room)
def on_llm_new_token(self, token: str, **kwargs: Any) -> Any:
self.socketio.emit('llm_new_token', {'token': token}, room=self.room)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> Any:
self.socketio.emit('llm_end', {'response': response.json()}, room=self.room)
def on_llm_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> Any:
self.socketio.emit('llm_error', {'error': str(error)}, room=self.room)
def on_chain_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> Any:
self.socketio.emit('chain_error', {'error': str(error)}, room=self.room)
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **kwargs: Any) -> Any:
self.socketio.emit('tool_start', {'serialized': serialized, 'input_str': input_str}, room=self.room)
def on_tool_end(self, output: str, **kwargs: Any) -> Any:
self.socketio.emit('tool_end', {'output': output}, room=self.room)
def on_tool_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> Any:
self.socketio.emit('tool_error', {'error': str(error)}, room=self.room)
def on_text(self, text: str, **kwargs: Any) -> Any:
self.socketio.emit('text', {'text': text}, room=self.room)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
self.socketio.emit('agent_action', {'action': action.log}, room=self.room)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
self.socketio.emit('agent_finish', {'finish': finish.log}, room=self.room)
| [] |
2024-01-10 | LuckCow/ModularIntellect | src~agents~graphdb_traversal_chain.py | """
GraphDBTraversalChain allows an LLM to explore context chunks and their connections in a graph database,
extract relevant information, and continue exploring the graph database for additional information.
This helps the LLM to process and summarize the information obtained from the
database and also determine whether further exploration is needed.
"""
import re
from langchain import PromptTemplate
from langchain.schema import Document, SystemMessage
from langchain.chains import LLMChain
from langchain.chains.base import Chain
from typing import Dict, List
from langchain.vectorstores import VectorStore
from pydantic import PrivateAttr
# gives instructions for how to use memory system
mem_system_message = """Act as a researcher trying to answer a question about a topic. You have access to a knowledge base of documents broken down into context chunks that are connected to each other.
Information will be presented to you with a small number of chunks at a time. Each chunk of information may or may not be relevant to the question.
Additionally, each context chunk also has connection references to other chunks that are not immediately presented, but can be accessed by requesting to lookup the chunk referenced by the connection.
For each chunk that you are presented with, list out the chunk number, then evaluate whether or not the chunk is relevant to the question. If the chunk is relevant, provide a summary of the relevant information in the chunk, otherwise, just put 'None'
Then, give the best answer you can with the information that you have so far.
Finally, reflect on that answer and decide if it can be improved by looking up additional information from the connections. If so, list out any of the connections that you think would help improve the answer.
For example, if you are presented a chunk that seems like it is about to have important information to answer the question,
but stops and has a connection that "CONTINUES TO" another chunk, you can respond by saying: "LOOKUP CONTEXT CHUNK #<chunk id>".
For example, your answer should follow this format:
"Chunk Review:
Chunk #<chunk id> - Relevant - <summary of relevant information in chunk>
Chunk #<chunk id> - Not Relevant - None
Chunk #<chunk id> - Relevant - <summary of relevant information in chunk>
Answer so far: <answer>
Further exploration chunk connection lookups:
CONTINUES TO Context Chunk #<chunk id>
CONTINUES TO Context Chunk #<chunk id>
"
"""
# Presents context information
mem_query_template = PromptTemplate.from_template("""The question is:
QUESTION: {question}
Here is the information gathered so far:
WORKING SUMMARY: {working_summary}
Below are the relevant context chunks that have been looked up so far:
CONTEXT CHUNKS:
{context_chunks}""")
class GraphDBTraversalChain(Chain):
"""
give llm context chunks and their connections within a graph db and
ask the llm to pick out relevant information and continue to explore the graph db for more information
Attributes:
llm_chain: LLMChain used to query LLM
graph_vector_store: VectorStore instance that is used to retrieve context chunks based on the input.
max_depth: The maximum depth to traverse in the graph database (default is 3).
starting_chunks: The number of initial context chunks to provide to the LLM (default is 4).
"""
llm_chain: LLMChain = None
graph_vector_store: VectorStore
max_depth: int = 3
starting_chunks: int = 4
_document_map_id_to_num: Dict[str, str] = PrivateAttr()
_document_map_num_to_id: Dict[str, str] = PrivateAttr()
@property
def input_keys(self) -> List[str]:
return ['input']
@property
def output_keys(self) -> List[str]:
return ['output']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._document_map_id_to_num = {} # maps db id to local context number
self._document_map_num_to_id = {} # maps local context number to db id
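# These two maps translate between database document ids and the short local chunk numbers that are shown to the LLM in the prompt.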
def explain_document_graph(self, documents: list[Document]) -> str:
"""
Create a plain text description of documents from the graph database and their connections
to feed context to llm
Output format:
Context Chunk #<num>
Content: <all of the content of the document here>
Connections:
CONTINUES to Document Chunk #<num>
CONTINUES FROM Document Chunk #<num>
"""
description = []
for document in documents:
# Document ID and content
description.append(f"Context Chunk #{self.map_id_to_number(document.metadata['id'])}")
description.append(f"Content: {document.page_content}")
# Document connections
description.append("Connections:")
connections = document.metadata.get('connections', [])
for connection in connections:
if 'type' in connection and 'connected_id' in connection and 'direction' in connection:
direction = "TO" if connection['direction'] == "out" else "FROM"
conn_num = self.map_id_to_number(connection['connected_id'])
description.append(f" {connection['type']} {direction} Document Chunk #{conn_num}")
description.append("\n")
return "\n".join(description)
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
# Look up relevant context chunks from vector store
context_docs = self.graph_vector_store.search(inputs['input'], search_type='similarity', k=self.starting_chunks)
# Create a mapping from db id to local context number
for doc in context_docs:
self.add_to_document_map(doc)
working_summary = "None"
depth = 0
while True:
output = self.llm_chain({'question': inputs['input'],
'working_summary': working_summary,
'context_chunks': self.explain_document_graph(context_docs)})
# Parse output to get working summary
output.update(_parse_output(output['response']))
working_summary = output['working_summary']
# Stop calling once no additional exploration calls are made or the max depth is reached
depth += 1
if depth >= self.max_depth or not output['requested_connections']:
break
# Look up the requested connections
context_docs = []
for connection in output['requested_connections']:
# dereference local context number to db id
connection_id = self.map_num_to_id(connection['num'])
doc = self.graph_vector_store.docstore.search(connection_id)
self.add_to_document_map(doc)
context_docs.append(doc)
return {'output': output}
def add_to_document_map(self, doc):
# add document
self._add_to_document_map(doc.metadata['id'])
# add connections
for connection in doc.metadata.get('connections', []):
self._add_to_document_map(connection['connected_id'])
def _add_to_document_map(self, doc_id):
# check if document is already in the map
if doc_id in self._document_map_id_to_num:
return
doc_num = str(len(self._document_map_id_to_num) + 1)
self._document_map_id_to_num[doc_id] = doc_num
self._document_map_num_to_id[doc_num] = doc_id
def map_id_to_number(self, doc_id):
return self._document_map_id_to_num[doc_id]
def map_num_to_id(self, num):
return self._document_map_num_to_id[num]
def _parse_output(output):
"""
Parse the output of the llm chain to get the working summary, requested connections, and reviewed chunks
returns chunks analysis, working summary and list of context chunks that were looked up
"""
# Sections within the output that we want to parse
markers = ['Chunk Review:', 'Answer so far:', 'Further exploration chunk connection lookups:']
# Escape the markers for regex use
markers = [re.escape(marker) for marker in markers]
# Create a pattern that matches the markers
pattern = '|'.join(markers)
# Split the output using the pattern
parts = re.split(pattern, output)
# Skip the first part, because it's before the first marker
_, chunk_review, summary, connections = parts
# strip whitespace
summary = summary.strip()
# Parse the chunk review section
chunk_data = []
for m in re.finditer(r'Chunk #(\d+) - (Relevant|Not Relevant) - (.*)', chunk_review):
chunk_data.append({'num': m.group(1),
'relevant': True if m.group(2) == 'Relevant' else False,
'summary': m.group(3)})
# Parse the connections section
connection_data = []
for m in re.finditer(r'(\w+) TO Context Chunk #(\d+)', connections):
connection_data.append({'num': m.group(2),
'connection_type': m.group(1)})
return {'reviewed_chunks': chunk_data, 'working_summary': summary, 'requested_connections': connection_data}
if __name__ == '__main__':
from langchain.callbacks import StdOutCallbackHandler
from langchain.chat_models import ChatOpenAI
from src.agents.chat_chain import ChatChain
from src.memory.triple_modal_memory import TripleModalMemory
import os
from dotenv import load_dotenv
# Set up the cache
import langchain
from langchain.cache import SQLiteCache
langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
# initialize the memory
load_dotenv()
uri = os.getenv("NEO4J_URI")
user = os.getenv("NEO4J_USER")
password = os.getenv("NEO4J_PASSWORD")
mem = TripleModalMemory(uri, user, password)
# Create memory from docks or load from file if it exists
ingested = os.path.exists('../data/triple_modal_memory.faiss')
if not ingested:
knowledge_path = r'C:\Users\colli\Documents\AIPapers'
mem.ingest_docs(knowledge_path)
mem.save()
print("Memory initialized and saved.")
else:
mem.load()
print("Memory loaded.")
handler = StdOutCallbackHandler()
llm = ChatOpenAI(
model_name="gpt-4", # "gpt-3.5-turbo"
temperature=0,
verbose=True
)
chain = ChatChain(llm=llm, prompt=mem_query_template, callbacks=[handler], system_message=mem_system_message)
knowledge_base_query_agent = GraphDBTraversalChain(llm_chain=chain, graph_vector_store=mem.vector_store)
# Example Research questions:
# What are different methods of providing language models with additional context to better answer questions?
# How can semantic search be used in conjunction with large language models in order to better answer questions?
# What are some techniques for achieving better general intelligence in language models?
def main_loop():
try:
while True:
question = input("Enter a question: ")
print(knowledge_base_query_agent.run(question))
except KeyboardInterrupt:
print("Shutdown: Saving...")
mem.save()
print("Shutdown: Complete")
else:
print("Completed all tasks.")
"""
Example:-------------------------------------------------
What are the known uses of Glowing Moon Berries?
Context Chunk #1
Content: 'Glowing Moon Berries are a rare type of berry found only on the moon Zaphiron in the Pegasus galaxy. These luminescent berries shine brightly in the moon's perennial twilight, giving them their distinctive name.'
Connections:
CONTINUES TO Context Chunk #3
CONTINUES FROM Context Chunk #4
Context Chunk #2
Content: 'Glowing Moon Berries have a bitter, almost electric taste and little nutrition value. They are not considered edible by most species, and have been largely ignored until some interesting uses were discovered recently.'
Connections:
CONTINUES TO Context Chunk #5
CONTINUES FROM Context Chunk #6
Context Chunk #7
Content: 'Nebula Nectar is an extraordinary substance, harvested from the heart of the Orion Nebula. Nebula Nectar resembles Glowing Moon Berries in some ways which is interesting given how far apart the two galaxies are.'
Connections:
CONTINUES TO Context Chunk #8
CONTINUES FROM Context Chunk #9
----
Document #5
Content: 'Glowing Moon Berries are known for their unique properties. They are used primarily as a power source for the nano-tech machinery on Zaphiron due to their unusually high energy output.'
Connections:
CONTINUES to Document #12
CONTINUES FROM Document #2
Document #3
Content: 'In 2225, during the maiden voyage of the interstellar exploration ship 'Star Wanderer', the crew made a surprising discovery on Zaphiron, an obscure moon in the Pegasus galaxy. Amidst the constant twilight of the moon's surface, they found clusters of luminescent berries, later named 'Glowing Moon Berries', their radiance illuminating the alien landscape with an ethereal glow.'
Connections:
CONTINUES to Document #15
CONTINUES FROM Document #1
Document #15
Content: 'Later in 2225, as the Star Wanderer's crew continued their exploration of Zaphiron, they uncovered ancient ruins of a long-lost civilization, revealing intricate carvings that eerily mirrored constellations observed from Earth. This discovery deepened the mystery of Zaphiron, hinting at a potential connection between this distant moon and our home planet'
Connections:
CONTINUES to Document #16
CONTINUES FROM Document #15
Document #12
Content: 'Remarkably, when juiced and refined, the berries can power the nano-tech machines for months on a single serving, outperforming any other known energy source. Furthermore, the energy they emit is clean and sustainable, making Glowing Moon Berries a crucial component in the maintenance of the delicate ecological balance on Zaphiron.'
Connections:
CONTINUES to Document #17
CONTINUES FROM Document #5
""" | [
"The question is: \nQUESTION: {question}\n\nHere is the information gathered so far:\nWORKING SUMMARY: {working_summary}\n\nBelow are the relevant context chunks that have been looked up so far:\nCONTEXT CHUNKS:\n{context_chunks}"
] |
2024-01-10 | LuckCow/ModularIntellect | src~agents~chat_chain.py | """Chain that carries on a conversation and calls an LLM."""
from typing import Dict, List, Any, Optional
from pydantic import Extra, Field
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chat_models import ChatOpenAI
from langchain.chat_models.base import BaseChatModel
from langchain.memory import ChatMessageHistory
from langchain.chains.llm import LLMChain
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseChatMessageHistory
from langchain.schema import SystemMessage
class ChatChain(LLMChain):
"""
Chain to have a conversation and maintain conversation history for Chat type models
Example:
.. code-block:: python
chat_chain = ChatChain(prompt=PromptTemplate(input_variables=["foo"], template="Say {foo}"))
"""
prompt: BasePromptTemplate
system_message: str = None
history: BaseChatMessageHistory = Field(default_factory=ChatMessageHistory)
llm: BaseChatModel = Field(default_factory=ChatOpenAI)
output_key: str = "response" #: :meta private:
def __init__(self, **kwargs):
super().__init__(**kwargs)
# initialize system message
if self.system_message:
self.history.add_message(SystemMessage(content=self.system_message))
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Use this since so some prompt vars come from history."""
return self.prompt.input_variables
@property
def output_keys(self) -> List[str]:
"""Will always return text key.
:meta private:
"""
return [self.output_key]
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
# format human message
prompt_value = self.prompt.format(**inputs)
# add to history
self.history.add_user_message(prompt_value)
# Call chat llm
response = self.llm(
self.history.messages,
callbacks=run_manager.get_child() if run_manager else None
)
# add response to history
self.history.add_ai_message(response.content)
# log results to run manager
if run_manager:
run_manager.on_text(f"history: {self.history.messages[:-2]}\nprompt: {prompt_value}\nresponse: {response.content}")
return {self.output_key: response.content}
@property
def _chain_type(self) -> str:
return "ChatChain"
if __name__ == '__main__':
# Test usage
from dotenv import load_dotenv
from langchain import PromptTemplate
from langchain.callbacks import StdOutCallbackHandler
from langchain.memory import ChatMessageHistory
from langchain.schema import SystemMessage
load_dotenv()
chat = ChatOpenAI(
model_name="gpt-3.5-turbo",
temperature=0,
verbose=True
)
system_message = """I would like you to recite the colors of the rainbow to the user. The list of colors, in order, should be: "red", "orange", "yellow", "green", "blue", "indigo", "violet".
However, please note that the user may not always ask for all seven colors at once. Instead, they may ask for a specific number of colors. When this happens, your task is to recite the requested number of colors from the list, starting from "red" and proceeding in order.
If the user asks for more colors at a later point, you should continue from where you last stopped. For example, if the user first requests three colors, you should say "red", "orange", "yellow". If the user then asks for two more colors, you should continue with "green", "blue".
You should remember the last color you recited and continue the sequence from there each time the user requests more colors. However, once you recite "violet", if the user requests more colors, you should start back from "red".
The user has requested 0 colors so the next one will be red."""
prompt = PromptTemplate.from_template("""Give me the next {number} colors""")
llmchain = ChatChain(llm=chat, prompt=prompt, callbacks=[StdOutCallbackHandler()],
system_message=system_message, verbose=True)
response = llmchain.run(number=3)
print(response)
# memory.buffer.append(prompt.format_messages(number=2))
response = llmchain.run(number=2)
print(response)
| [
"orange",
"Give me the next {number} colors",
"green",
"indigo"
] |
2024-01-10 | LuckCow/ModularIntellect | src~memory~triple_modal_memory.py | from typing import Dict, List, Any
import time
from langchain.schema import BaseMemory
from langchain import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter
from src.memory.neo4j_docstore import Neo4jDocstore
from langchain.embeddings import OpenAIEmbeddings
from neo4j import GraphDatabase
import faiss
from src.utils.mapping_directory_loader import MappingDirectoryLoader
class TripleModalMemory:
"""Provides interface for storing and retrieving data from the Neo4j+FAISS database."""
def __init__(self, uri, user, password):
# TODO: load in existing documents into the faiss index, enforcing them to be in sync
# or save the faiss index to disk (but then we need to either save every time something happens or risk corruption
# setup the docstore and vector store
neo4j_docstore = Neo4jDocstore(uri, auth=(user, password))
embedding = OpenAIEmbeddings()
index = faiss.IndexFlatL2(1536)
faiss_vector_store = FAISS(embedding.embed_query, index, docstore=neo4j_docstore, index_to_docstore_id={})
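# The FAISS index stores only the embeddings; the Neo4j docstore keeps the document bodies (and their graph relations).
# index_to_docstore_id starts empty and is filled as texts/documents are added.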
self.vector_store = faiss_vector_store
# setup the memory interface
class TripleModalAgentMemory(BaseMemory):
"""Langchain Memory class for chain interactions to be persisted into the Neo4j database."""
memory_key: str = "history"
def clear(self):
# Clear the Neo4j graph
with driver.session() as session:
query = "MATCH (n) DETACH DELETE n"
session.run(query)
@property
def memory_variables(self) -> List[str]:
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
text = inputs[list(inputs.keys())[0]]
similar_interactions = self._retrieve_similar_interactions(text)
# interactions_text = "\n".join(
# [f"{item['i.text']} ({item['i.timestamp']})" for item in similar_interactions])
return {self.memory_key: similar_interactions}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
input = inputs[list(inputs.keys())[0]]
output = outputs[list(outputs.keys())[0]]
timestamp = inputs.get("timestamp", None) # You can include an optional timestamp in the inputs
if timestamp is None:
import datetime
timestamp = datetime.datetime.utcnow().isoformat()
# TODO: identify and pass interaction identifiers
interaction_id = '1'
faiss_vector_store.add_texts([input, output],
metadatas=[{'timestamp': timestamp, 'source': 'user',
'node_type': 'interaction', 'conversation_id': interaction_id},
{'timestamp': timestamp, 'source': 'agent',
'node_type': 'interaction', 'conversation_id': interaction_id}])
def _store_interaction(self, text, timestamp, parent_id=None):
# Add the input to the FAISS index
faiss_vector_store.add_texts([text], metadatas=[{'timestamp': timestamp}]) # TODO: parent?
def _retrieve_similar_interactions(self, text, k=5):
# Search the FAISS index for similar interactions
return faiss_vector_store.similarity_search(text, k)
self.memory = TripleModalAgentMemory()
def store_task(self, task: str, timestamp: str):
"""Store a task in the memory."""
self.vector_store.add_texts([task], metadatas=[{'timestamp': timestamp, 'node_type': 'task'}])
def store_notes(self, notes: str, sources, timestamp: str):
"""Store notes that come from existing memory."""
self.vector_store.add_texts([notes], metadatas=[{'timestamp': timestamp,
'sources': sources, # TODO: setup sources
'node_type': 'notes'}])
def ingest_docs(self, path: str, chunk_size=1000, chunk_overlap=200):
"""Read, split and store code and other files from within the repository/folder."""
loader = MappingDirectoryLoader(path, recursive=True, silent_errors=False)
raw_documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
)
documents = text_splitter.split_documents(raw_documents)
for d in documents:
d.metadata["chunk_size"] = chunk_size
d.metadata["chunk_overlap"] = chunk_overlap
d.metadata["node_type"] = "document"
# Add documents to vector store in chunks with wait time to avoid getting rate limited
# by the OpenAI API (20 requests per minute)
for i in range(0, len(documents), 20):
self.vector_store.add_documents(documents[i:i+20])
print(f"Added documents {i} to {i+20}/{len(documents)} to vector store")
time.sleep(60)
#self.vector_store.add_documents(documents)
def save(self):
self.vector_store.save_local('../data', 'triple_modal_memory')
def load(self):
embedding = OpenAIEmbeddings()
self.vector_store = FAISS.load_local('../data', embedding, 'triple_modal_memory')
def verify(self):
print(self.vector_store.index.ntotal)
def search(self, query, k):
return self.vector_store.similarity_search(query, k)
def test_ingest_save(mem):
knowledge_path = r'C:\Users\colli\PycharmProjects\ModularIntellect\data\test_knowledgebase'
# storage_path = '../data/langchain.pkl'
mem.ingest_docs(knowledge_path)
mem.save()
if __name__ == '__main__':
import os
from dotenv import load_dotenv
load_dotenv()
uri = os.getenv("NEO4J_URI")
user = os.getenv("NEO4J_USER")
password = os.getenv("NEO4J_PASSWORD")
mem = TripleModalMemory(uri, user, password)
#test_ingest_save(mem)
mem.load()
mem.verify()
print(mem.vector_store.similarity_search("What are the implementations of BaseChainLangAgent?", 3))
# import datetime
# timestamp = datetime.datetime.utcnow().isoformat()
# #faiss_vector_store.add_texts(['Start of document ', 'middle of document', 'end of document'], metadatas=[{'timestamp': timestamp}, {'timestamp': timestamp}, {'timestamp': timestamp}])
#
# mem.save_context({"text": "The fictional nation of Jietao has 4 major cities: Niuy, Bietao, Cholkja, and Fosst"}, {})
# mem.save_context({"text": "Leeroy is a blacksmith that lives in Niuy"}, {})
# mem.save_context({"text": "Jaon is a mason that lives in Cholkja"}, {})
#
# print(mem.retrieve_similar_interactions("What nation does Leeroy live in?", 3))
#driver.close()
| [] |
2024-01-10 | LuckCow/ModularIntellect | src~experiments~code_base_summary_ingest.py | """
Prompt chaining and vectorstore example using LangChain
- load all code from a code base and answer questions about it as a chatbot.
Based on chat-langchain (https://github.com/hwchase17/chat-langchain/)
"""
import pickle
import os
import logging
from langchain.callbacks import CallbackManager, StdOutCallbackHandler
from langchain.chains import ChatVectorDBChain
from langchain.chains.chat_vector_db.prompts import (CONDENSE_QUESTION_PROMPT, QA_PROMPT)
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.vectorstores.base import VectorStore
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores.faiss import FAISS
from src.utils.mapping_directory_loader import MappingDirectoryLoader
logger = logging.getLogger(__name__)
def ingest_docs(path: str):
"""Read, split and store code and other files from within the repository/folder."""
loader = MappingDirectoryLoader(path, recursive=True, silent_errors=True)
raw_documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
)
documents = text_splitter.split_documents(raw_documents)
embeddings = OpenAIEmbeddings()
vectorstore = FAISS.from_documents(documents, embeddings)
# Save vectorstore
with open("vectorstore.pkl", "wb") as f:
pickle.dump(vectorstore, f)
def get_chain(
vectorstore: VectorStore,
) -> ChatVectorDBChain:
"""Create a ChatVectorDBChain for question/answering."""
# callback manager for logging
manager = CallbackManager([StdOutCallbackHandler()])
# LLM interface (needs os.environ["OPENAI_API_KEY"] set)
question_gen_llm = OpenAI(
temperature=0,
verbose=True,
)
# Creates standalone question from chat history context
question_generator = LLMChain(
llm=question_gen_llm, prompt=CONDENSE_QUESTION_PROMPT, callback_manager=manager, verbose=True
)
# Asks final question
doc_chain = load_qa_chain(
question_gen_llm, chain_type="stuff", prompt=QA_PROMPT, callback_manager=manager, verbose=True
)
# Chains together QuestionGenerator->MemoryLookup->QuestionAnswering
qa = ChatVectorDBChain(
vectorstore=vectorstore,
combine_docs_chain=doc_chain,
question_generator=question_generator,
)
return qa
if __name__ == "__main__":
# vector-store docs (only necessary on first run) (delete vectorstore.pkl if you change the path)
if not os.path.exists("vectorstore.pkl"):
logger.info("No pickle file found, ingesting docs...")
ingest_docs(r"C:\Users\colli\PycharmProjects\langchain-master")
else:
logger.info("Using existing pickle file.")
# Load Up vectorstore
with open("vectorstore.pkl", "rb") as f:
vectorstore = pickle.load(f)
qa_chain = get_chain(vectorstore)
# Chat loop
chat_history = []
while True:
question = input("Question: ")
result = qa_chain({"question": question, "chat_history": chat_history})
chat_history.append((question, result["answer"]))
print(result['answer']) | [] |
2024-01-10 | aofenghanyue/EntryGuidance | guidance~multiMissileGuideInstance.py | # -*- coding: utf-8 -*-
# EditTime : 2021-09-29 20:22
# Author : Of yue
# File : multiMissileGuideInstance.py
# Intro :
from typing import List
import numpy as np
import multiset as glbs
from numpy import sin, cos, tan, arccos, arctan
from entity.missile import Missile
from guidance.MultiGuide import MultiMissileGuidance
from database.Constant import earth as e
from database.Atmosphere import atmosphereISA as ATM
from utils.common import coords_trans as ct
from utils.common import heading_angle, limit_num
from custom.EntryCorridor import cav_corridor
from utils.interpolate import Interp1
from utils.common import inv_alpha
from core.MultiMissileSim import MultiMissileSim
from scipy import integrate, optimize
import copy
class MultiMisGuideInstance(MultiMissileGuidance):
def __init__(self):
super(MultiMisGuideInstance, self).__init__()
# 各导弹起飞前准备时间
self.start_time: list = []
self.control_param_list = [] # 要从制导参数直接导入到控制参数的值名称
self.guide_phase: dict = {
"descent_phase1": self.descent_phase1,
"descent_phase2": self.descent_phase2,
"steady_glide_phase": self.steady_glide_phase,
"bias_explicit_guidance": self.bias_explicit_guidance
}
def init_custom(self, mis: List[Missile], tar: List[Missile] = [], meta={}):
self.control_param_list = getattr(glbs, 'CONTROL_PARAM_LIST', ["L", "D", "m", "attack_angle", "bank_angle"])
for index, (m, t) in enumerate(zip(mis, tar)):
m.guide.update(
{
# Flight mode:
# default: normal guidance
# online: online trajectory simulation
"guide_flag": "default",
"alpha_max": m.p.alpha_max,
"alpha_sg": m.p.max_L2D_alpha,
"alpha_2": np.deg2rad(6),
"E2": -5.55e7,
"Rs": cav_corridor.average_h_e + e.Re,
"VTAEM": glbs.StatusParams[index]["MissileEndStatus"]["velocity"],
"HTAEM": glbs.StatusParams[index]["MissileEndStatus"]["height"],
"STAEM": glbs.StatusParams[index]["MissileEndStatus"]["s"],
"ETAEM": e.E(glbs.StatusParams[index]["MissileEndStatus"]["velocity"],
glbs.StatusParams[index]["MissileEndStatus"]["height"]),
"psiTAEM": glbs.StatusParams[index]["MissileEndStatus"]["heading_angle"],
"TTAEM": glbs.StatusParams[index]["MissileEndStatus"]["t"],
"guide_phase": "descent_phase1",
"err_tol": getattr(glbs, 'ERR_TOL', 1e-2),
"kgamma": getattr(glbs, 'K_GAMMA', 3), # 下降段攻角反馈系数
"k_PN": getattr(glbs, 'K_PN', 3), # 显式制导段比例导引系数
'k_sigma': getattr(glbs, 'K_SIGMA', -50), # 最大倾侧角中的系数
'k_gamma_sgp': getattr(glbs, 'K_GAMMA_SGP', 3), # 滑翔段TDCT反馈系数
'k_sigma_beg': getattr(glbs, 'K_SIGMA_BEG', 0.5 / 1e5), # 显式制导反馈系数
}
)
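# Initial guesses for the three bank-reversal energies: the midpoint between the current energy and E2, E2 itself,
# and a point slightly above the TAEM energy.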
m.guide["EBR"] = [(m.E + m.guide["E2"]) / 2,
m.guide["E2"],
m.guide["ETAEM"] + 2e6]
m.guide["BR_flag"] = [False, False, False]
m.guide["update_EBR_flag"] = [False, False, False]
m.guide["BR_times"] = 0
m.guide["L2Dbsl_TAEM"] = cav_corridor.L2D_E(m.guide["ETAEM"])
# Sign of the initial bank angle
great_circle_heading = heading_angle(m.status.longitude, m.status.latitude,
t.status.longitude, t.status.latitude)
m.guide["sgn_ini"] = 1 if great_circle_heading[0] > m.status.heading_angle else -1
# Set the terminal heading angle
m.guide["psiTAEM"] = great_circle_heading[1]
print(f"Missile {m.guide['index']} initial zero-bias heading angle: {np.rad2deg(great_circle_heading[0])}\n"
f"Expected terminal heading angle: {np.rad2deg(great_circle_heading[1])}")
# After all other parameters are set, initialize the missile launch times
self.before_online_sim(mis, tar, meta)
for index, (m, t) in enumerate(zip(mis, tar)):
# Missile launch time
m.guide["launch_time"] = self.start_time[index]
def before_online_sim(self, mis: List[Missile], tar: List[Missile] = [], meta={}):
# Offline simulation stage: set each missile's reference trajectory and launch time
print('Setting initial launch times')
self.start_time = [0]
def is_launched(self, mis: Missile, tar: Missile = None, meta={}):
# Check whether each missile has been launched
if mis.launched:
return True
elif meta["t"] >= mis.guide["launch_time"]:
mis.launched = True
return mis.launched
def guide(self, mis: Missile, tar: Missile = None, meta={}):
# Determine the integration step size
mis.guide["step_len"] = self.step_len_gen(mis)
# Parse the data coming from the missile
self.parse_param(mis, tar, meta)
# Guidance law for the current phase
self.guide_phase[mis.guide["guide_phase"]](mis, tar, meta)
# Post-processing: pass the guidance data into the control equations
self.guide2equation(mis, tar, meta)
def step_len_gen(self, mis: Missile):
# TODO: choose the integration step size based on the flight state
return 0.1
def parse_param(self, mis, tar, meta={}):
S = mis.p.reference_area
h = mis.status.height
v = mis.status.velocity
E = mis.E
Ma = mis.ma
t = mis.status.t
gamma = mis.status.path_angle
psi = mis.status.heading_angle
CL_plan, CD_plan = mis.aero.CLCD(Ma, self.attack_angle_plan(mis, E))
rho = ATM.rho(h)
q_inf = rho * v ** 2 * S / 2
CL, CD = mis.control.get('CL', CL_plan), mis.control.get('CD', CD_plan)
L, D = CL * q_inf, CD * q_inf
great_circle_heading = heading_angle(mis.status.longitude, mis.status.latitude,
tar.status.longitude, tar.status.latitude)
ref_psi = great_circle_heading[0]
delta_psi = mis.status.heading_angle - ref_psi
if delta_psi > np.pi:
delta_psi = delta_psi - 2 * np.pi
elif delta_psi < -np.pi:
delta_psi = delta_psi + 2 * np.pi
sgo = self.s_go(mis, tar, meta)
q = mis.p.k_Q * np.sqrt(rho) * pow(v, 3.15)
mis.guide.update({
'm': mis.p.m,
'S': S,
'E': E,
'Ma': Ma,
'v': v,
'h': h,
'CL': CL,
'CD': CD,
'q_inf': q_inf,
'L': L,
'D': D,
'great_circle_heading': great_circle_heading,
'delta_psi': delta_psi,
's_go': sgo,
'ref_psi': ref_psi,
'q': q,
'psi': psi,
'gamma': gamma,
't': t
})
def before_phase(self, mis: Missile, tar: Missile = None, meta={}):
self.update_kh(mis, tar, meta)
if not mis.guide["guide_phase"] == 'bias_explicit_guidance':
if mis.guide["E"] >= mis.guide["EBR"][2] + 1e6:
# Update the lift-to-drag parameters before the third bank reversal
self.update_L12D(mis, tar, meta)
else:
mis.guide["L12D_param"][1] = mis.guide["L12D_const"]
self.update_path_angle_sgp(mis, tar, meta)
if mis.guide["guide_phase"] == "steady_glide_phase":
self.update_EBR(mis, tar, meta)
self.BR(mis, tar, meta)
def descent_phase1(self, mis: Missile, tar: Missile = None, meta={}):
mis.guide["attack_angle"] = mis.guide["alpha_max"]
mis.guide["bank_angle"] = 0
if mis.status.t > 10 and mis.guide["dy"][4] > 0:
print(f'Missile {mis.guide["index"]}: first descent sub-phase finished, t = {meta["t"]}')
mis.guide["guide_phase"] = "descent_phase2"
mis.guide["E_steady_glide_start"] = mis.guide["E"]
self.before_phase(mis, tar, meta)
mis.guide["dgamma0"] = mis.guide["gamma_sg"] - mis.status.path_angle
def descent_phase2(self, mis: Missile, tar: Missile = None, meta={}):
self.before_phase(mis, tar, meta)
delta_gamma = mis.guide["gamma_sg"] - mis.status.path_angle
mis.guide["attack_angle"] = delta_gamma / mis.guide["dgamma0"] * mis.guide["alpha_max"] + (
mis.guide["dgamma0"] - delta_gamma) / mis.guide["dgamma0"] * (
self.attack_angle_plan(mis, mis.guide["E"]) + mis.guide[
"kgamma"] * delta_gamma)
mis.guide["bank_angle"] = 0
if np.abs(delta_gamma) < mis.guide["err_tol"]:
print(f'Second descent sub-phase finished, t = {meta["t"]}')
mis.guide["guide_phase"] = "steady_glide_phase"
def steady_glide_phase(self, mis: Missile, tar: Missile = None, meta={}):
self.before_phase(mis, tar, meta)
alpha_bsl = self.attack_angle_plan(mis, mis.guide["E"])
sigma_bsl = self.sigma_bsl(mis, tar, meta)
gamma_sg = mis.guide["gamma_sg"]
sigma_max = self.sigma_max(mis, tar, meta)
alpha_cmd, sigma_cmd = self.TDCT(mis, alpha_bsl, sigma_bsl, gamma_sg - mis.status.path_angle)
sigma_cmd = limit_num(sigma_cmd, abs_limit=sigma_max)
mis.guide["attack_angle"] = alpha_cmd
mis.guide["bank_angle"] = sigma_cmd
if mis.guide["E"] < mis.guide["EBR"][2]:
mis.guide["sgo_beg"] = mis.guide["s_go"]
mis.guide["guide_phase"] = 'bias_explicit_guidance'
print(f'Steady glide phase finished, t = {meta["t"]}')
def bias_explicit_guidance(self, mis: Missile, tar: Missile = None, meta={}):
"""
:param mis:
:param tar:
:param meta:
:return:
"""
alpha, sigma = self.sigma_alpha_beg(mis, tar, meta)
sigma_max = self.sigma_max(mis, tar, meta)
sigma = limit_num(sigma, abs_limit=sigma_max)
alpha = limit_num(alpha, abs_limit=mis.guide["alpha_max"])
mis.guide["attack_angle"] = alpha
mis.guide["bank_angle"] = sigma
def update_kh(self, mis: Missile, tar: Missile, meta={}):
Rs = mis.guide["Rs"]
m_phi = mis.status.latitude
t_phi = tar.status.latitude
E = mis.guide["E"]
ET = mis.guide["ETAEM"]
V = mis.guide["v"]
VT = mis.guide["VTAEM"]
H = mis.guide["h"]
HT = mis.guide["HTAEM"]
L2DT = mis.guide["L2Dbsl_TAEM"]
ha = mis.guide["great_circle_heading"]
w = e.omega_e
L2D = mis.guide["CL"] / mis.guide["CD"] * cos(mis.control["bank_angle"])
hz1 = -2 * Rs * w * V * cos(m_phi) * sin(ha[0]) - \
Rs * w ** 2 * (e.Re + H) * cos(m_phi) * \
(cos(m_phi) - L2D * sin(m_phi) * cos(ha[0]))
hz1_t = -2 * Rs * w * VT * cos(t_phi) * sin(ha[1]) - \
Rs * w ** 2 * (e.Re + HT) * cos(t_phi) * \
(cos(t_phi) - L2DT * sin(t_phi) * cos(ha[1]))
kh1 = (hz1_t * E - hz1 * ET) / (E - ET), \
(hz1 - hz1_t) / (E - ET)
hz2 = w ** 2 * Rs * (e.Re + H) * L2D * sin(m_phi) * cos(m_phi) * cos(ha[0])
hz2_t = w ** 2 * Rs * (e.Re + HT) * L2DT * sin(t_phi) * cos(t_phi) * cos(ha[1])
kh2 = (hz2_t * E - hz2 * ET) / (E - ET), \
(hz2 - hz2_t) / (E - ET)
kh3 = -2 * w * Rs * (VT * E - ET * V) * (sin(t_phi) * E - sin(m_phi) * ET) / (E - ET) ** 2, \
-2 * w * Rs * (VT * sin(m_phi) + V * sin(t_phi)) * (E + ET) / (E - ET) ** 2 \
+ 4 * w * Rs * (VT * sin(t_phi) * E + V * sin(m_phi) * ET) / (E - ET) ** 2, \
-2 * w * Rs * (V - VT) * (sin(m_phi) - sin(t_phi)) / (E - ET) ** 2
hz4 = -2 * Rs * w * V * cos(m_phi) * sin(ha[0]) - \
Rs * w ** 2 * (e.Re + H) * cos(m_phi) ** 2
hz4_t = -2 * Rs * w * VT * cos(t_phi) * sin(ha[1]) - \
Rs * w ** 2 * (e.Re + HT) * cos(t_phi) ** 2
kh4 = (hz4_t * E - hz4 * ET) / (E - ET), \
(hz4 - hz4_t) / (E - ET)
mis.guide["kh"] = [
kh1, kh2, kh3, kh4
]
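# Each kh entry stores polynomial-in-energy coefficients that interpolate the corresponding Earth-rotation
# compensation term between its current value and its TAEM value: kh1[0] + kh1[1]*E equals hz1 at the current
# energy and hz1_t at ETAEM (likewise for kh2 and kh4); kh3 keeps three coefficients for a quadratic-in-energy fit.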
def update_path_angle_sgp(self, mis: Missile, tar: Missile, meta={}):
rho = ATM.rho(mis.guide["h"])
drho_dh = -0.00015 * rho
v = mis.guide["v"]
S = mis.guide["S"]
m = mis.guide["m"]
Rh = mis.guide["h"] + e.Re
sigma_bsl = self.sigma_bsl(mis, tar, meta)
alpha_bsl = self.alpha_bsl(mis, tar, meta)
CL_bsl, CD_bsl = mis.aero.CLCD(mis.ma, alpha_bsl)
D_bsl = CD_bsl * mis.guide["q_inf"]
dCL_dE = cav_corridor.interp_dCL_dE(mis.guide["E"])
d1 = rho * v ** 2 * S * cos(sigma_bsl) / 2 / m * dCL_dE + 2 / Rh + CL_bsl * rho * S * cos(sigma_bsl) / m
d2 = -CL_bsl * v ** 2 * S * cos(sigma_bsl) * drho_dh / 2 / e.g0 / m + 2 / Rh + CL_bsl * rho * S * cos(
sigma_bsl) / m + v ** 2 / Rh ** 2 / e.g0
mis.guide["gamma_sg"] = -D_bsl / m / e.g0 * d1 / d2
def alpha_bsl(self, mis: Missile, tar: Missile = None, meta={}):
return self.attack_angle_plan(mis, mis.guide['E'])
def sigma_bsl(self, mis: Missile, tar: Missile = None, meta={}):
if mis.guide["guide_phase"] == 'descent_phase1' or mis.guide["guide_phase"] == 'descent_phase2':
return 0
if mis.guide["guide_phase"] == 'steady_glide_phase':
return self.sigma_bsl_sgp(mis, tar, meta)
if mis.guide["guide_phase"] == 'bias_explicit_guidance':
return self.sigma_bsl_beg(mis, tar, meta)
def sigma_bsl_sgp(self, mis: Missile, tar: Missile = None, meta={}):
L1_L = self.L12D(mis) / (mis.guide["CL"] / mis.guide["CD"])
# assert 1 >= L1_L >= -1, '超出射程范围'
if 1 < L1_L or L1_L < -1:
return 0
res = mis.guide["sgn_ini"] * arccos(L1_L)
return mis.guide["sgn_ini"] * (-1) ** mis.guide["BR_times"] * res
def sigma_bsl_beg(self, mis: Missile, tar: Missile = None, meta={}):
# TODO: bank angle for the bias explicit guidance phase
return 0
def alpha_beg(self, mis: Missile, tar: Missile = None, meta={}):
# TODO: angle of attack for the bias explicit guidance phase
return 0
def sigma_alpha_beg(self, mis: Missile, tar: Missile = None, meta={}):
"""
在线仿真时/显式制导 倾侧角与攻角参数
:param mis:
:param tar:
:param meta:
:return:
"""
V = mis.guide["v"]
gamma = mis.guide["gamma"]
h = mis.guide["h"]
phi = mis.status.latitude
psi = mis.guide["psi"]
psi_end = mis.guide["psiTAEM"]
delta_psi_end = 0 if not psi_end else psi_end - psi
delta_psi = mis.guide["delta_psi"]
delta_h = mis.guide["HTAEM"] - h
s_go = mis.guide["s_go"]
s_go2 = s_go - mis.guide["STAEM"]
s_LOS = np.linalg.norm([s_go2, delta_h])
gamma_LOS = np.arctan2(delta_h, s_go2)
a_tol = e.g0 - V ** 2 / (e.Re + h) - e.omega_e ** 2 * (e.Re + h) * cos(phi) ** 2 - 2 * V * e.omega_e * cos(
phi) * sin(psi)
aL1 = -mis.guide["k_PN"] * V ** 2 * sin(gamma - gamma_LOS) / s_LOS + a_tol
aL2_orig = aL1 * tan(self.sigma_bsl_sgp(mis))
aL2 = (-12 * V ** 2 * sin(delta_psi) / s_LOS - 6 * V ** 2 * sin(delta_psi_end) / s_LOS) * (mis.guide["sgo_beg"] - s_go) / mis.guide["sgo_beg"] \
+ aL2_orig * s_go2 / mis.guide["sgo_beg"]
mis.guide["a_tol"] = a_tol
mis.guide["aL2_orig"] = aL2_orig
mis.guide["aL1"] = aL1
mis.guide["aL2"] = aL2
if mis.guide["guide_flag"] == "online":
# 反解攻角
CL = mis.guide["m"] * np.linalg.norm([aL1, aL2]) / mis.guide["q_inf"]
mis.guide["CL_beg"] = CL
alpha_sim = mis.aero.inverse_alpha(CL, mis.guide["Ma"])
sigma_sim = arctan(aL2 / aL1)
return alpha_sim, sigma_sim
else:
# sigma = arctan(aL2 / a_tol)
sigma = arctan(aL2 / aL1)
# Feedback here still uses the error relative to the reference range-to-go
if mis.guide["refs"]:
k_sgo = 0.2 * s_go / mis.guide["sgo_beg"]
E_min = mis.guide["refs"].get('ETAEM_ref', -np.inf)
E_max = mis.guide["refs"].get('E_sim0', np.inf)
fs_go_ref = mis.guide["refs"].get('f_sgo_ref', None)
ft_ref = mis.guide["refs"].get('f_t_ref', None)
fl2d_ref = mis.guide["refs"].get('f_L12D_ref', None)
E_interp = limit_num(mis.guide["E"], interval_limit=[E_min,E_max],mode='interval')
sgo_ref = fs_go_ref(E_interp) if fs_go_ref else s_go
t_ref = ft_ref(E_interp) if ft_ref else mis.guide["t"]
L12D_ref = fl2d_ref(E_interp) if fl2d_ref else self.L12D(mis, mis.guide["E"])
s_go_ref2 = sgo_ref - k_sgo * mis.guide["v"] * (mis.guide["t"] - t_ref)
L12D_beg = L12D_ref + mis.guide["k_sigma_beg"] * (s_go - s_go_ref2)
# # Debug
# L12D_beg = L12D_ref
else:
L12D_beg = self.L12D(mis, mis.guide["E"])
mis.guide["ref_sgo"] = sgo_ref
mis.guide["ref_t"] = t_ref
mis.guide["ref_L12D"] = L12D_ref
mis.guide["L12D_beg"] = L12D_beg
alpha = inv_alpha(mis.guide["Ma"], L12D_beg / cos(sigma), mis.aero)
return alpha, sigma
def sigma_max(self, mis: Missile, tar: Missile = None, meta={}):
if not glbs.CONSIDER_SIGMA_MAX:
return np.inf
dHmindE = cav_corridor.dHmindE_E(mis.guide["E"])
D = mis.control.get('D', mis.guide['D'])
dHdE = -mis.guide["m"] * sin(mis.status.path_angle) / D
Hmin = cav_corridor.interp_Hmin_E(mis.guide["E"])
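# L1 below is the vertical lift needed for quasi-equilibrium flight at the corridor floor Hmin (gravity minus the
# centrifugal and Coriolis relief terms); limiting cos(sigma) to L1/Lmax is intended to keep the trajectory from
# sinking below the corridor.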
L1 = mis.guide["m"] * (e.g0 - mis.guide["v"] ** 2 / (e.Re + Hmin)
- e.omega_e ** 2 * (e.Re + Hmin) * cos(mis.status.latitude) ** 2
- 2 * mis.guide["v"] * e.omega_e * cos(mis.status.latitude)
* sin(mis.status.heading_angle))
Lmax = mis.guide["CL"] * 0.5 * ATM.rho(Hmin) * mis.guide["v"] ** 2 * mis.guide["S"]
if L1 > Lmax:
L1 = Lmax
sigma_max = arccos(L1 / Lmax) + mis.guide["k_sigma"] * (dHmindE - dHdE)
mis.guide["sigma_max_L1"] = L1
mis.guide["sigma_max_Lmax"] = Lmax
mis.guide["sigma_max_dHmindE"] = float(dHmindE)
mis.guide["sigma_max_dHdE"] = dHdE
mis.guide["sigma_max"] = sigma_max
return sigma_max
def TDCT(self, mis: Missile, alpha_bsl, sigma_bsl, delta_gamma):
"""
Trajectory damping control technique (TDCT) to suppress trajectory oscillations
:param alpha_bsl: baseline angle-of-attack command
:param sigma_bsl: baseline bank-angle command
:param delta_gamma: steady-glide flight-path angle minus the current flight-path angle
:return: angle-of-attack command, bank-angle command
"""
alpha_cmd = alpha_bsl + cos(sigma_bsl) * mis.guide["k_gamma_sgp"] * delta_gamma
sigma_cmd = sigma_bsl - sin(sigma_bsl) * mis.guide["k_gamma_sgp"] * delta_gamma / mis.guide["alpha_sg"]
return alpha_cmd, sigma_cmd
def update_EBR(self, mis: Missile, tar: Missile = None, meta={}):
if not mis.guide["update_EBR_flag"][0]:
mis.guide["update_EBR_flag"][0] = True
self.update_EBR1(mis, tar, meta)
elif not mis.guide["update_EBR_flag"][1] and mis.guide["E"] < mis.guide["EBR"][0]:
mis.guide["update_EBR_flag"][1] = True
self.update_EBR2(mis, tar, meta)
elif not mis.guide["update_EBR_flag"][2] and mis.guide["E"] < mis.guide["EBR"][1]:
mis.guide["update_EBR_flag"][2] = True
self.update_EBR3(mis, tar, meta)
def BR(self, mis: Missile, tar: Missile = None, meta={}):
if mis.guide["E"] < mis.guide["EBR"][0] and not mis.guide["BR_flag"][0]:
mis.guide["BR_flag"][0] = True
mis.guide["BR_times"] = 1
elif mis.guide["E"] < mis.guide["EBR"][1] and not mis.guide["BR_flag"][1]:
mis.guide["BR_flag"][1] = True
mis.guide["BR_times"] = 2
elif mis.guide["E"] < mis.guide["EBR"][2] and not mis.guide["BR_flag"][2]:
mis.guide["BR_flag"][2] = True
mis.guide["BR_times"] = 3
def update_EBR1(self, mis: Missile, tar: Missile = None, meta={}):
"""
Update EBR1 and EBR2
:param mis:
:param tar:
:param meta:
:return:
"""
print("开始更新第一次、第二次反转点")
res = optimize.root(
lambda x: self.update_EBR1_fun(mis, x),
[mis.guide["EBR"][0], mis.guide["EBR"][1]],
jac=lambda x: self.update_EBR1_jac(mis, x),
method='lm',
tol=1e-2
)
print(res.message)
def update_EBR2(self, mis: Missile, tar: Missile = None, meta={}):
"""
Update EBR2 and EBR3
:param mis:
:param tar:
:param meta:
:return:
"""
print("开始更新第二次、第三次反转点")
res = optimize.root(
lambda x: self.update_EBR2_fun(mis, x),
[mis.guide["EBR"][1], mis.guide["EBR"][2]],
jac=lambda x: self.update_EBR2_jac(mis, x),
method='lm',
tol=1e-2
)
print(res.message)
def update_EBR3(self, mis: Missile, tar: Missile = None, meta={}):
"""
Update the last bank-reversal point and the reference lift-to-drag ratio
:param mis:
:param tar:
:param meta:
:return:
"""
print(f"开始更新第三次反转点、参考升阻比, E = {mis.guide['E']}")
# # 方法一
# VT = mis.guide["VTAEM"]
# TT = mis.guide["TTAEM"]
# mis.guide["L12D_const"] = mis.guide["L12D_param"][1]
# iter_obj0 = np.array([mis.guide["L12D_const"], mis.guide["EBR"][2]])
# meta["iter_params"] = iter_obj0
# res = self.simulation_online(mis, tar, meta)
# vf = res["V"]
# tf = res["t"]
# G0 = np.array([vf - VT, 0 if not TT else (tf - TT)])
# iter_obj1 = np.array([mis.guide["L12D_const"], mis.guide["EBR"][2] - 1e3 * (vf - VT)])
#
# for _ in range(glbs.MAX_EBR2_ITER):
# meta["iter_params"] = iter_obj1
# res = self.simulation_online(mis, tar, meta)
# vf = res["V"]
# tf = res["t"]
# G1 = np.array([vf - VT, 0 if not TT else (tf - TT)])
# p = iter_obj1 - iter_obj0
# q = np.array([-p[1], p[0]])
# q = 0.1 * q / np.linalg.norm(q)
# iter_obj2 = iter_obj1 + q
# meta["iter_params"] = iter_obj2
# res = self.simulation_online(mis, tar, meta)
# vf = res["V"]
# tf = res["t"]
# G2 = np.array([vf - VT, 0 if not TT else (tf - TT)])
# theta_p = np.arctan2(p[1], p[0])
# theta_q = np.arctan2(q[1], q[0])
# inv_temp = np.array([(G1 - G0) / np.linalg.norm(p), (G2 - G1) * 10]).T
# inv_jac = np.array([[cos(theta_p), cos(theta_q)],
# [sin(theta_p), sin(theta_q)]]).dot(np.linalg.inv(inv_temp))
# iter_obj0 = iter_obj1
# temp1 = inv_jac.dot(G1.reshape(2, 1))
# iter_obj1 = iter_obj1 - temp1.reshape(2)*0.1
# if abs(G1[0]) < 10 and abs(G1[1]) < 10:
# break
        # Method 2
VT = mis.guide["VTAEM"]
TT = mis.guide["TTAEM"]
mis.guide["L12D_const"] = mis.guide["L12D_param"][1]
iter_obj0 = np.array([mis.guide["L12D_const"], mis.guide["EBR"][2]])
meta["iter_params"] = iter_obj0
res = self.simulation_online(mis, tar, meta)
vf = res["V"]
tf = res["t"]
G0 = np.array([vf - VT, 0 if not TT else (tf - TT)])
delta_EBR = -1e3 * (vf - VT)
        iter_obj1 = np.array([mis.guide["L12D_const"], mis.guide["EBR"][2] + delta_EBR])
meta["iter_params"] = iter_obj1
res = self.simulation_online(mis, tar, meta)
vf = res["V"]
tf = res["t"]
G1 = np.array([vf - VT, 0 if not TT else (tf - TT)])
iter_obj2 = iter_obj0 + np.array([0.1, 0])
meta["iter_params"] = iter_obj2
res = self.simulation_online(mis, tar, meta)
vf = res["V"]
tf = res["t"]
G2 = np.array([vf - VT, 0 if not TT else (tf - TT)])
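        # Hedged note (added comment, interpretation inferred from the code below):
        # p and q are finite-difference sensitivities of the terminal errors G with respect
        # to the two scaled iteration variables, and A = J^T J + 1e4*I, B = -J^T G0, so
        # solving A @ var = B appears to give a damped (regularised) Gauss-Newton correction.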
        p = (G1 - G0) / (delta_EBR / 1E6)
        q = (G2 - G0) * 10
        B = np.array([-p[0] * G0[0] - p[1] * G0[1],
                      -q[0] * G0[0] - q[1] * G0[1]])
        temp2 = p[0] * q[0] + p[1] * q[1]
        A = np.array([[1E4 + p[0] ** 2 + p[1] ** 2, temp2],
                      [temp2, 1E4 + q[0] ** 2 + q[1] ** 2]])
        var = np.linalg.solve(A, B)
        var = var * np.array([1, 1E6]) + iter_obj0
meta["iter_params"] = var
res = self.simulation_online(mis, tar, meta)
temp_result = res["total_data"]
mis.guide["L12D_const"] = var[0]
mis.guide["EBR"][2] = var[1]
mis.guide["refs"] = {
"E_sim0":res["E_sim0"],
"ETAEM_ref": res["E_sim1"],
"f_sgo_ref": res["interp_sgo_ref"],
"f_L12D_ref": res["interp_L12D_ref"],
"f_t_ref": res["interp_t_ref"]
}
def update_EBR1_fun(self, mis: Missile, x):
"""
用于牛顿迭代的方程组
"""
coef = [1, 1e3]
mis.guide["EBR"][0] = x[0]
mis.guide["EBR"][1] = x[1]
E = mis.guide["E"]
ETAEM = mis.guide["ETAEM"]
xc, dpsi = self.xC_dpsi(mis, ETAEM, E)
return [coef[0] * xc, coef[1] * (dpsi - 0)]
def update_EBR1_jac(self, mis: Missile, x):
"""
牛顿迭代雅可比矩阵
:param mis:
:param x:
:return:
"""
coef = [1, 1e3]
pxc1, pdpsi1 = self.dxc_dpsi1(mis, mis.guide["E"])
pxc2, pdpsi2 = self.dxc_dpsi2(mis, mis.guide["E"])
return np.array([[pxc1, pxc2], [coef[1] * pdpsi1, coef[1] * pdpsi2]])
def update_EBR2_fun(self, mis: Missile, x):
"""
用于牛顿迭代的方程组
"""
coef = [1, 1e3]
mis.guide["EBR"][1] = x[0]
mis.guide["EBR"][2] = x[1]
E = mis.guide["E"]
ETAEM = mis.guide["ETAEM"]
xc, dpsi = self.xC_dpsi(mis, ETAEM, E)
return [coef[0] * xc, coef[1] * (dpsi - 0)]
def update_EBR2_jac(self, mis: Missile, x):
"""
牛顿迭代雅可比矩阵
:param mis:
:param x:
:return:
"""
coef = [1, 1e3]
pxc2, pdpsi2 = self.dxc_dpsi2(mis, mis.guide["E"])
pxc3, pdpsi3 = self.dxc_dpsi3(mis, mis.guide["E"])
return np.array([[pxc2, pxc3], [coef[1] * pdpsi2, coef[1] * pdpsi3]])
def update_L12D(self, mis: Missile, tar: Missile, meta={}):
        # TODO: factor out the duplicated code
E0 = mis.guide["E_steady_glide_start"]
E1 = mis.guide["E2"]
E2 = mis.guide["ETAEM"]
L2DTAEM = mis.guide["L2Dbsl_TAEM"]
c1 = [0, 1 / (E0 - E1), -E1 / (E0 - E1)]
c2 = [0, 1 / (E1 - E0), E0 / (E0 - E1)]
        c3 = [1 / (E1 - E2) ** 2, -2 * E1 / (E1 - E2) ** 2, E1 ** 2 / (E1 - E2) ** 2]
        c4 = [0, 0, 1]
        c4_c3 = [-1 / (E1 - E2) ** 2, 2 * E1 / (E1 - E2) ** 2, 1 - E1 ** 2 / (E1 - E2) ** 2]
        # The terminal energy here is taken as ETAEM
        # TODO: add the lift-to-drag contribution caused by the time error
if mis.guide["E"] > E1:
L12D11 = ((mis.guide["s_go"] - mis.guide["STAEM"])/cos(mis.guide["delta_psi"]) - L2DTAEM * self.x_D(mis, E2, E1, c=c3)) \
/ (self.x_D(mis, E1, mis.guide["E"], c=[0, 0, 1]) + self.x_D(mis, E2, E1, c=c4_c3))
else:
L12D11 = ((mis.guide["s_go"] - mis.guide["STAEM"])/cos(mis.guide["delta_psi"]) - L2DTAEM * self.x_D(mis, E2, mis.guide["E"], c=c3)) \
/ self.x_D(mis, E2, mis.guide["E"], c=c4_c3)
L12D21 = L12D11
L12D12 = 0
L12D22 = 0
L2DE = L12D11 + L12D12
L2D_alpha = L12D21 + L12D22
mis.guide["L12D_param"] = [L2DE, L2D_alpha]
def L12D(self, mis: Missile, E=None):
if not E:
E = mis.guide["E"]
E0 = mis.guide["E_steady_glide_start"]
E1 = mis.guide["E2"]
E2 = mis.guide["ETAEM"]
params = mis.guide["L12D_param"]
L2DTAEM = mis.guide["L2Dbsl_TAEM"]
L2DE = params[0]
L2D_alpha = params[1]
if E >= E1:
return (E - E1) / (E0 - E1) * L2DE + (E0 - E) / (E0 - E1) * L2D_alpha
else:
return ((E1 - E) / (E1 - E2)) ** 2 * (L2DTAEM - L2D_alpha) + L2D_alpha
def L22D(self, mis: Missile, E=None):
if not E:
E = mis.guide["E"]
return abs(np.sqrt(cav_corridor.L2D_E(E) ** 2 - self.L12D(mis, E) ** 2))
def x_D(self, mis, E_end, E_start, c=None):
"""
射程解析解
:param mis:
:param E_end:
:param E_start:
:param c: 升阻比形式决定的系数(a2,a1,a0)
:return:
"""
if E_start < E_end:
return 0
if c is None:
            # If c is not given, return the total downrange, which must be multiplied by the lift-to-drag ratio
E0 = mis.guide["E_steady_glide_start"]
E1 = mis.guide["E2"]
E2 = mis.guide["ETAEM"]
params = mis.guide["L12D_param"]
L2DTAEM = mis.guide["L2Dbsl_TAEM"]
L2DE = params[0]
L2D_alpha = params[1]
c1 = [0, 1 / (E0 - E1), -E1 / (E0 - E1)]
c2 = [0, 1 / (E1 - E0), E0 / (E0 - E1)]
            c3 = [1 / (E1 - E2) ** 2, -2 * E1 / (E1 - E2) ** 2, E1 ** 2 / (E1 - E2) ** 2]
c4 = [0, 0, 1]
if E_end >= E1:
return L2DE * self.x_D(mis, E_end, E_start, c=c1) + L2D_alpha * self.x_D(mis, E_end, E_start, c=c2)
elif E_start <= E1:
return (L2DTAEM - L2D_alpha) * self.x_D(mis, E_end, E_start, c=c3) + L2D_alpha * self.x_D(mis, E_end,
E_start, c=c4)
else:
return L2DE * self.x_D(mis, E1, E_start, c=c1) + L2D_alpha * self.x_D(mis, E1, E_start, c=c2) \
+ (L2DTAEM - L2D_alpha) * self.x_D(mis, E_end, E1, c=c3) \
+ L2D_alpha * self.x_D(mis, E_end, E1, c=c4)
        # If c is given, compute the downrange of each sub-segment (without multiplying by the lift-to-drag ratio)
a2, a1, a0 = c[0], c[1], c[2]
kh1 = mis.guide["kh"][0]
kh4 = mis.guide["kh"][3]
Rs = mis.guide["Rs"]
temp1 = a1 - e.mu / 2 / Rs * a2
temp2 = (e.mu / 2 / Rs) ** 2 * a2 - e.mu / 2 / Rs * a1 + a0
temp3 = np.log((2 * E_end + e.mu / Rs) / (2 * E_start + e.mu / Rs))
temp4 = 1 / (2 * E_end + e.mu / Rs) - 1 / (2 * E_start + e.mu / Rs)
fx1 = e.Re * a2 / 4 * (E_end ** 2 - E_start ** 2) + e.Re / 2 * temp1 * (
E_end - E_start) + e.Re / 2 * temp2 * temp3
fx2 = e.Re / 2 * (a2 * (E_end - E_start) / 2 + (a1 - e.mu / Rs * a2) / 2 * temp3
- temp2 * temp4)
fx3 = e.Re / 8 * a2 * temp3 - e.Re / 4 * (a1 - e.mu / Rs * a2) * temp4 - e.Re / 4 * temp2 * (
1 / (2 * E_end + e.mu / Rs) ** 2 - 1 / (2 * E_start + e.mu / Rs) ** 2)
res = (1 + kh1[1] / 2 + kh1[1] * kh4[1] / 4) * fx1 + \
(kh1[0] * (1 + kh4[1] / 2) - kh1[1] / 2 * (e.mu / Rs - kh4[0]) - e.mu / 2 / Rs * kh1[1] * kh4[1]) * fx2 + \
(kh1[0] - e.mu / 2 / Rs * kh1[1]) * (kh4[0] - e.mu / 2 / Rs * kh4[1]) * fx3
return res
def h_m(self, mis: Missile, E=None):
if not E:
E = mis.guide["E"]
return 2 * E + e.mu / mis.guide["Rs"]
def xC_dpsi(self, mis, E_end=None, E_start=None):
if not E_end:
E_end = mis.guide["ETAEM"]
if not E_start:
E_start = mis.guide["E"]
EBR = mis.guide["EBR"]
delta_psi = mis.guide["delta_psi"]
integ_xc = integrate.quad(
lambda x: sin(self.x_D(mis, E_end, x) / e.Re) * self.f2(mis, x),
E_start, E_end
)
integ_dpsi = integrate.quad(
lambda x: cos(self.x_D(mis, E_end, x) / e.Re) * self.f2(mis, x),
E_start, E_end
)
        # Determine which bank reversal the current energy lies after
if E_start > EBR[0]:
F1, G1 = self.FG(mis, EBR[0], E_start)
F2, G2 = self.FG(mis, EBR[1], EBR[0])
elif E_start >= EBR[1]:
F1, G1 = 0, 0
F2, G2 = self.FG(mis, EBR[1], E_start)
else:
raise "在第二次反转后不采用解析解更新反转点"
F3, G3 = self.FG(mis, EBR[2], EBR[1])
F4, G4 = self.FG(mis, E_end, EBR[2])
xc = e.Re * (delta_psi * sin(self.x_D(mis, E_end, E_start) / e.Re) - integ_xc[0]
- mis.guide["sgn_ini"] * (F1 - F2 + F3 - F4))
dpsi = delta_psi * cos(self.x_D(mis, E_end, E_start) / e.Re) - integ_dpsi[0] \
- mis.guide["sgn_ini"] * (G1 - G2 + G3 - G4)
return xc, dpsi
def dxc_dpsi1(self, mis: Missile, E):
xd = self.x_D(mis, mis.guide["ETAEM"], mis.guide["EBR"][0])
f4 = self.f4(mis, mis.guide["EBR"][0])
pxc = -2 * mis.guide["sgn_ini"] * e.Re * sin(xd / e.Re) * f4
pdpsi = -2 * mis.guide["sgn_ini"] * cos(xd / e.Re) * f4
return pxc, pdpsi
def dxc_dpsi2(self, mis: Missile, E):
xd = self.x_D(mis, mis.guide["ETAEM"], mis.guide["EBR"][1])
f4 = self.f4(mis, mis.guide["EBR"][1])
pxc = 2 * mis.guide["sgn_ini"] * e.Re * sin(xd / e.Re) * f4
pdpsi = 2 * mis.guide["sgn_ini"] * cos(xd / e.Re) * f4
return pxc, pdpsi
def dxc_dpsi3(self, mis: Missile, E):
xd = self.x_D(mis, mis.guide["ETAEM"], mis.guide["EBR"][2])
f4 = self.f4(mis, mis.guide["EBR"][2])
pxc = -2 * mis.guide["sgn_ini"] * e.Re * sin(xd / e.Re) * f4
pdpsi = -2 * mis.guide["sgn_ini"] * cos(xd / e.Re) * f4
return pxc, pdpsi
def f2(self, mis: Missile, E):
kh3 = mis.guide["kh"][2]
kh4 = mis.guide["kh"][3]
Rs = mis.guide["Rs"]
L12D = self.L12D(mis, E)
return -(L12D * (kh3[0] + kh3[1] * E + kh3[2] * E ** 2)) / (
(2 * E + 2 * e.mu / Rs) * (2 * E + e.mu / Rs)
) * (1 + (kh4[0] + kh4[1] * E) / (2 * E + e.mu / Rs))
def f4(self, mis: Missile, E):
if not E:
E = mis.guide["E"]
Rs = mis.guide["Rs"]
L22D = self.L22D(mis, E)
kh2 = mis.guide["kh"][1]
kh4 = mis.guide["kh"][3]
h2 = kh2[0] + kh2[1] * E
h4 = kh4[0] + kh4[1] * E
return -L22D / (2 * E + 2 * e.mu / Rs) * (
1 + h2 / (2 * E + e.mu / Rs) +
(h2 * h4) / (2 * E + e.mu / Rs) ** 2
)
def FG(self, mis: Missile, E_end, E_start, E_xD=None):
if not E_xD:
E_xD = mis.guide["ETAEM"]
F = integrate.quad(lambda x: sin(self.x_D(mis, E_xD, x) / e.Re) * self.f4(mis, x),
E_start, E_end, epsabs=1e-7)
G = integrate.quad(lambda x: cos(self.x_D(mis, E_xD, x) / e.Re) * self.f4(mis, x),
E_start, E_end, epsabs=1e-7)
return F[0], G[0]
def simulation_online(self, mis: Missile, tar: Missile = None, meta={}):
sim: MultiMissileSim = copy.deepcopy(meta["simulation"])
sim.is_main = False
index = mis.guide["index"]
for m in sim.mis:
m.guide["guide_flag"] = "online"
if not m.guide["index"] == index:
m.guide["end_guide"] = True
else:
m.guide["L12D_const"] = meta["iter_params"][0]
m.guide["EBR"][2] = meta["iter_params"][1]
sim.simulation()
result = sim.db[index].data
misT = sim.mis[index]
s_go = result['s_go']
E_node = result['E']
interp_sgo_ref = Interp1(E_node, s_go).pre
L12D = result["L12D"]
interp_L12D_ref = Interp1(E_node, L12D).pre
t_list = result["t"]
interp_t_ref = Interp1(E_node, t_list).pre
V = misT.guide["v"]
t = misT.status.t
return {
"E_sim0":mis.guide["E"],
"E_sim1":misT.guide["E"],
"V": V,
"t": t,
"interp_sgo_ref": interp_sgo_ref,
"interp_L12D_ref": interp_L12D_ref,
"interp_t_ref": interp_t_ref,
"total_data": result
}
def guide2equation(self, mis: Missile, tar: Missile = None, meta={}):
for item in self.control_param_list:
mis.control[item] = mis.guide[item]
mis.control['CL'], mis.control['CD'] = mis.aero.CLCD(mis.ma, mis.guide["attack_angle"])
mis.control['L'] = mis.control['CL'] * mis.guide['q_inf']
mis.control['D'] = mis.control['CD'] * mis.guide['q_inf']
mis.guide["L12D"] = mis.control['CL'] / mis.control['CD'] * cos(mis.guide["bank_angle"])
mis.guide["L12D_E"] = mis.guide.get('L12D_param', [None, None])[0]
mis.guide["L12D_alpha"] = mis.guide.get('L12D_param', [None, None])[1]
def attack_angle_plan(self, mis, E=None):
if not E:
E = mis.guide["E"]
a1 = mis.guide["alpha_sg"]
a2 = mis.guide["alpha_2"]
E2 = mis.guide["E2"]
E_TAEM = mis.guide["ETAEM"]
return [a1, ((E2 - E) / (E2 - E_TAEM)) ** 2 * (a2 - a1) + a1][E < E2]
def s_go(self, mis: Missile, tar: Missile = None, meta={}):
x_mis = ct.coordinate_transformation(0, lamb=mis.status.longitude, phi=mis.status.latitude)
x_tar = ct.coordinate_transformation(0, lamb=tar.status.longitude, phi=tar.status.latitude)
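        # Great-circle range: the dot product of the two unit position vectors gives the
        # central angle between missile and target, scaled by the Earth radius.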
return e.Re * arccos(x_mis.dot(x_tar))
def mis_end_guide(self, mis: Missile, tar: Missile, meta={}):
if mis.guide.get('s_go', np.inf) < mis.guide["STAEM"]:
mis.guide["end_guide"] = True
if mis.guide.get('E', np.inf) < (mis.guide["ETAEM"] - 5E5) and mis.guide["guide_flag"] == 'online':
mis.guide["end_guide"] = True
# if mis.status.t > 1000:
# mis.guide["end_guide"] = True
| [] |
2024-01-10 | aofenghanyue/EntryGuidance | simulation.py | import settings as glbs
from entity.missile import Missile
from guidance.guide import Guidance
from store.dataSave import DataSave
from utils.integral import Integral
class TrajectorySimulation:
def __init__(self):
        # Maximum number of iterations
        self.max_iter = getattr(glbs, "MAXITER", 0)
        # Current iteration count
        self.iter = 0
        # Current integration step length
        self.h = 0
        # Global time
        self.t = getattr(glbs, "t0", 0)
        # Missile and its corresponding target
        self.mis: Missile = Missile()
        self.tar: Missile = Missile()
        # Guidance module
        self.guide: Guidance = Guidance()
        self.guide_params_list = getattr(glbs, "ParamsToGuide", [])
        self.guide_params: dict = {}
        # Integration module
        self.integral = Integral()
        # Data-storage module; may be a list with one entry per missile/target pair
        self.db = DataSave()
        self.db_save_dict: dict = {}  # temporary dict for the values saved at each step
self.init()
def init(self, mis=Missile(), tar=Missile(), guide=Guidance(), integ=Integral(), db=DataSave()):
"""
初始化导弹,目标,制导模式,积分,存储模块等信息
:return:
"""
pass
def step_len(self) -> float:
"""
计算每一步的积分步长
:return: 当前步积分步长
"""
pass
def simulation(self):
self._before_simulation()
while self.next():
self._after_one_step()
self._after_simulation()
def next(self):
"""
一个循环
:return:
"""
if self._is_continue():
self.h = self.step_len()
if self.one_step_guide():
                # Save the previous step's state before changing it
                self.after_guide()
                # Integrate and update the state
                self.one_step_integral()  # includes self.one_step_update_status()
return True
return False
def one_step_guide(self):
"""
1.产生控制指令
:return: True代表单步制导执行成功
False代表制导结束
"""
# 产生制导相关参数
self.gen_guide_params()
# 产生制导指令
if self.guide.one_step_guide(self.mis, self.tar, self.guide_params):
return True
else:
return False
def one_step_integral(self):
"""
2.各个体单步积分
:return:
"""
self.one_step_integral_object(self.mis)
self.one_step_integral_object(self.tar)
def one_step_integral_object(self, obj: Missile):
if obj.launched:
            # Update the state if the missile has been launched
x, y0 = obj.status.x, obj.status.y
y_next, dy = self.integral.next_step(obj.equation.equation, x, y0, self.h, obj.control, need_dy=True)
x_next = x + self.h
if dy is not None:
obj.guide["dy"] = dy
            # 3. Update the state of each entity
status_update_data = {k: v for k, v in zip(obj.status.integral_key, y_next)}
status_update_data.update({obj.status.independent_key: x_next})
obj.status.change_stat(status_update_data)
def gen_guide_params(self):
"""
产生要传入制导模块的参数
:return: None
"""
for param in self.guide_params_list:
if param == "self":
self.guide_params["simulation"] = self
else:
self.guide_params[param] = getattr(self, param, None)
def save_data(self) -> None:
"""
存储数据
:return: None
"""
self.db_save_dict = {"global_t": self.t}
# 默认只存储时间、导弹状态和控制量
self.db_save_dict.update(self.mis.status.status_dict())
self.db_save_dict.update(self.mis.control)
self.db.update(self.db_save_dict)
def _is_continue(self) -> bool:
"""判断是否继续仿真"""
if self.iter > self.max_iter:
return False
if not self.is_continue():
return False
return True
def is_continue(self):
return True
def after_guide(self):
"""
制导指令计算完成后进行
可存储导弹及其控制指令
:return:
"""
self.save_data()
def _after_one_step(self):
"""每个积分循环完成后需要进行的操作
可以存储数据等,但一般存储数据在制导完成之后进行"""
# 迭代次数+1
self.iter += 1
# 时间前进h
self.t += self.h
self.after_one_step()
def after_one_step(self):
pass
def _before_simulation(self):
print("仿真开始")
self.before_simulation()
def before_simulation(self):
pass
def _after_simulation(self):
"""仿真结束后需要做的事情
例如将结果输出为图像"""
print("仿真结束")
self.after_simulation()
pass
def after_simulation(self):
pass
def from_mis_guide(self, param_list: list):
return {param: self.mis.guide.get(param, None) for param in param_list}
| [] |
2024-01-10 | aofenghanyue/EntryGuidance | customGuidance.py | import numpy as np
import settings as glbs
from numpy import sin, cos, tan, arccos, arctan
from entity.missile import Missile
from guidance.guide import Guidance
from database.Constant import earth as e
from database.Atmosphere import atmosphereISA as ATM
from utils.common import coords_trans as ct
from utils.common import heading_angle, limit_num
from custom.EntryCorridor import cav_corridor
from utils.interpolate import Interp1
from simulation import TrajectorySimulation
from scipy import integrate, optimize
import copy
# Compute the guidance parameters
class CustomGuidance(Guidance):
def __init__(self):
super(CustomGuidance, self).__init__()
        # Guidance mode
self.guide_mode = 0
self.guide_phase: dict = {
"descent_phase1": self.descent_phase1,
"descent_phase2": self.descent_phase2,
"steady_glide_phase": self.steady_glide_phase,
"altitude_adjustment_phase": self.altitude_adjustment_phase
}
        # Parameter initialization
        self.alpha_max = 0  # maximum angle of attack of the missile
        self.alpha_sgp = 0  # steady-glide angle of attack
        self.alpha_2 = np.deg2rad(6)
        self.V_TAEM = 0  # terminal velocity
        self.E_TAEM = 0  # terminal energy
        self.H_TAEM = 0  # terminal height
        self.E_alpha = -5.55e7
        self.S_TAEM = 0  # terminal range
        self.EBR1 = 0  # energy of the first bank-angle reversal
        self.EBR2 = 0  # energy of the second bank-angle reversal
        self.sgn = 1  # initial bank-angle sign
        self.kgamma = 0  # angle-of-attack feedback gain in the descent phase
        self.err_tol = 0  # tolerated error between the path angle and the steady-glide path angle when entering the steady-glide phase
        self.EBR1_update_flag = [False, False, False]  # EBR1 is updated three times
        self.EBR2_update_flag = [False, False]  # EBR2 is updated twice
        self.bank_reverse_flag = [False, False]  # two bank-angle reversals
        self.glide_E0 = 0  # energy when first entering the glide phase
        self.bank_reverse_time = 0  # number of bank reversals; not strictly needed, since there are only two reversals it could be handled as two cases in sigma_bsl
        self.k_gamma_sgp = 0  # TDCT feedback gain in the glide phase
        self.k_gamma_aap = 0  # bank-angle feedback gain in the altitude-adjustment phase
        self.control_param_list = []  # names of values copied directly from guidance parameters to control parameters
        self.k_sigma = 0  # coefficient in the maximum bank angle
        self.k_alpha = 0  # feedback gain of the angle of attack on the range error in the AAP
        self.allow_update_param = True  # whether updating EBR2 and alpha2 is allowed; set to False for online guidance
        self.accurate_mode = 0  # simulation accuracy
def init(self, missile: Missile, target: Missile = None, meta={}):
        print('Initializing parameters')
self.alpha_max = missile.p.alpha_max
self.alpha_sgp = missile.p.max_L2D_alpha
self.V_TAEM = glbs.MissileEndStatus["velocity"]
self.H_TAEM = glbs.MissileEndStatus["height"]
self.E_TAEM = e.E(self.V_TAEM, self.H_TAEM)
self.S_TAEM = glbs.MissileEndStatus["s"]
self.guide_mode = "descent_phase1"
self.EBR1 = (missile.E + self.E_alpha) / 2
self.EBR2 = self.E_alpha
self.err_tol = getattr(glbs, 'ERR_TOL', 1e-2)
self.kgamma = getattr(glbs, 'K_GAMMA', 3)
self.k_gamma_sgp = getattr(glbs, 'K_GAMMA_SGP', 3)
self.k_gamma_aap = getattr(glbs, 'K_GAMMA_AAP', 3)
self.k_sigma = getattr(glbs, 'K_SIGMA', -50)
self.k_alpha = getattr(glbs, 'K_ALPHA', 5 * np.pi / 1.8e7)
self.control_param_list = getattr(glbs, 'CONTROL_PARAM_LIST', ["L", "D", "m", "attack_angle", "bank_angle"])
        # If the initial heading angle is too large, the bank angle is negative
great_circle_heading = heading_angle(missile.status.longitude, missile.status.latitude,
target.status.longitude, target.status.latitude)
self.sgn = 1 if great_circle_heading[0] > missile.status.heading_angle else -1
        # Initialization of guidance parameters that only need to be computed once
missile.guide["L2Dbsl_TAEM"] = cav_corridor.L2D_E(self.E_TAEM)
def guide(self, mis: Missile, tar: Missile = None, meta={}):
self.parse_param(mis, tar, meta)
self.integral_accurate(mis, tar, meta)
self.guide_phase[self.guide_mode](mis, tar, meta)
self.guide2control(mis, tar, meta)
        # Stop the program here when debugging:
# if self.guide_mode == "steady_glide_phase":
# self.end_guide(flag=True)
# if meta["t"] >= 1000:
# self.end_guide(flag=True)
def parse_param(self, mis, tar, meta={}):
"""
解析导弹及目标参数
:param mis: 导弹
:param tar: 目标
:return:
"""
S = mis.p.reference_area
h = mis.status.height
v = mis.status.velocity
psi = mis.status.heading_angle
E = mis.E
Ma = mis.ma
CL_plan, CD_plan = mis.aero.CLCD(Ma, self.attack_angle_plan(E))
rho = ATM.rho(h)
q_inf = rho * v ** 2 * S / 2
        # The actual lift/drag coefficients are taken from the previous step; on the first step the reference values are used
CL, CD = mis.control.get('CL', CL_plan), mis.control.get('CD', CD_plan)
L, D = CL * q_inf, CD * q_inf
great_circle_heading = heading_angle(mis.status.longitude, mis.status.latitude,
tar.status.longitude, tar.status.latitude)
ref_psi = great_circle_heading[0]
delta_psi = psi - ref_psi
if delta_psi > np.pi:
delta_psi = delta_psi - 2 * np.pi
elif delta_psi < -np.pi:
delta_psi = delta_psi + 2 * np.pi
sgo = self.s_go(mis, tar, meta)
q = mis.p.k_Q * np.sqrt(rho) * pow(v, 3.15)
mis.guide.update({
'm': mis.p.m,
'S': S,
'E': E,
'Ma': Ma,
'v': v,
'h': h,
'CL': CL,
'CD': CD,
'q_inf': q_inf,
'L': L,
'D': D,
'great_circle_heading': great_circle_heading,
'delta_psi': delta_psi,
's_go': sgo,
'ref_psi': ref_psi,
'q': q,
})
def integral_accurate(self, mis: Missile, tar: Missile = None, meta={}):
self.accurate_mode = 0
d_EBR1 = mis.guide["E"] - self.EBR1
d_EBR2 = mis.guide["E"] - self.EBR2
d_s_go = mis.guide["s_go"] - self.S_TAEM
if self.guide_mode == 'descent_phase1' or self.guide_mode == 'descent_phase2':
self.accurate_mode = 1
if 0 < d_EBR1 < 1e5:
self.accurate_mode = 1
if 0 < d_EBR2 < 1e5:
if d_EBR2 < 1e3:
self.accurate_mode = 3
elif d_EBR2 < 1e4:
self.accurate_mode = 2
else:
self.accurate_mode = 1
if 0 < d_s_go < 1e4:
if d_s_go < 2e3:
self.accurate_mode = 3
elif d_s_go < 5e3:
self.accurate_mode = 2
else:
self.accurate_mode = 1
def attack_angle_plan(self, E):
a1 = self.alpha_sgp
a2 = self.alpha_2
return [a1, ((self.E_alpha - E) / (self.E_alpha - self.E_TAEM)) ** 2 * (a2 - a1) + a1][E < self.E_alpha]
def descent_phase1(self, mis: Missile, tar: Missile = None, meta={}):
"""
mode: descent_phase1
初始下降段,当\dot{\gamma}<0时,输出最大攻角
next_mode: 当dot gamma = 0 (第一次>0)时,记录倾角偏差值,进入descent_phase2
:return: None
"""
        # The missile is launched at the first guidance call
mis.launched = True
mis.guide["attack_angle"] = self.alpha_max
mis.guide["bank_angle"] = 0
if mis.status.t > 10 and mis.guide["dy"][4] > 0:
            print(f'First segment of the descent phase finished, t = {meta["t"]}')
self.guide_mode = "descent_phase2"
self.before_phase(mis, tar, meta)
mis.guide["dgamma0"] = mis.guide["gamma_sg"] - mis.status.path_angle
def descent_phase2(self, mis: Missile, tar: Missile = None, meta={}):
"""
        Second segment of the descent phase: adjust the angle of attack for a smooth transition into the glide phase.
        :param mis:
        :param tar:
        :param meta:
        next_mode: steady_glide_phase. Entered when the difference between the path angle and the steady-glide path angle is below err_tol from settings.
:return:
"""
self.before_phase(mis, tar, meta)
delta_gamma = mis.guide["gamma_sg"] - mis.status.path_angle
mis.guide["attack_angle"] = delta_gamma / mis.guide["dgamma0"] * self.alpha_max + (
mis.guide["dgamma0"] - delta_gamma) / mis.guide["dgamma0"] * (
self.attack_angle_plan(mis.guide["E"]) + self.kgamma * delta_gamma)
mis.guide["bank_angle"] = 0
if np.abs(delta_gamma) < self.err_tol:
            print(f'Second segment of the descent phase finished, t = {meta["t"]}')
self.guide_mode = "steady_glide_phase"
self.glide_E0 = mis.guide["E"]
def steady_glide_phase(self, mis: Missile, tar: Missile = None, meta={}):
"""
        Steady-glide phase.
        :param mis:
        :param tar:
        :param meta:
        next_mode: altitude_adjustment_phase. Entered after the second bank-angle reversal.
:return:
"""
self.before_phase(mis, tar, meta)
alpha_bsl = self.attack_angle_plan(mis.guide["E"])
sigma_bsl = self.sigma_bsl(mis, tar, meta)
gamma_sg = mis.guide["gamma_sg"]
sigma_max = self.sigma_max(mis, tar, meta)
alpha_cmd, sigma_cmd = self.TDCT(alpha_bsl, sigma_bsl, gamma_sg - mis.status.path_angle)
sigma_cmd = limit_num(sigma_cmd, abs_limit=sigma_max)
mis.guide["attack_angle"] = alpha_cmd
mis.guide["bank_angle"] = sigma_cmd
if mis.guide["E"] < self.EBR2:
mis.guide["sgo_EBR2"] = mis.guide["s_go"]
self.guide_mode = "altitude_adjustment_phase"
            print(f'Steady-glide phase finished, t = {meta["t"]}')
def altitude_adjustment_phase(self, mis: Missile, tar: Missile = None, meta={}):
self.before_phase(mis, tar, meta)
alpha_bsl = self.attack_angle_plan(mis.guide["E"])
sigma_bsl = self.sigma_bsl(mis, tar, meta)
f_sgo_ref = mis.guide.get('f_sgo_ref', None)
sgo_ref = f_sgo_ref(mis.guide["E"]) if f_sgo_ref else mis.guide["s_go"]
mis.guide["sgo_ref"] = sgo_ref
delta_gamma = mis.guide["gamma_sg"] - mis.status.path_angle
alpha_cmd = alpha_bsl + self.k_alpha * (mis.guide["s_go"] - sgo_ref)
sigma_cmd = sigma_bsl - sin(sigma_bsl) * self.k_gamma_aap * delta_gamma / self.alpha_sgp
mis.guide["attack_angle"] = alpha_cmd
mis.guide["bank_angle"] = sigma_cmd
def before_phase(self, mis: Missile, tar: Missile = None, meta={}):
self.update_kh(mis, tar, meta)
if not self.guide_mode == 'altitude_adjustment_phase':
self.update_L1D(mis, tar, meta)
self.update_path_angle_sgp(mis, tar, meta)
if self.guide_mode == 'steady_glide_phase':
if self.need_update_EBR1(mis, tar, meta):
self.update_EBR1(mis, tar, meta)
if self.need_update_EBR2(mis, tar, meta):
self.update_EBR2_alpha2(mis, tar, meta)
self.bank_reverse(mis, tar, meta)
def need_update_EBR1(self, mis: Missile, tar: Missile = None, meta={}):
if not self.EBR1_update_flag[0]:
if self.guide_mode == 'steady_glide_phase':
self.EBR1_update_flag[0] = True
return True
elif not self.EBR1_update_flag[1]:
if mis.guide["E"] < (self.EBR1 + self.glide_E0) / 2:
self.EBR1_update_flag[1] = True
return True
elif not self.EBR1_update_flag[2]:
dEdt = mis.control["D"] * mis.guide["v"] / mis.guide["m"]
delta_t = 100
if mis.guide["E"] < (self.EBR1 + dEdt * delta_t):
self.EBR1_update_flag[2] = True
return True
return False
def need_update_EBR2(self, mis: Missile, tar: Missile = None, meta={}):
if not self.allow_update_param:
return False
if not self.EBR2_update_flag[0]:
if self.bank_reverse_flag[0]:
self.EBR2_update_flag[0] = True
return True
elif not self.EBR2_update_flag[1]:
dEdt = mis.control["D"] * mis.guide["v"] / mis.guide["m"]
delta_t = 120
if mis.guide["E"] < (self.EBR2 + dEdt * delta_t):
self.EBR2_update_flag[1] = True
return True
return False
def update_EBR2_alpha2(self, mis: Missile, tar: Missile = None, meta={}):
        print(f'\nUpdating alpha2 online: t = {meta["t"]}')
        self.update_alpha2(mis, tar, meta)
        print('Finished updating alpha2 online\n')
        print(f'\nUpdating EBR2 online: t = {meta["t"]}')
        # Only update EBR2 once
        # if self.EBR2_update_flag[1] == False:
        #     self.update_EBR2(mis, tar, meta)
        self.update_EBR2(mis, tar, meta)
        print('Finished updating EBR2 online\n')
        # if not self.EBR2_update_flag[1]:
        #     print('\nUpdating EBR2 online:')
        #     self.update_EBR2(mis, tar, meta)
        #     print('Finished updating EBR2 online\n')
        #     print('\nUpdating alpha2 online:')
        #     self.update_alpha2(mis, tar, meta)
        #     print('Finished updating alpha2 online\n')
        # else:
        #     print('\nUpdating alpha2 online:')
        #     self.update_alpha2(mis, tar, meta)
        #     print('Finished updating alpha2 online\n')
        #     print('\nUpdating EBR2 online:')
        #     self.update_EBR2(mis, tar, meta)
        #     print('Finished updating EBR2 online\n')
def update_alpha2(self, mis: Missile, tar: Missile = None, meta={}):
sim = copy.deepcopy(meta["simulation"])
f_stat = self.simulation_online(sim)
VT = self.V_TAEM
m = mis.guide['m']
alpha_bslf = f_stat['alpha_bsl']
alpha2f = f_stat['alpha2']
CLf = f_stat['CL']
Hf = f_stat['h']
vf = f_stat['v']
phif = f_stat['phi']
psif = f_stat['psi']
CL_alpha = mis.aero.CL_alpha(vf / ATM.a(Hf))
q_Sf = f_stat['q_S']
q_S_T = 0.5 * ATM.rho(self.H_TAEM) * VT ** 2 * mis.p.reference_area
self.alpha_2 = alpha_bslf + CLf * (q_Sf - q_S_T) / (CL_alpha * q_S_T) \
+ m * (vf ** 2 - VT ** 2) / (CL_alpha * q_S_T * (e.Re + Hf)) \
+ 2 * m * (vf - VT) * e.omega_e * cos(phif) * sin(psif) / (CL_alpha * q_S_T)
def update_EBR2(self, mis: Missile, tar: Missile = None, meta={}):
sim_old = copy.deepcopy(meta["simulation"])
sim_result_old = self.simulation_online(sim_old)
vf_old = sim_result_old['v']
interp_sgo_ref = sim_result_old['interp_sgo_ref']
EBR2_old, EBR2_new = self.EBR2, self.EBR2 - 1e3 * (vf_old - self.V_TAEM)
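        # Secant iteration on EBR2: drive the terminal velocity error vf - V_TAEM to zero
        # using the two most recent (EBR2, vf) pairs (explanatory comment added).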
for _ in range(glbs.MAX_EBR2_ITER):
sim_new = copy.deepcopy(meta["simulation"])
sim_new.guide.EBR2 = EBR2_new
sim_result = self.simulation_online(sim_new)
vf_new = sim_result['v']
interp_sgo_ref = sim_result['interp_sgo_ref']
EBR2_update = EBR2_new - (vf_new - self.V_TAEM) * (EBR2_new - EBR2_old) / (vf_new - vf_old)
EBR2_old, vf_old = EBR2_new, vf_new
EBR2_new = EBR2_update
if np.abs(vf_new - self.V_TAEM) < 1:
break
self.EBR2 = EBR2_new
        # Save the reference range profile
# if not self.EBR2_update_flag[1]:
mis.guide['f_sgo_ref'] = interp_sgo_ref
def simulation_online(self, sim: TrajectorySimulation):
        # Run a trajectory simulation from the current state and return the final state and the reference range profile
sim.guide.allow_update_param = False
sim.is_online = True
sim.guide.k_alpha = 0
sim.simulation()
result = sim.db.data
# CL, q_inf, v, h, phi, psi, alpha
E = sim.mis.E
alpha_bsl = sim.guide.attack_angle_plan(E)
alpha2 = sim.guide.alpha_2
        q_S = sim.mis.guide['q_inf']  # note: the reference area S is consistently kept in (not divided out) here
v = sim.mis.guide["v"]
h = sim.mis.guide["h"]
CL = sim.mis.guide["CL"]
phi = sim.mis.status.latitude
psi = sim.mis.status.heading_angle
s_go = result['s_go']
E_node = result['E']
interp_sgo_ref = Interp1(E_node, s_go).pre
res = {
'alpha_bsl': alpha_bsl,
'alpha2': alpha2,
'q_S': q_S,
'v': v,
'h': h,
'CL': CL,
'phi': phi,
'psi': psi,
'interp_sgo_ref': interp_sgo_ref
}
return res
def bank_reverse(self, mis: Missile, tar: Missile = None, meta={}):
if not self.bank_reverse_flag[0]:
if mis.guide["E"] < self.EBR1:
self.bank_reverse_flag[0] = True
self.bank_reverse_time = self.bank_reverse_time + 1
                print(f'First bank-angle reversal, t = {meta["t"]}')
elif not self.bank_reverse_flag[1]:
if mis.guide["E"] < self.EBR2:
self.bank_reverse_flag[1] = True
self.bank_reverse_time = self.bank_reverse_time + 1
                print(f'Second bank-angle reversal, t = {meta["t"]}')
def TDCT(self, alpha_bsl, sigma_bsl, delta_gamma):
"""
        Trajectory-damping control technique (TDCT) to suppress trajectory oscillations.
        :param alpha_bsl: baseline angle-of-attack command
        :param sigma_bsl: baseline bank-angle command
        :param delta_gamma: steady-glide path angle - current path angle
        :return: angle-of-attack command, bank-angle command
"""
alpha_cmd = alpha_bsl + cos(sigma_bsl) * self.k_gamma_sgp * delta_gamma
sigma_cmd = sigma_bsl - sin(sigma_bsl) * self.k_gamma_sgp * delta_gamma / self.alpha_sgp
return alpha_cmd, sigma_cmd
def sigma_max(self, mis: Missile, tar: Missile = None, meta={}):
if not glbs.CONSIDER_SIGMA_MAX:
return np.inf
dHmindE = cav_corridor.dHmindE_E(mis.guide["E"])
D = mis.control.get('D', mis.guide['D'])
dHdE = -mis.guide["m"] * sin(mis.status.path_angle) / D
Hmin = cav_corridor.interp_Hmin_E(mis.guide["E"])
L1 = mis.guide["m"] * (e.g0 - mis.guide["v"] ** 2 / (e.Re + Hmin)
- e.omega_e ** 2 * (e.Re + Hmin) * cos(mis.status.latitude) ** 2
- 2 * mis.guide["v"] * e.omega_e * cos(mis.status.latitude)
* sin(mis.status.heading_angle))
Lmax = mis.guide["CL"] * 0.5 * ATM.rho(Hmin) * mis.guide["v"] ** 2 * mis.guide["S"]
if L1 > Lmax:
L1 = Lmax
sigma_max = arccos(L1 / Lmax) + self.k_sigma * (dHmindE - dHdE)
mis.guide["sigma_max_L1"] = L1
mis.guide["sigma_max_Lmax"] = Lmax
mis.guide["sigma_max_dHmindE"] = float(dHmindE)
mis.guide["sigma_max_dHdE"] = dHdE
mis.guide["sigma_max"] = sigma_max
return sigma_max
# return np.inf
def update_path_angle_sgp(self, mis: Missile, tar: Missile = None, meta={}):
"""
        Update the path angle required for steady glide.
:param mis:
:return:
"""
rho = ATM.rho(mis.guide["h"])
drho_dh = -0.00015 * rho
v = mis.guide["v"]
S = mis.guide["S"]
m = mis.guide["m"]
Rh = mis.guide["h"] + e.Re
sigma_bsl = self.sigma_bsl(mis, tar, meta)
alpha_bsl = self.attack_angle_plan(mis.guide['E'])
# CL_bsl = cav_corridor.interp_CL_E(mis.guide["E"])
# D_bsl = cav_corridor.interp_CD_E(mis.guide["E"]) * mis.guide["q_inf"]
        # Compute the reference lift/drag coefficients from the reference angle of attack and the current Mach number
CL_bsl, CD_bsl = mis.aero.CLCD(mis.ma, alpha_bsl)
D_bsl = CD_bsl * mis.guide["q_inf"]
dCL_dE = cav_corridor.interp_dCL_dE(mis.guide["E"])
d1 = rho * v ** 2 * S * cos(sigma_bsl) / 2 / m * dCL_dE + 2 / Rh + CL_bsl * rho * S * cos(sigma_bsl) / m
d2 = -CL_bsl * v ** 2 * S * cos(sigma_bsl) * drho_dh / 2 / e.g0 / m + 2 / Rh + CL_bsl * rho * S * cos(
sigma_bsl) / m + v ** 2 / Rh ** 2 / e.g0
mis.guide["gamma_sg"] = -D_bsl / m / e.g0 * d1 / d2
def alpha_bsl(self, mis: Missile, tar: Missile = None, meta={}):
return self.attack_angle_plan(mis.guide['E'])
def sigma_bsl(self, mis: Missile, tar: Missile = None, meta={}):
if self.guide_mode == 'descent_phase1' or self.guide_mode == 'descent_phase2':
return 0
if self.guide_mode == 'steady_glide_phase':
return self.sigma_bsl_sgp(mis, tar, meta)
if self.guide_mode == 'altitude_adjustment_phase':
return self.sigma_bsl_aap(mis, tar, meta)
def sigma_bsl_sgp(self, mis: Missile, tar: Missile = None, meta={}):
L1_L = self.L1D(mis) / (mis.guide["CL"] / mis.guide["CD"])
        assert 1 >= L1_L >= -1, 'Beyond the achievable range'
res = self.sgn * arccos(L1_L)
if mis.guide["E"] > self.EBR1:
return res
else:
return -res
def sigma_bsl_aap(self, mis: Missile, tar: Missile = None, meta={}):
gamma = mis.status.path_angle
phi = mis.status.latitude
psi = mis.status.heading_angle
H = e.Re + mis.guide["h"]
v = mis.guide["v"]
delta_psi = mis.guide['delta_psi']
s_go = mis.guide["s_go"]
s_go_EBR2 = mis.guide["sgo_EBR2"]
d_psi_LOS = mis.guide["v"] * cos(gamma) * sin(delta_psi) / s_go
k_PN = 4 - 2 * s_go / s_go_EBR2
delta_L1_m = e.omega_e ** 2 * H * cos(phi) ** 2 + 2 * v * e.omega_e * cos(phi) * sin(psi)
delta_L2_m = -e.omega_e ** 2 * H * sin(phi) * cos(phi) * sin(psi) + 2 * v * e.omega_e * sin(phi)
aL1 = e.g0 - v ** 2 / H - delta_L1_m
aL2 = k_PN * d_psi_LOS * v * cos(gamma) - delta_L2_m
mis.guide["sigma_aap_aL2"] = aL2
mis.guide["sigma_aap_deltaL2m"] = delta_L2_m
return -arctan(aL2 / aL1)
def L1D(self, mis: Missile, E=None):
if not E:
E = mis.guide["E"]
if E >= self.E_alpha:
return mis.guide["L1D1"]
else:
return ((self.E_alpha - E) / (self.E_alpha - self.E_TAEM)) ** 2 * (
mis.guide["L1D2"] - mis.guide["L1D1"]) + mis.guide["L1D1"]
def update_L1D(self, mis: Missile, tar: Missile = None, meta={}):
        # 1. Compute L1/D2
L1D2 = self.L1D2(mis)
mis.guide["L1D2"] = L1D2
        # 2. Compute L1/D1
kxD1, kxD2 = self.kxD(mis, tar, meta)
mis.guide["L1D1"] = (mis.guide["s_go"] - self.S_TAEM - kxD1 * L1D2) / (-kxD1 + kxD2)
def update_EBR1(self, mis: Missile, tar: Missile = None, meta={}):
        print(f'Updating EBR1, t={meta["t"]}')
optimize.newton(lambda x: self.tempxCf(mis, x), self.EBR1, fprime=lambda x: self.tempdxCf(mis, x), tol=1e3)
def tempxCf(self, mis, EBR1):
self.EBR1 = EBR1
return self.xCf(mis)
def tempdxCf(self, mis, EBR1):
return self.dxCf(mis)
def xCf(self, mis: Missile):
Rs = e.Re + cav_corridor.average_h_e
E = mis.guide["E"]
kh3 = mis.guide["kh3"]
h3 = lambda x: (kh3[0] + kh3[1] * x + kh3[2] * x ** 2) / self.h_m(mis, x)
delta_psi = mis.guide['delta_psi']
integ = integrate.quad(
lambda x: sin(self.xD(mis, self.E_TAEM, x) / e.Re) * self.L1D(mis, x) * h3(x) / (2 * x + 2 * e.mu / Rs)
, E, self.E_TAEM)
res = e.Re * delta_psi * sin(self.xD(mis, self.E_TAEM, E) / e.Re) + e.Re * integ[0] - self.sgn * e.Re * (
self.F(mis, self.EBR1, E) - self.F(mis, self.EBR2, self.EBR1) + self.F(mis, self.E_TAEM, self.EBR2))
return res
def dxCf(self, mis: Missile):
return -2 * self.sgn * e.Re * sin(self.xD(mis, self.E_TAEM, self.EBR1) / e.Re) * self.f2(mis, self.EBR1)
def F(self, mis: Missile, x2, x1):
res = integrate.quad(lambda x: sin(self.xD(mis, self.E_TAEM, x) / e.Re) * self.f2(mis, x), x1, x2)
return res[0]
def L2D(self, mis: Missile, E=None):
if not E:
E = mis.guide["E"]
return abs(np.sqrt(cav_corridor.L2D_E(E) ** 2 - self.L1D(mis, E) ** 2))
def f2(self, mis: Missile, E=None):
if not E:
E = mis.guide["E"]
return self.L2D(mis, E) * (1 + (mis.guide["kh2"][0] + mis.guide["kh2"][1] * E) / self.h_m(mis, E)) / (
2 * E + 2 * e.mu / (cav_corridor.average_h_e + e.Re))
def xD(self, mis: Missile, E_end, E_start):
"""
        Downrange estimation.
:param mis:
:param E_end, E_start: E_end < E_start
:return:
"""
        # Do not wrap the assert condition and message in parentheses (a tuple), otherwise it is always True
        assert E_start >= E_end, 'The start energy should be greater than the end energy'
L1D1 = mis.guide["L1D1"]
kh1 = mis.guide["kh1"]
Rs = e.Re + cav_corridor.average_h_e
temp1 = np.log((2 * E_end + e.mu / Rs) / (2 * E_start + e.mu / Rs))
temp2 = 1 / (2 * E_end + e.mu / Rs) - 1 / (2 * E_start + e.mu / Rs)
if self.E_alpha <= E_end:
res = e.Re * L1D1 / 2 * (1 + kh1[1] / 2) * temp1 - e.Re * L1D1 / 2 * (
kh1[0] - e.mu * kh1[1] / 2 / Rs) * temp2
elif self.E_alpha >= E_start:
L1D2 = mis.guide["L1D2"]
a0 = (L1D2 - L1D1) * self.E_alpha ** 2 / (self.E_alpha - self.E_TAEM) ** 2 + L1D1
a1 = -2 * (L1D2 - L1D1) * self.E_alpha / (self.E_alpha - self.E_TAEM) ** 2
a2 = (L1D2 - L1D1) / (self.E_alpha - self.E_TAEM) ** 2
res = (1 + kh1[1] / 2) * e.Re * a2 / 4 * (E_end ** 2 - E_start ** 2) + e.Re / 2 * (
(1 + kh1[1] / 2) * a1 + (-1 - kh1[1] + Rs * kh1[0] / e.mu) * e.mu * a2 / 2 / Rs) * (E_end - E_start) \
+ e.Re / 2 * ((1 + kh1[1] / 2) * a0 + (-1 - kh1[1] + Rs * kh1[0] / e.mu) * e.mu * a1 / 2 / Rs) * temp1 \
+ e.Re / 2 * (e.mu ** 2 * (2 + 3 * kh1[1]) - 4 * e.mu * Rs * kh1[0]) / (8 * Rs ** 2) * a2 * temp1 \
- e.Re / 2 * (kh1[0] - e.mu * kh1[1] / 2 / Rs) * (
a0 - e.mu * a1 / 2 / Rs + e.mu ** 2 / 4 / Rs ** 2 * a2) * temp2
else:
res = self.xD(mis, self.E_alpha, E_start) + self.xD(mis, E_end, self.E_alpha)
return res
def kxD(self, mis: Missile, tar: Missile, meta={}):
Re = e.Re
kh1 = mis.guide["kh1"]
Rs = cav_corridor.average_h_e + Re
Ea, Et = self.E_alpha, self.E_TAEM
kxd1 = -Re / 4 * (1 + kh1[1] / 2) * (Ea + Et) / (Ea - Et) \
+ Re / 2 / (Ea - Et) * (2 * Ea + e.mu / 2 / Rs + (Ea + e.mu / 2 / Rs) * kh1[1] - kh1[0] / 2) \
+ np.log((2 * Et + e.mu / Rs) / (2 * Ea + e.mu / Rs)) * Re * (2 * Ea * Rs + e.mu) / 2 / (Ea - Et) ** 2 * \
((2 * Ea * Rs + e.mu) / 4 / Rs ** 2 - kh1[0] / 2 / Rs + (2 * Ea * Rs + 3 * e.mu) / 8 / Rs ** 2 * kh1[1]) \
- Re * (2 * Ea * Rs + e.mu) ** 2 / 8 / Rs ** 2 / (Ea - Et) ** 2 * \
(kh1[0] - e.mu * kh1[1] / 2 / Rs) * (1 / (2 * Et + e.mu / Rs) - 1 / (2 * Ea + e.mu / Rs))
log_ = (2 * Et + e.mu / Rs) / (2 * mis.guide["E"] + e.mu / Rs)
kxd2 = Re / 2 * (1 + kh1[1] / 2) * np.log(log_) \
- Re / 2 * (kh1[0] - e.mu * kh1[1] / 2 / Rs) * \
(1 / (2 * Et + e.mu / Rs) - 1 / (2 * mis.guide["E"] + e.mu / Rs))
return kxd1, kxd2
def update_kh(self, mis: Missile, tar: Missile, meta={}):
Rs = cav_corridor.average_h_e + e.Re
m_phi = mis.status.latitude
t_phi = tar.status.latitude
E = mis.guide["E"]
ET = self.E_TAEM
ha = mis.guide["great_circle_heading"]
        # L/D * cos(σ) represents the longitudinal lift-to-drag ratio of the previous step
hz1 = -2 * Rs * e.omega_e * mis.guide["v"] * cos(m_phi) * sin(ha[0]) - \
Rs * e.omega_e ** 2 * (e.Re + mis.guide["h"]) * cos(m_phi) * \
(cos(m_phi) - mis.guide["CL"] / mis.guide["CD"] * cos(mis.control["bank_angle"]) * sin(
m_phi) * cos(ha[0]))
hz1_t = -2 * Rs * e.omega_e * self.V_TAEM * cos(t_phi) * sin(ha[1]) - \
Rs * e.omega_e ** 2 * (e.Re + self.H_TAEM) * cos(t_phi) * \
(cos(t_phi) - mis.guide["L2Dbsl_TAEM"] * sin(
t_phi) * cos(ha[1]))
kh1 = (hz1_t * E - hz1 * ET) / (E - ET), \
(hz1 - hz1_t) / (E - ET)
hz2 = e.omega_e ** 2 * Rs * (e.Re + mis.guide["h"]) * mis.guide["CL"] / mis.guide["CD"] * cos(
mis.control["bank_angle"]) * sin(m_phi) * cos(m_phi) * cos(ha[0])
hz2_t = e.omega_e ** 2 * Rs * (e.Re + self.H_TAEM) * mis.guide["L2Dbsl_TAEM"] * sin(t_phi) * cos(
t_phi) * cos(ha[1])
kh2 = (hz2_t * E - hz2 * ET) / (E - ET), \
(hz2 - hz2_t) / (E - ET)
kh3 = -2 * e.omega_e * Rs * (self.V_TAEM * E - ET * mis.guide["v"]) * (sin(t_phi) * E - sin(m_phi) * ET) / (
E - ET) ** 2, \
-2 * e.omega_e * Rs * (self.V_TAEM * sin(m_phi) + mis.guide["v"] * sin(t_phi)) * (E + ET) / (E - ET) ** 2 \
+ 4 * e.omega_e * Rs * (self.V_TAEM * sin(t_phi) * E + mis.guide["v"] * sin(m_phi) * ET) / (E - ET) ** 2, \
-2 * e.omega_e * Rs * (mis.guide["v"] - self.V_TAEM) * (sin(m_phi) - sin(t_phi)) / (E - ET) ** 2
mis.guide["kh1"] = kh1
mis.guide["kh2"] = kh2
mis.guide["kh3"] = kh3
def h_m(self, mis: Missile, E=None):
if not E:
E = mis.guide["E"]
Rs = e.Re + cav_corridor.average_h_e
return 2 * E + e.mu / Rs
def L1D2(self, mis: Missile):
        # k_L/D * L/D_bsl_TAEM, taking k_L/D = 1
return mis.guide["L2Dbsl_TAEM"]
def s_go(self, mis: Missile, tar: Missile = None, meta={}):
x_mis = ct.coordinate_transformation(0, lamb=mis.status.longitude, phi=mis.status.latitude)
x_tar = ct.coordinate_transformation(0, lamb=tar.status.longitude, phi=tar.status.latitude)
return e.Re * arccos(x_mis.dot(x_tar))
def guide2control(self, mis: Missile, tar: Missile = None, meta={}):
for item in self.control_param_list:
mis.control[item] = mis.guide[item]
mis.control['CL'], mis.control['CD'] = mis.aero.CLCD(mis.ma, mis.guide["attack_angle"])
mis.control['L'] = mis.control['CL'] * mis.guide['q_inf']
mis.control['D'] = mis.control['CD'] * mis.guide['q_inf']
def end_guide(self, mis: Missile = Missile(), tar: Missile = Missile(), meta={}, flag=False):
if self.end_flag:
return True
if mis.guide.get('s_go', np.inf) < self.S_TAEM:
self.end_flag = True
if mis.guide.get('E', np.inf) < (self.E_TAEM - 5E5) and not self.allow_update_param:
self.end_flag = True
if flag:
self.end_flag = flag
return self.end_flag
| [] |
2024-01-10 | aofenghanyue/EntryGuidance | customSimulation.py | import numpy as np
import settings as glbs
from entity.missile import Missile
from guidance.guide import Guidance
from simulation import TrajectorySimulation
from store.dataSave import DataSave
from utils.integral import Integral
class CustomSimulation(TrajectorySimulation):
def __init__(self):
super(CustomSimulation, self).__init__()
def init(self, mis=Missile(), tar=Missile(), guide=Guidance(), integ=Integral(), db=DataSave()):
self.mis = mis
self.tar = tar
self.guide = guide
self.integral = integ
self.db = db
        # Initialize the missile and target states
self.mis.status.change_stat(glbs.MissileInitStatus)
self.tar.status.change_stat(glbs.TargetInitStatus)
        # Initialize the guidance model
self.guide.init(self.mis, self.tar)
self.is_online = False
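    # The integration step length shrinks as pow(0.1, accurate_mode): 1 s by default,
    # then 0.1 s / 0.01 s / 0.001 s as the guidance law requests higher accuracy.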
def step_len(self) -> float:
if self.guide.accurate_mode:
return pow(0.1, self.guide.accurate_mode)
return 1
def save_data(self) -> None:
self.db_save_dict = {"global_t": self.t,
"E": self.mis.guide["E"]}
self.db_save_dict.update(self.mis.status.status_dict())
self.db_save_dict.update(self.mis.control)
self.db_save_dict.update(self.from_mis_guide(getattr(glbs, 'GUIDE_SAVE_PARAM', [])))
self.db.update(self.db_save_dict)
def is_continue(self):
return True
def after_simulation(self):
if self.is_online:
return
status = {
            'Longitude': np.rad2deg(self.mis.status.longitude),
            'Latitude': np.rad2deg(self.mis.status.latitude),
            'Time': self.mis.status.t,
            'Velocity': self.mis.status.velocity,
            'Height': self.mis.status.height,
            'Δψ': self.mis.guide["delta_psi"]
        }
        print('Missile final state:')
for k in status.keys():
print(f'{k}: {status[k]}\n')
| [] |
2024-01-10 | aofenghanyue/EntryGuidance | core~MultiMissileSim.py | # -*- coding: utf-8 -*-
# EditTime : 2021-09-27 19:13
# Author : Of yue
# File : MultiMissileSim.py
# Intro :
from itertools import chain
from typing import List
from entity.missile import Missile
from guidance.MultiGuide import MultiMissileGuidance
from store.dataSave import DataSave
from utils.integral import Integral
import multiset as glbs
class MultiMissileSim:
def __init__(self):
        # Maximum number of iterations
        self.max_iter = getattr(glbs, "MAXITER", 0)
        # Current iteration count
        self.iter = 0
        # Minimum integration step length
        self.min_h = getattr(glbs, "MIN_H", 1E-4)
        # Global time
        self.t = getattr(glbs, "t0", 0)
        # Missiles and targets participating in the simulation
        self.mis: List[Missile] = []
        self.tar: List[Missile] = []
        # Guidance module
        self.guide: MultiMissileGuidance = MultiMissileGuidance()
        self.guide_params_list = getattr(glbs, "ParamsToGuide", [])
        # Integration module
        self.integral = Integral()
        # Data-storage module
        self.db: List[DataSave] = []
        self.db_save_dict: dict = {}  # temporary dict for the values saved at each step
        self.init()
        # Other
        # Whether this is a temporary online (inner-loop) simulation
self.is_main = True
def init(self, mis=[], tar=[], guide=MultiMissileGuidance(), integ=Integral(), db=[]):
"""
        Initialize the missiles, targets, guidance mode, integrator, data-storage modules, etc. Must be overridden.
:param mis: List[Missile]
:param tar: List[Missile]
:param guide: Guidance
:param integ: Integral
:param db: List[DataSave]
:return:
"""
pass
def step_len(self):
"""
        Compute the integration step length for each step. Must be overridden.
:return:
"""
return self.min_h
def simulation(self):
self._before_simulation()
while self.next():
self._after_one_step()
self._after_simulation()
def next(self):
"""
        One simulation loop iteration; ends automatically once guidance stops.
:return:
"""
if self._is_continue():
if self.one_step_guide():
                # Save the previous step's state before changing it
self.after_guide()
                # Integrate and update the state
                self.one_step_integral()  # includes self.one_step_update_status()
return True
return False
def one_step_guide(self):
"""
1.产生控制指令
:return: True代表单步制导执行成功
False代表制导结束
"""
# 产生传入制导模块的相关参数
# 产生制导指令, 如果制导指令成功生成,则继续仿真
return self.guide.one_step_guide(self.mis, self.tar, self.guide_params())
def one_step_integral(self):
"""
        2. Single integration step for each entity.
:return:
"""
for m in chain(self.mis, self.tar):
self.one_step_integral_object(m)
def one_step_integral_object(self, obj: Missile):
        # If the accumulated time exceeds the step length, run guidance once
if not obj.guide["guide_process"]:
return False
if obj.guide["end_guide"]:
return False
if obj.launched:
            # Update the state if the missile has been launched
x, y0 = obj.status.x, obj.status.y
temp_step_len = obj.guide.get("step_len", self.step_len())
y_next, dy = self.integral.next_step(obj.equation.equation, x, y0, temp_step_len, obj.control, need_dy=True)
x_next = x + temp_step_len
if dy is not None:
obj.guide["dy"] = dy
            # 3. Update the state of each entity
status_update_data = {k: v for k, v in zip(obj.status.integral_key, y_next)}
status_update_data.update({obj.status.independent_key: x_next})
obj.status.change_stat(status_update_data)
def guide_params(self):
"""
        Generate the parameters to be passed to the guidance module.
:return: None
"""
temp_guide_params = {}
for param in self.guide_params_list:
if param == "self":
temp_guide_params["simulation"] = self
else:
temp_guide_params[param] = getattr(self, param, None)
return temp_guide_params
def save_data(self) -> None:
"""
        Save the data.
:return: None
"""
for index, (mis, db) in enumerate(zip(self.mis, self.db)):
            # Save data only if guidance ran during this simulation cycle
if mis.guide["guide_process"] and not mis.guide["end_guide"]:
db_save_dict = {"global_t": self.t}
                # By default only the time, missile state and control variables are stored
db_save_dict.update(mis.status.status_dict())
db_save_dict.update(mis.control)
db.update(db_save_dict)
def _is_continue(self) -> bool:
"""判断是否继续仿真"""
if self.iter > self.max_iter:
return False
if not self.is_continue():
return False
return True
def is_continue(self):
return True
def after_guide(self):
"""
        Runs after the guidance command has been computed.
        The missile state and its control commands can be stored here.
:return:
"""
self.save_data()
def _after_one_step(self):
"""每个积分循环完成后需要进行的操作
可以存储数据等,但一般存储数据在制导完成之后进行"""
# 迭代次数+1
self.iter += 1
# 时间前进h
self.t += self.min_h
self.after_one_step()
def after_one_step(self):
pass
def _before_simulation(self):
print("仿真开始")
self.before_simulation()
def before_simulation(self):
pass
def _after_simulation(self):
"""仿真结束后需要做的事情
例如将结果输出为图像"""
print("仿真结束")
self.after_simulation()
pass
def after_simulation(self):
pass
def from_mis_guide(self, mis: Missile, param_list: list):
return {param: mis.guide.get(param, None)
for param in param_list}
| [] |
2024-01-10 | aofenghanyue/EntryGuidance | core~multiMissileSimInstance.py | # -*- coding: utf-8 -*-
# EditTime : 2021-09-29 20:09
# Author : Of yue
# File : multiMissileSimInstance.py
# Intro :
import numpy as np
import multiset as glbs
from entity.missile import Missile
from guidance.MultiGuide import MultiMissileGuidance
from core.MultiMissileSim import MultiMissileSim
from store.dataSave import DataSave
from utils.integral import Integral
class MultiMisSimInstance(MultiMissileSim):
def __init__(self):
super(MultiMisSimInstance, self).__init__()
def init(self, mis=[], tar=[], guide=MultiMissileGuidance(), integ=Integral(), db=[]):
self.mis = mis
self.tar = tar
self.guide = guide
self.integral = integ
self.db = [db for _ in mis]
        # Initialize the missile and target states
for m, t, status in zip(self.mis, self.tar, glbs.StatusParams):
m.status.change_stat(status["MissileInitStatus"])
t.status.change_stat(status["TargetInitStatus"])
        # Initialize the guidance model
self.guide.init(self.mis, self.tar, self.guide_params())
def save_data(self) -> None:
for index, (mis, db) in enumerate(zip(self.mis, self.db)):
            # Save data only if guidance ran during this simulation cycle
if mis.guide["guide_process"] and not mis.guide["end_guide"]:
db_save_dict = {"global_t": self.t, "E": mis.guide["E"]}
db_save_dict.update(mis.status.status_dict())
db_save_dict.update(mis.control)
db_save_dict.update(self.from_mis_guide(mis, getattr(glbs, 'GUIDE_SAVE_PARAM', [])))
db.update(db_save_dict)
| [] |
2024-01-10 | aofenghanyue/EntryGuidance | multi_main.py | # -*- coding: utf-8 -*-
# EditTime : 2021-10-27 9:27
# Author : Of yue
# File : multi_main.py
# Intro :
import pandas
import settings as glbs
from guidance.multiMissileGuideInstance import MultiMisGuideInstance
from core.multiMissileSimInstance import MultiMisSimInstance
from database.vehicleParams import CAVHParams
from dynamics.aerodynamic import AerodynamicCAVH
from dynamics.motionEquation import ME6D
from entity.missile import Missile
from store.dataSave import DataSave
from store.status import ME6DStatus, MissileStatus
from utils.integral import RungeKutta4
mis = [Missile(motion_equation=ME6D(),
aerodynamic=AerodynamicCAVH(),
params=CAVHParams(),
status=ME6DStatus())]
# 2. Create the target object
tar = [Missile(status=MissileStatus())]
# 3. Create the guidance module
guide = MultiMisGuideInstance()
# 4. Create the integration module
integral = RungeKutta4()
# 5. Create the data-storage module
database = DataSave()
"""Create the simulation model"""
simulation = MultiMisSimInstance()
simulation.init(mis=mis, tar=tar, guide=guide, integ=integral, db=database)
"""Start the simulation"""
simulation.simulation()
result = simulation.db[0].data
pass | [] |
2024-01-10 | lingwsh/ben_tech_python | whisper~whisper_api.py | import openai
import time
start_time = time.time()
api_key = "Input your API key here"
openai.api_key = api_key
# Better use your local absolute path
file_path = "/data/test.m4a"
file = open(file_path, "rb")
prompt = "Write your prompt here"
# response_format: default is json. text, srt, verbose_json, or vtt are also supported.
# API link:
# https://platform.openai.com/docs/api-reference/audio/create
# language: default is autodetect.
# Supported languages:
# https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
transcription = openai.Audio.transcribe("whisper-1", file, prompt=prompt, response_format="srt", language="zh")
# Better use your local absolute path and set the output file name
output_path = "/output/test.srt"
# Error fix file path: venv/lib/python3.9/site-packages/openai/api_requestor.py
with open(output_path, "w") as f:
f.write(str(transcription))
end_time = time.time()
time_spent = round(end_time - start_time, 2)
print("Time spent:", time_spent, "seconds") | [
"Write your prompt here"
] |
2024-01-10 | lingwsh/ben_tech_python | whisper~video_srt.py | import moviepy.editor as mp
import openai
import time
import os
# Extract audio from video to srt
path = "/ben_tech/12_python_string/output/python_string-YouTube_4k.mp4"
prompt = "使用ChatGPT基于GPT4 讲解Python String"
api_key = "Put your key in here"
# Get file name without extension
filename = path.split("/")[-1].split(".")[0]
print(filename)
my_clip = mp.VideoFileClip(path)
output_name = "/data/{}.mp3".format(filename)
# Check the path file exists
if not os.path.exists(output_name):
my_clip.audio.write_audiofile(output_name)
# Use OpenAI API to transcribe audio to text
start_time = time.time()
openai.api_key = api_key
# Better use your local absolute path
file_path = output_name
file = open(file_path, "rb")
# response_format: default is json. text, srt, verbose_json, or vtt are also supported.
# API link:
# https://platform.openai.com/docs/api-reference/audio/create
# language: default is autodetect.
# Supported languages:
# https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
print("Start transcribing...")
transcription = openai.Audio.transcribe("whisper-1", file, prompt=prompt, response_format="srt", language="zh")
# Better use your local absolute path and set the output file name
output_path = "/output/{}.srt".format(filename)
with open(output_path, "w") as f:
f.write(str(transcription))
end_time = time.time()
time_spent = round(end_time - start_time, 2)
print("Time spent:", time_spent, "seconds") | [
"使用ChatGPT基于GPT4 讲解Python String"
] |
2024-01-10 | markling1979/langchain-doc-summary | all-in-one~pages~2_URL_Summary.py | import validators, streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import UnstructuredURLLoader
from langchain.chains.summarize import load_summarize_chain
from langchain.prompts import PromptTemplate
# Set API keys from session state
openai_api_key = st.session_state.openai_api_key
# Streamlit app
st.subheader('URL Summary')
url = st.text_input("Enter Source URL")
# If 'Summarize' button is clicked
if st.button("Summarize"):
# Validate inputs
if not openai_api_key:
st.error("Please provide the missing API keys in Settings.")
elif not url:
st.error("Please provide the URL.")
elif not validators.url(url):
st.error("Please enter a valid URL.")
else:
try:
with st.spinner("Please wait..."):
# Load URL data
loader = UnstructuredURLLoader(urls=[url])
data = loader.load()
# Initialize the ChatOpenAI module, load and run the summarize chain
llm = ChatOpenAI(temperature=0, model='gpt-3.5-turbo', openai_api_key=openai_api_key)
prompt_template = """Write a summary of the following in 200-250 words:
{text}
"""
prompt = PromptTemplate(template=prompt_template, input_variables=["text"])
chain = load_summarize_chain(llm, chain_type="stuff", prompt=prompt)
summary = chain.run(data)
st.success(summary)
except Exception as e:
st.exception(f"Exception: {e}")
| [
"Write a summary of the following in 200-250 words:\n \n {text}\n\n "
] |
2024-01-10 | Sar2580P/Dev-Rev-Inter-IIT | utils~templates_prompts.py | from langchain.prompts import PromptTemplate, FewShotPromptTemplate
from utils.parsers import *
PAST_MISTAKES ='''
Below I have mentioned common mistakes made by you while using the tools.
{mistakes}
!! PLEASE GO THROUGH THEM CAREFULLY AND AVOID MAKING SIMILAR MISTAKES.
Note that the contexts of the above mistakes are different from AND INDEPENDENT OF THE CURRENT USER QUERY.
DO NOT TAKE CONTEXT FROM ABOVE QUERIES.
'''
PREFIX = """
Below are the tools in your tool-kit along with their description to help you decide on tool choice.
"""
#____________________________________________________________________________________________________________
FORMAT_INSTRUCTIONS = """
ALERT !!!
- The Thought-Action-Observation loop repeats until we feel that the agent has completely answered the user query.
- Each time this process repeats, you need to give the reason for choosing a particular tool in Thought.
Use the following format:
Question: the input question you must answer
Thought : the reason for picking the tool in the process of answering the user query.
Action : the tool to use, should be one of [{tool_names}]
Action Input: - Your selected tool will need to get its arguments filled by another agent. This agent does not have access to the query or the current output chain.
            - PRECISELY TELL THIS AGENT HOW YOU WANT IT TO FILL THE ARGUMENTS, in natural language, giving emphasis to the argument name and its value.
            - IF you feel that this tool needs the output of other tools, you can refer to their output stored in the format $$PREV[i], where i is the index of the output you want to use.
... (this Thought/Action/Action Input must ONLY OCCUR ONCE)
Note that it is possible that the query has been successfully answered and no further tool calls are required
In this case return:
Thought: Task has been completed
Action: NONE
Action Input: Task complete
"""
# ===================================================================================================================================================================================================
# ===================================================================================================================================================================================================
# ===================================================================================================================================================================================================
# MISTAKE_SELECTION = '''
# Below you are provided with one of the past mistakes made by another AI agent on some other user query :
# {mistake}
# Below you are provided with the current user query :
# CURRENT_USER_QUERY : {input}
# Check diligently if the current user query is similar to the past query, in terms of the vulnerability of the AI agent to make the same mistake.
# If there are some chances of making the same or similar mistake on the current user query, return 1 else return 0.
# ANSWER :
# '''
MISTAKE_SELECTION = '''
Just as a person learns to perform a task correctly by acknowledging their mistakes, an AI agent can also learn to perform a task correctly by acknowledging its mistakes.
Below is the user query AI agent is trying to solve:
USER_QUERY : {input}
Below is one of the past mistakes made by the AI agent on some other user query:
MISTAKE : {mistake}
- You need to check if the above mistake is relevant to the current user query or not.
- Whether the AI agent should use this mistake as experience while solving the current user query or not.
FORMAT_INSTRUCTIONS :
- Return 1 if the above mistake is relevant to the current user query, else return 0.
- Stick to the information provided above, decide the importance of the mistake judiciously, and don't pollute the information.
ANSWER :
'''
# ===================================================================================================================================================================================================
# ===================================================================================================================================================================================================
# ===================================================================================================================================================================================================
TOOL_INPUT_PROMPT = '''
The user query is as follows:
User_Query : {query}
The intermediate steps taken till now to solve the above query are as follows:
{intermediate_steps}
Below is the next tool that needs to be used in the intermediate steps: {tool_name}
The short description of the tool, to help you reason out, is as follows:
{tool_description}
You are expected to create a sub-task for the above tool from the given user_query, tool_description and the intermediate steps taken till now.
While creating the sub-task for the above tool, adhere to the tool description.
Don't query the tool for tasks that are not mentioned in its description.
FORMAT INSTRUCTIONS :
{format_instructions}
'''
# ===================================================================================================================================================================================================
EXAMPLES = [
{
'query': "Find all high priority issues related to part 'FEAT-123' created by user 'DEVU-123', prioritize them, and add them to the current sprint" ,
'intermediate_steps': '''[
{{"tool_name": "works_list", "arguments": [{{"argument_name": "issue.priority", "argument_value": "high"}},
{{"argument_name": "applies_to_part", "argument_value": "FEAT-123"}},
{{"argument_name": "created_by", "argument_value": "DEVU-123"}}]}},
{{"tool_name": "prioritize_objects", "arguments": [{{"argument_name": "objects", "argument_value": "$$PREV[0]"}}]}},
{{"tool_name": "get_sprint_id", "arguments": []}},
]''',
'tool_name': 'add_work_items_to_sprint',
'tool_description': "Adds the given work items to the sprint. This tool needs to know the list of work_id and the sprint_id to which the work items should be added.",
'tool_input': "Add work items $$PREV[1] to sprint_id $$PREV[2]"
}
]
EXAMPLE_FORMATTER_TEMPLATE = """
query: {query}\n
intermediate steps : {intermediate_steps}\n
tool_name: {tool_name}
tool_description: {tool_description}
tool_input: {tool_input}\n\n
"""
EXAMPLE_PROMPT = PromptTemplate(
input_variables=["query", "intermediate_steps" , "tool_name" , "tool_description", "tool_input"],
template=EXAMPLE_FORMATTER_TEMPLATE,
)
# ===================================================================================================================================================================================================
sub_task_prompt = FewShotPromptTemplate(
examples=EXAMPLES,
# prompt template used to format each individual example
example_prompt=EXAMPLE_PROMPT,
# prompt template string to put before the examples, assigning roles and rules.
prefix="Here are some few shots of how to create sub-task for a given tool based query, intermediate_steps and tool_description:\n",
# prompt template string to put after the examples.
suffix=TOOL_INPUT_PROMPT,
# input variable to use in the suffix template
input_variables=["query" , "intermediate_steps" , "tool_name" , "tool_description"],
example_separator="\n",
partial_variables= {'format_instructions' : sub_task_parser.get_format_instructions()}
)
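# Illustrative usage sketch (not part of the original pipeline): a preview of how the few-shot
# prompt above is assembled. The query, intermediate steps and tool values below are hypothetical
# placeholders chosen only for demonstration.
if __name__ == "__main__":
    _preview = sub_task_prompt.format(
        query="Prioritize all work items similar to TKT-123 and add them to the current sprint",
        intermediate_steps='[{"tool_name": "get_similar_work_items", "arguments": [{"argument_name": "work_id", "argument_value": "TKT-123"}]}]',
        tool_name="prioritize_objects",
        tool_description="Use this tool when asked to prioritize the objects.",
    )
    print(_preview)  # prints the prefix, the formatted EXAMPLES, then the TOOL_INPUT_PROMPT suffix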
# ===================================================================================================================================================================================================
# ===================================================================================================================================================================================================
# ===================================================================================================================================================================================================
MISSED_TOOL_TEMPLATE = '''
There is an AI agent which is picking up tools under the ReAct framework to solve user queries.
It misses picking up the correct tool, being unable to reason out its usage for the given query.
I provide you some few-shot examples of how to reason out the mistake highlight of a tool based on the user query and tool description:
Example_Query : "Prioritize my p0 issues"
Tools missed:
Incorrect tool uses:
Example_MISTAKE : "The tool 'who_am_i' is useful as there is a keyword 'my' which hints towards the user currently logged in. So, this tool can get the user id of the user currently logged in."
Example_Query : "Summarize high severity tickets from the customer UltimateCustomer"
Tools missed:
Incorrect tool uses:
Example_MISTAKE :"We need to find the id of the object, so we must use the tool 'search_object_by_name' tool which searches for the object id based on the name of the object."
Example_Query : "What are my all issues in the triage stage under part FEAT-123? Summarize them"
Tools missed:
Incorrect tool uses:
Example_MISTAKE :"We need to find the id of the object, so we must use the tool 'search_object_by_name' tool which searches for the object id based on the name of the object."
- You need to provide an eye-catching insight into why that tool should not be missed for the given query, based on the user query and tool description.
- Your insight will help the agent learn from its mistakes. Don't be super-specific to the user query; keep the tool description in consideration.
- Keep your insight within 20 words and at least 9 words. Present the answer in a paragraph.
USER_QUERY : {query}
Tools missed : {tools_missed}
Incorrect tool uses : {incorrect_tool_uses}
Agent_MISTAKE :
'''
missed_tool_prompt = PromptTemplate(template=MISSED_TOOL_TEMPLATE, input_variables=['agent_scratchpad','query' ,'correct_tool_name' , 'tool_description'])
# ===================================================================================================================================================================================================
# ===================================================================================================================================================================================================
# ===================================================================================================================================================================================================
TOOLS_PROMPT_EXAMPLES = '''
Your task is to extract the argument value from user query based on argument description and argument type.
The datatype is {arg_dtype}
The argument description is:
{arg_description}
The above-mentioned arguments have their values present in the query. You need to extract the argument value from the query.
Don't pollute the information; stick to the information provided in the user query.
Your query is:
{user_query}
FORMAT INSTRUCTIONS --->
- Don't return anything else other than the argument value.
- Ensure that the argument value is in correct data type before returning.
- If the argument value is not explicitly present in the query, then return "NONE".
ALERT !!!
- If the Query contains specific keywords like $$PREV[i], where i is the index of the output you want to use,
then it is a symbolic representation of the output and is NOT THE ACTUAL OUTPUT
- use $$PREV[i] as a whole and don't pass invalid representations like "$$PREV" OR "$$PREV[]" or i
ANSWER :
'''
#____________________________________________________________________________________________________________
ARG_FILTER_PROMPT = '''
I want to use a tool which has a lot of arguments. I want to filter out only those arguments which can surely be extracted from the user query.
Below I provide the arguments that the tool can take along with their description:
{arg_description}
{format_instructions}
Below I provide the query, which needs to be referenced while deciding which arguments to filter out:
QUERY : {query}
ALERT !!!
- Don't create any new argument from the user query; make sure that the filtered arguments have correct names.
- If the Query contains specific keywords like $$PREV[i], then take it as a whole.
- Stick to information provided in the user query and description of arguments.
- Don't pollute the argument filtering with any assumptions.
- Make sure that there are no parsing errors.
'''
#____________________________________________________________________________________________________________
LOGICAL_TEMPLATE = '''
You need to return a code block executing the given user query in Python.
Below I provide an example code block in python so that you know the desired output format:
Example 1:
```
### Query: Calculate the difference in count between P1 and P2 issues
def count_difference(p1_issues, p2_issues):
return len(p1_issues) - len(p2_issues)
count_difference("$$PREV[0]", "$$PREV[1]")
```
Example 2:
```
### Query: Extract the first five tasks from the list of all tasks.
def first_five_tasks(tasks):
return tasks[:5]
first_five_tasks("$$PREV[0]")
```
(note that there were other tool calls before this as well that were omitted)
You may devise any function of your own using a combination of sum, variables, loops, if-else statements etc.
- Make sure that the code is in correct syntax before returning.
- Don't return anything else other than the code block.
- Simply return the code block; nothing else is to be returned
You have the following user query:
{query}
'''
# ===================================================================================================================================================================================================
# ===================================================================================================================================================================================================
# ===================================================================================================================================================================================================
CRITIQUE_TEMPLATE = '''
Below you are provided the tools available in toolkit and their description :
{tools}
FORMAT INSTRUCTIONS :
{format_instructions}
Here are a few examples of sample outputs:
QUERY_EXAMPLE : What is the use of life?
OUTPUT : {{"answer" : 0 , "reason" : "The available tools are not useful to answer the query."}}
QUERY_EXAMPLE : "List all work items similar to TKT-420 and add the top 2 highest priority items to the current sprint
OUTPUT : {{"answer" : 1 , "reason" : "We can use get_similar_items, prioritize_objects, get_sprint_id, add_work_items_to_sprint to solve query."}}
QUERY_EXAMPLE : Search for youtube videos of user id DEVU-67
OUTPUT : {{"answer" : 0 ,'reason' : "no tool is present to search for youtube videos"}}
QUERY_EXAMPLE : "Create a excel file of work items in the current sprint
OUTPUT : {{"answer" : 0 , "reason" : "no tool is present to create excel file"}}
Give a similar answer, reason pair for the below query. If the answer is 1, tell me which tools you would use
QUERY : {query}
ALERT !!
- Make sure that there are no parsing errors.
'''
critique_prompt = PromptTemplate(template=CRITIQUE_TEMPLATE, input_variables=['query' ,'tools'],
partial_variables={'format_instructions' : critique_parser.get_format_instructions()})
# ===================================================================================================================================================================================================
# ===================================================================================================================================================================================================
# ===================================================================================================================================================================================================
# You are also provided the dataypes of arguments present in the user query:
# {function_signature}
# ["red" , ""$$PREV[0]"] | [
"We can use get_similar_items, prioritize_objects, get_sprint_id, add_work_items_to_sprint to solve query.",
"\n",
"Prioritize my p0 issues",
"tool_description",
"\n\nYour task is to extract the argument value from user query based on argument description and argument type.\n\nThe datatype is {arg_dtype}\n\nThe argument description is:\n{arg_description}\n\nThe above mentioned arguments have their values present in the query. You need to extract the argument value from the query.\nDon't pollute the information, stick to information provided in the user query.\n\nYour query is:\n{user_query}\n\nFORMAT INSTRUCTIONS --->\n - Don't return anything else other than the argument value.\n - Ensure that the argument value is in correct data type before returning.\n - If the argument value is not explicitly present in the query, then return \"NONE\".\n\nALERT !!!\n- If the Query contains specific keywords like $$PREV[i], where i is the index of the output you want to use, \n then it is a symbolic representation of the output and is NOT THE ACTUAL OUTPUT\n- use $$PREV[i] as whole and don't pass invalid representation like \"$$PREV\" OR \"$$PREV[]\" or i\n\nANSWER :\n",
"answer",
"What are my all issues in the triage stage under part FEAT-123? Summarize them",
"no tool is present to create excel file",
"Here are some few shots of how to create sub-task for a given tool based query, intermediate_steps and tool_description:\n",
"agent_scratchpad",
"format_instructions",
"correct_tool_name",
"reason",
"\nI want to use a tool which has a lots of arguments. I want to filter out only those arguments which can surely be extracted from the user query.\n\nBelow I provide the arguments that the tool can take along with their description:\n{arg_description}\n\n{format_instructions}\n\nBelow I provide the query, which needs to be referenced whiele deciding which arguments to filter out:\nQUERY : {query}\n\nALERT !!!\n- Don't create any new argument from user query, make sure that filtered argument have correct name.\n- If the Query contains specific keywords like $$PREV[i], then take it as a whole.\n- Stick to information provided in the user query and description of arguments.\n- Don't pollute the argument filtering with any assumptions.\n- Make sure that there are no parsing errors.\n",
"\nYou need to return a code block executing the above user query in the python.\n\nBelow I provide an example code block in python so that you know the desired output format:\nExample 1:\n```\n### Query: Calculate the difference in count between P1 and P2 issues\n\ndef count_difference(p1_issues, p2_issues):\n return len(p1_issues) - len(p2_issues)\ncount_difference(\"$$PREV[0]\", \"$$PREV[1]\")\n```\n\nExample 2:\n```\n### Query: Extract the first five tasks from the list of all tasks.\n\ndef first_five_tasks(tasks):\n return tasks[:5]\n\nfirst_five_tasks(\"$$PREV[0]\")\n```\n(note that there were other tool calls before this as well that were ommitted)\n\nYou may devise any function of your own using a combination of sum, variables, loops, if-else statements etc.\n\n- Make sure that the code is in correct syntax before returning.\n- Don't return anything else other than the code block. \n- Simply return the code block, nothing else to be returned\n\nYou have the following user query:\n{query}\n",
"We need to find the id of the object, so we must use the tool 'search_object_by_name' tool which searches for the object id based on the name of the object.",
"The tool 'who_am_i' is useful as there is a keyword 'my' which hints towards the user currently logged in. So, this tool can get the user id of the user currently logged in.",
"intermediate_steps",
"\nThere is an AI agent which is picking up tools under ReAct framework to solve user queries.\nIt misses picking up correct tool, being unable to reason out its usage for the given query.\n\nI provide you some few shots how to reason out the mistake highlight of a tool based on the user query and tool description:\n\nExample_Query : \"Prioritize my p0 issues\"\nTools missed: \nIncorrect tool uses:\nExample_MISTAKE : \"The tool 'who_am_i' is useful as there is a keyword 'my' which hints towards the user currently logged in. So, this tool can get the user id of the user currently logged in.\" \n\nExample_Query : \"Summarize high severity tickets from the customer UltimateCustomer\"\nTools missed:\nIncorrect tool uses:\nExample_MISTAKE :\"We need to find the id of the object, so we must use the tool 'search_object_by_name' tool which searches for the object id based on the name of the object.\"\n\n\nExample_Query : \"What are my all issues in the triage stage under part FEAT-123? Summarize them\"\nTools missed:\nIncorrect tool uses:\nExample_MISTAKE :\"We need to find the id of the object, so we must use the tool 'search_object_by_name' tool which searches for the object id based on the name of the object.\"\n\n- You need to provide an eye-catchy insight of why that tool should not be missed for the given query based on the user query and tool description. \n- You insight will help the agent to learn from its mistakes. Don't be super-specific to user query, keep the tool description in consideration. \n- Keep your insight within 20 words and at least 9 words. Present answer in a paragraph.\n\nUSER_QUERY : {query}\nTools missed : {tools_missed}\nIncorrect tool uses : {incorrect_tool_uses}\nAgent_MISTAKE : \n",
"no tool is present to search for youtube videos",
"\n\nBelow you are provided the tools available in toolkit and their description :\n{tools}\n\nFORMAT INSTRUCTIONS :\n{format_instructions}\n\nHere are a few examples of sample outputs:\n\nQUERY_EXAMPLE : What is the use of life?\nOUTPUT : {{\"answer\" : 0 , \"reason\" : \"The available tools are not useful to answer the query.\"}}\n\nQUERY_EXAMPLE : \"List all work items similar to TKT-420 and add the top 2 highest priority items to the current sprint\nOUTPUT : {{\"answer\" : 1 , \"reason\" : \"We can use get_similar_items, prioritize_objects, get_sprint_id, add_work_items_to_sprint to solve query.\"}}\n\n\nQUERY_EXAMPLE : Search for youtube videos of user id DEVU-67\nOUTPUT : {{\"answer\" : 0 ,'reason' : \"no tool is present to search for youtube videos\"}}\n\n\nQUERY_EXAMPLE : \"Create a excel file of work items in the current sprint\nOUTPUT : {{\"answer\" : 0 , \"reason\" : \"no tool is present to create excel file\"}}\n\nGive a similar answer, reason pair for the below query. If answer is 1, tell me what all tools you would use\n\nQUERY : {query}\n\nALERT !!\n- Make sure that there are no parsing errors.\n\n",
"Summarize high severity tickets from the customer UltimateCustomer",
"tool_name",
"The available tools are not useful to answer the query.",
"\nquery: {query}\n\nintermediate steps : {intermediate_steps}\n\ntool_name: {tool_name}\ntool_description: {tool_description}\n\ntool_input: {tool_input}\n\n\n",
"tool_input",
"\nThe user query is as follows:\n User_Query : {query}\n\nThe intermediate steps taken till now to solve the above query are as follows:\n{intermediate_steps}\n\nBelow is the next tool that needs to be used as next tool in intermediate steps: {tool_name}\n\nThe short description of the tool, to help you reason out, is as follows:\n{tool_description}\n\nYou are expected to create a sub-task for the above tool from the given user_query, tool_description and the intermediate steps taken till now.\n\nWhile creating the sub-task for above tool, adhere to tool description. \nDon't query tool for tasks which are not mentioned in tool description.\n\nFORMAT INSTRUCTIONS :\n{format_instructions}\n"
] |
2024-01-10 | Sar2580P/Dev-Rev-Inter-IIT | utils~add_tool.py |
tool_name = input("Tool Name")
tool_desc = input("Tool description")
arg_names = []
arg_descs = []
arg_types = []
while True:
arg_name = input("Argument name (type 'exit' if done): ").strip()
if arg_name.lower() == "exit":
break
arg_desc = input("Argument description: ").strip()
arg_type = input("Argument type: ").strip()
arg_names.append(arg_name)
arg_descs.append(arg_desc)
arg_types.append(arg_type)
tool_arg_dict = dict(zip(arg_names, arg_types))
tool_arg_descript_dict = dict(zip(arg_names, arg_descs))
NEW_TOOL_CLASS = """
from langchain.tools import BaseTool
from typing import Optional, Type, List, Any
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from tools.argument_mapping.get_args import fill_signature
from backend_llm.utils import llm
class {tool_name}(BaseTool):
name = "{tool_name}"  # BaseTool.name must be a string literal in the generated file
description = '''This tool is useful for {tool_description}:
The arguments:
''' + ", ".join({tool_arguments})
def _run(
self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> Any:
signature = {tool_arg_dict}
arg_description = {tool_arg_descript_dict}
column_args = fill_signature(query,function_signatures= signature , arg_description=arg_description,tool_name=self.name)
li = []
for key, value in column_args.items():
x = {{
'argument_name': key,    # use the loop variables, not string literals
'argument_value': value,
}}
li.append(x)
return li
async def _arun(
self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
) -> str:
"Use the tool asynchronously."
raise NotImplementedError("custom_search does not support async")
""".format(tool_name=tool_name, tool_description=tool_desc, tool_arguments=arg_names,
tool_arg_dict=tool_arg_dict, tool_arg_descript_dict=tool_arg_descript_dict)
with open('tools/{tool_name}.py'.format(tool_name=tool_name), "w") as f:
    f.write(NEW_TOOL_CLASS)  # the context manager closes the file, so no explicit close() is needed
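# Illustrative sketch (hypothetical values, not from the original project) of one interactive run:
#   Tool Name        -> get_customer_info
#   Tool description -> fetching customer details by id
#   Argument name    -> customer_id, description -> "ID of the customer", type -> str
#   Argument name    -> exit
# This writes tools/get_customer_info.py containing a `get_customer_info(BaseTool)` subclass whose
# _run() extracts the declared arguments from the sub-task query via fill_signature().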
| [] |
2024-01-10 | Sar2580P/Dev-Rev-Inter-IIT | tools~get_similar_work_items.py | from langchain.tools import BaseTool
from typing import Optional, List, Any
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from utils.llm_utility import llm
from utils.get_args import fill_signature
class GetSimilarWorkItems(BaseTool):
name = "get_similar_work_items"
description = '''
USAGE :
- Use this tool when you want to get work_items similar to the current work_item.
- This tool returns a list of similar work_items for the given work_id.
'''
bag_of_words = set(["similar","similar items", "similar work_items", "similar work"])
def _run(
self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> Any:
print('\ninside get_similar_work_items tool...')
signature = {
'work_id': str,
}
arg_description = {
'work_id': 'The ID of the work item for which you want to find similar items',
}
li = []
for key, value in signature.items():
arg_dtype = {
'argument_name': key,
'argument_value': value,
}
arg_descr = {
'argument_name': key,
'argument_value': arg_description[key],
}
x = fill_signature(query = query, arg_name = key , arg_dtype = arg_dtype , arg_descr = arg_descr, tool_name = self.name)
if x is not None:
li.append({
'argument_name': key,
'argument_value': x,
})
print('Extracted arguments are : ',li)
return li
async def _arun(
self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("custom_search does not support async")
| [
"\n \n USAGE :\n - Use this tool when you want to get work_items similar to the current work_item.\n - This tool returns a list of similar work_items for the given work_id. \n "
] |
2024-01-10 | Sar2580P/Dev-Rev-Inter-IIT | memory~memory.py | from queue import Queue
from langchain.docstore.document import Document
from langchain.vectorstores.chroma import Chroma
from utils.llm_utility import *
class Memory():
def __init__(self,vector_db, k=3) -> None:
self.queue = Queue(maxsize=10) # current List to be added to Long Term Memory
self.k = k
self.vector_db = vector_db
def stage(self, docs:Document):
self.queue.put(docs)
print("\033[91m {}\033[00m" .format('Document Staged in Memory...'))
def push(self):
print(f"Pushing {self.queue.qsize()} Documents...")
while not self.queue.empty():
self.vector_db.add_documents([self.queue.get()])
def pull(self, query:str,filter = None):
if filter == None:
results = self.vector_db.similarity_search(query, k=self.k , search_type='similarity')
else:
results = self.vector_db.similarity_search(query, k=self.k , search_type='similarity',filter = filter)
return results
def clear(self):
'''
Clears all items from the queue.
'''
self.queue = Queue(maxsize=10)
V_db = Chroma(embedding_function = embedding_func, persist_directory= 'database/agent_mistakes_db' ,
relevance_score_fn='similarity_search_with_score')
mistake_memory:Memory = Memory(k=3,vector_db=V_db)
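# Illustrative usage sketch (not from the original project): the stage -> push -> pull life-cycle
# of Memory. The document content and metadata below are hypothetical placeholders.
if __name__ == "__main__":
    doc = Document(
        page_content="Thought: picked works_list although the query asked for similar work items",
        metadata={"wrong_tool": "works_list", "correct_tool": "get_similar_work_items", "learning": "..."},
    )
    mistake_memory.stage(doc)   # buffered in the bounded queue
    mistake_memory.push()       # flushed into the persistent Chroma store
    hits = mistake_memory.pull("find items similar to TKT-42", filter={"wrong_tool": "works_list"})
    print(len(hits))            # at most k (= 3) similar past mistakes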
| [] |
2024-01-10 | Sar2580P/Dev-Rev-Inter-IIT | tools~get_sprint_id.py | from langchain.tools import BaseTool
from typing import Optional, List, Any
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from utils.llm_utility import llm
from utils.get_args import fill_signature
class GetSprintId(BaseTool):
name = "get_sprint_id"
description = '''
USAGE :
- This tool is used when we want to know the id of current sprint.
- Think of using it when user query contains keywords like "sprint"
'''
bag_of_words = set(["current sprint", "current sprint id", "sprint", "sprint id"])
def _run(
self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> Any:
print('\ninside get_sprint_id tool...')
# signature = {}
# arg_description = {}
# column_args = fill_signature(query,function_signatures= signature ,arg_description=arg_description, tool_name = self.name)
li = []
# for key, value in column_args.items():
# x = {
# 'argument_name': key,
# 'argument_value': value,
# }
# li.append(x)
return li
async def _arun(
self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("custom_search does not support async")
| [
"\n USAGE :\n - This tool is used when we want to know the id of current sprint.\n - Think of using it when user query contains keywords like \"sprint\"\n "
] |
2024-01-10 | Sar2580P/Dev-Rev-Inter-IIT | tools~add_work_items_to_sprint.py | from langchain.tools import BaseTool
from typing import Optional, Type, List
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from utils.get_args import fill_signature
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from utils.llm_utility import llm
class AddWorkItemsToSprint(BaseTool):
name = "add_work_items_to_sprint"
description = '''
USAGE :
- Adds or assigns the given work items to the sprint.
- Need to fill the following arguments available for tool usage -->
- "sprint_id" : the id of current sprint
- "work_ids" : list of work-items to add to sprint
'''
bag_of_words = set(["add work items to sprint", "add work items", "add to sprint", " add", "assign", "assigning"])
def _run(
self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
) :
print('\ninside add_work_items_to_sprint tool ...')
signature = {'work_ids': List[str],
'sprint_id': str ,
}
arg_description = {
'work_ids': 'A list of work item IDs to be added to the sprint',
'sprint_id': 'The ID of the sprint to which the work items should be added',
}
li = []
for key, value in signature.items():
arg_dtype = {
'argument_name': key,
'argument_value': value,
}
arg_descr = {
'argument_name': key,
'argument_value': arg_description[key],
}
x = fill_signature(query = query, arg_name = key , arg_dtype = arg_dtype , arg_descr = arg_descr, tool_name = self.name)
if x is not None:
li.append({
'argument_name': key,
'argument_value': x,
})
print('Extracted arguments are : ',li)
return li
async def _arun(
self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("custom_search does not support async")
| [
"\n USAGE :\n - Adds or assigns the given work items to the sprint. \n - Need to fill the following arguments available for tool usage -->\n - \"sprint_id\" : the id of current sprint\n - \"work_ids\" : list of work-items to add to sprint\n\n "
] |
2024-01-10 | Sar2580P/Dev-Rev-Inter-IIT | utils~parsers.py | from langchain.output_parsers import StructuredOutputParser, ResponseSchema
critique_response_schemas = [
ResponseSchema(name="answer", description="Return 1 if user query can be answered by the available tools based on tool description, else return 0."),
ResponseSchema(name="reason", description="Reason why available tools can/ cannot answer the user query based on tool descriptions.")
]
critique_parser = StructuredOutputParser.from_response_schemas(critique_response_schemas)
#_______________________________________________________________________________________________________________________________________________________________
sub_task_response_schemas = [
ResponseSchema(name="tool_input", description="The next consecutive sub-task in intermediate steps for the above tool based on above user_query and tool description"),
ResponseSchema(name="reason", description="Reason why the tool should be chosen as next consecutive tool in intermediate steps based on tool_description and user_query, at max 20 words and atleast 15 words")
]
sub_task_parser = StructuredOutputParser.from_response_schemas(sub_task_response_schemas)
#_______________________________________________________________________________________________________________________________________________________________
arg_filter_response_schemas = [
ResponseSchema(name="Arguments", description="The list of filtered arguments whose value is available in the user query"),
]
arg_filter_parser = StructuredOutputParser.from_response_schemas(arg_filter_response_schemas)
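#_______________________________________________________________________________________________________________________________________________________________
# Illustrative sketch (not from the original project): the fenced-JSON response below is a
# hypothetical example of the format these StructuredOutputParser instances expect.
if __name__ == "__main__":
    sample_response = '```json\n{"answer": 1, "reason": "works_list and summarize_objects cover the query."}\n```'
    parsed = critique_parser.parse(sample_response)
    print(parsed["answer"], parsed["reason"])
    # critique_parser.get_format_instructions() produces the text injected into the prompts
    # as the {format_instructions} partial variable.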
| [] |
2024-01-10 | Sar2580P/Dev-Rev-Inter-IIT | tools~logic_tool.py | from langchain.tools import BaseTool
from typing import Optional, List, Any
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
import sys, os
sys.path.append(os.getcwd())
from utils.llm_utility import llm
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from utils.templates_prompts import LOGICAL_TEMPLATE
import ast
generate_code_prompt = PromptTemplate(template=LOGICAL_TEMPLATE, input_variables=['query' , 'language'])
generate_code = LLMChain(llm = llm , prompt=generate_code_prompt)
class LogicalTool(BaseTool):
name = "logic_tool"
description = '''
- Use this tool, for various logical operations like conditional statements, while loops, addition, subtraction, iterate over lists etc.
- The input to this tool is symbolic and the output is a pseudocode to execute the task on input.
- By symbolic, it means that it is in '$$PREV[i]' format, so, llm can't perform logic on it.
'''
def _run(
self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> Any:
print('\ninside logic_tool tool...')
code = generate_code.run({'query' : query , 'language' : 'python'})
print("\033[97m {}\033[00m" .format('Generated Code : \n{i}'.format(i=code)))
li = []
li.append({
'code' : code,
})
return li
async def _arun(
self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("custom_search does not support async") | [
"language",
"\n - Use this tool, for various logical operations like conditional statements, while loops, addition, subtraction, iterate over lists etc. \n - The input to this tool is symbolic and the output is a pseudocode to execute the task on input.\n - By symbolic, it means that it is in '$$PREV[i]' format, so, llm can't perform logic on it.\n "
] |
2024-01-10 | Sar2580P/Dev-Rev-Inter-IIT | agent~mistakes_selection.py | from langchain.prompts import PromptTemplate
from utils.llm_utility import llm
from langchain.chains import LLMChain
from typing import List, Union
from langchain.docstore.document import Document
import ast
import sys, os
sys.path.append(os.getcwd())
from utils.templates_prompts import MISTAKE_SELECTION
from memory.memory import mistake_memory
prompt = PromptTemplate(template=MISTAKE_SELECTION, input_variables=["input", "mistake"])
chain = LLMChain(llm=llm, prompt=prompt)
def choose_mistake(mistake, tool_task):
return chain.run({'input':tool_task, 'mistake': mistake})
def analyse(user_query, wrong_tool_name, tool_task):
final_mistakes = []
if tool_task == None:
return final_mistakes
print("\033[91m {}\033[00m" .format('analyse (mistake_selection)'))
print("\033[91m {}\033[00m" .format('\tPulling mistakes from agent memory... (mistake_selection)'))
filter = {
'wrong_tool': wrong_tool_name,
}
# mistakes = mistake_memory.pull(query=user_query, filter=filter) if user_query != '' else ''
mistakes = mistake_memory.pull(query=tool_task, filter=filter) if user_query != '' else ''
mistaken_tool_set = set()
if isinstance(mistakes , str) or mistakes == []:
return 'No mistakes found relevant to this query'
i=0
for mistake in mistakes:
mistaken_tool = mistake.metadata['correct_tool']
if not mistaken_tool in mistaken_tool_set :
mistaken_tool_set.add(mistaken_tool)
ans = choose_mistake(mistake.metadata['learning'] , tool_task=mistake.page_content)
if '1' in ans:
i+=1
print("\033[91m {}\033[00m" .format('\tchosen_mistakes : {i} (mistake_selection)'.format(i=i)))
# print("\033[93m {}\033[00m" .format(prompt.template.format(input=user_query , mistake=mistake.page_content)))
final_mistakes.append(mistake)
return final_mistakes
# for mistake in mistakes:
# # mistaken_tool = mistake.metadata['correct_tool']
# # if not mistaken_tool in mistaken_tool_set:
# # mistaken_tool_set.add(mistaken_tool)
# # ans = choose_mistake(user_query=user_query , mistake=mistake.metadata['learning'])
# # if ans == 1:
# # i+=1
# # print("\033[91m {}\033[00m" .format('\tchosen_mistakes : {i} (mistake_selection)'.format(i=i)))
# # print("\033[93m {}\033[00m" .format(prompt.template.format(input=user_query , mistake=mistake.page_content)))
# final_mistakes.append(mistake)
# print("Final Mistakes = ", final_mistakes)
# return final_mistakes
| [
"input"
] |
2024-01-10 | Sar2580P/Dev-Rev-Inter-IIT | train_analysis~train.py | import pandas as pd
import sys, os
sys.path.append(os.getcwd())
from agent_executor.agent_executer import agent_executor
from langchain.docstore.document import Document
from icecream import ic
from langchain.callbacks import get_openai_callback
from utils.chains import *
from memory.memory import mistake_memory
data = pd.read_csv('DATA_DEVREV_72_tt_splitted/reducd_combined_train.csv' ).iloc[:1,:]
# print(data)
# query = 'List all high severity tickets coming in from slack from customer abc123 and generate a summary of them.'
# ground_json = '''
# [
# {
# "tool_name": "search_object_by_name",
# "arguments": [
# {
# "argument_name": "query",
# "argument_value": "Cust123"
# }
# ]
# },
# {
# "tool_name": "works_list",
# "arguments": [
# {
# "argument_name": "ticket.rev_org",
# "argument_value": "$$PREV[0]"
# },
# {
# "argument_name": "ticket.severity",
# "argument_value": ["high"]
# },
# {
# "argument_name": "ticket.source_channel",
# "argument_value": ["slack"]
# },
# {
# "argument_name": "type",
# "argument_value": ["ticket"]
# }
# ]
# },
# {
# "tool_name": "summarize_objects",
# "arguments": [
# {
# "argument_name": "objects",
# "argument_value": "$$PREV[1]"
# }
# ]
# }
# ]
# '''
# data = [(query , ground_json)]
# print(data.shape)
#___________________________________________________________________________________________
agent_executor.train()
def build_experience(x):
thoughts_action = '\n'.join(x['intermediate_thoughts'])
y = create_tool_experience_chain.run({"query": x['query'] , "agent_scratchpad" : thoughts_action ,
"correct_tool_name": x['correct_tool'] ,"tool_description": x['correct_tool_description'],
})
# print("TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT")
# print(create_tool_experience_chain.prompt)
return y.strip('\n').strip()
#___________________________________________________________________________________________
ct = 0
for i in range(len(data)):
print("\033[1;32m {}\033[00m" .format('QUERY COUNT : {i}'.format(i=i)))
query, ground_json = data.iloc[i,0], data.iloc[i,1]
# query, ground_json = data[0][0] , data[0][1]
print("\033[1;32m {}\033[00m" .format('QUERY : ') , "\033[93m {}\033[00m" .format(query))
print("\033[1;32m {}\033[00m" .format('Ground JSON :') , "\033[93m {}\033[00m" .format(ground_json))
agent_executor.get_tool_lists(ground_json)
with get_openai_callback() as cb:
x = agent_executor(inputs={"input": query})
thought_execution_chain , checkpoints = agent_executor.thought_execution_chain , agent_executor.checkpoints
for tool_index, value in checkpoints.items():
x = {
"query":query,
"correct_tool" :value['correct_tool'] ,
"wrong_tool" : value['wrong_tool'] ,
"wrong_tool_description" : value['wrong_tool_description'] ,
"correct_tool_description" :value['correct_tool_description'] ,
"intermediate_thoughts" : thought_execution_chain[:tool_index],
"tool_thought": value['thought']
}
human_eval = 'n'
# human_eval = input("Do you want to correct the reasoning? (y/n) :")
if human_eval.lower() == 'n':
experience = build_experience(x)
else :
experience = input("This has been the mistake summary : \n\t{x}. \nPlease write the correct reasoning :".format(x=x))
# learning = '- MISTAKE_HIGHLIGHT : {b}\n'.format(b = experience)
learning = '- MISTAKE_HIGHLIGHT : {b}\n'.format(b = experience)
metadata = {
# 'query': x['query'],
'correct_tool': x['correct_tool'],
'wrong_tool': x['wrong_tool'],
'learning': learning
}
print('metadata : ' , metadata)
print('tool_thought : ' , x['tool_thought'])
doc = Document(page_content=x['tool_thought'] , metadata=metadata)
mistake_memory.stage(doc)
print("\033[96m {}\033[00m" .format(agent_executor.return_schema))
user_decision = 'y'
# user_decision = input('Do you want to save the experience? (y/n) : ')
if user_decision.lower() == 'y':
ct+= mistake_memory.queue.qsize()
mistake_memory.push()
else:
mistake_memory.clear()
print("\033[91m {}\033[00m" .format('skipping experience saving...'))
print("\033[91m {}\033[00m" .format('---------------- QUERY_COST : $ {cost}---------------- MISTAKES LEARNED : {ct}-------------------- QUERY TOKENS : {tokens}-----------------'.format(cost = round(cb.total_cost, 5) ,
ct = ct, tokens = cb.total_tokens)))
| [] |
2024-01-10 | Sar2580P/Dev-Rev-Inter-IIT | memory~tool_memory.py | import sys, os
sys.path.append(os.getcwd())
from memory.memory import Memory
from utils.llm_utility import *
from langchain.vectorstores.chroma import Chroma
from langchain.docstore.document import Document
from evaluator import *
#__________________________________________________________________________________________________________________________
tool_database = Chroma(embedding_function = embedding_func, persist_directory= 'database/tool_mistake_db' ,
relevance_score_fn='similarity_search_with_score')
tool_mistake_memory = Memory(k=2, vector_db=tool_database)
#__________________________________________________________________________________________________________________________
def build_tool_experience(correct_tool, llm_tool):
# try:
response, analogy, correct_arguments = validate(correct_tool, llm_tool)
# except:
# response, analogy, correct_arguments = self.true_tools[self.tool_count] == output.tool, " ", " "
if response is not True:
print("\033[91m {}\033[00m" .format('Tool Arguments are not correct... (tool_memory)'))
print("\033[91m {}\033[00m" .format('Staging tool experience in Memory... (tool_memory)'))
experience = analogy
metadata = {
'tool_name': llm_tool[0]['tool_name']
}
doc = Document(page_content=experience , metadata=metadata)
tool_mistake_memory.stage(doc)
return response, correct_arguments
def retrieve_tool_experience(tool_name:str, user_query:str):
if(tool_mistake_memory.queue.empty() == False):
tool_mistake_memory.push()
filter = {
'tool_name':tool_name,
}
print("\033[91m {}\033[00m" .format('\nPulling argument mistakes from tool memory... (tool_memory)'))
tool_mistakes = tool_mistake_memory.pull(query=user_query,filter=filter) if user_query != '' else ''
return tool_mistakes
| [] |
2024-01-10 | Sar2580P/Dev-Rev-Inter-IIT | tools~prioritize_objects.py | from langchain.tools import BaseTool
from typing import Optional, List, Any
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from utils.llm_utility import llm
from utils.get_args import fill_signature
class Prioritize(BaseTool):
name = "prioritize_objects"
description = '''
- Use this tool when asked to prioritize the objects.
'''
bag_of_words = set(["prioritize", "priority", "prioritize objects", "prioritization"])
def _run(
self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> Any:
print('\ninside Prioritize_objects tool...')
signature = {
'objects': List[str],
}
arg_description = {
'objects': 'the list of objects to be prioritized',
}
li = []
for key, value in signature.items():
arg_dtype = {
'argument_name': key,
'argument_value': value,
}
arg_descr = {
'argument_name': key,
'argument_value': arg_description[key],
}
query = query.strip('\n').strip()
ans = query ## $$PREV[*] is a special keyword that means "use the previous value of this argument"
if len(query) != 9:   # a bare "$$PREV[i]" reference is exactly 9 characters, so anything longer needs LLM extraction
ans = fill_signature(query = query, arg_name = key , arg_dtype = arg_dtype , arg_descr = arg_descr, tool_name = self.name)
if ans.strip('\n').strip() != 'NONE':
li.append({
'argument_name': key,
'argument_value': ans,
})
print('Extracted arguments are : ',li)
return li
async def _arun(
self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("custom_search does not support async") | [
"\n - Use this tool when asked to prioritize the objects. \n "
] |
2024-01-10 | Sar2580P/Dev-Rev-Inter-IIT | utils~chains.py |
from utils.templates_prompts import *
from langchain.chains import LLMChain
from utils.llm_utility import *
llm_critique = LLMChain(llm = small_llm , prompt=critique_prompt)
sub_task_chain = LLMChain(prompt=sub_task_prompt , llm=llm)
create_tool_experience_chain = LLMChain(llm=small_llm, prompt=missed_tool_prompt) | [] |
2024-01-10 | Sar2580P/Dev-Rev-Inter-IIT | train_analysis~predict.py | import pandas as pd
import sys, os
sys.path.append(os.getcwd())
from agent_executor.agent_executer import agent_executor
from langchain.docstore.document import Document
from langchain.callbacks import get_openai_callback
import time
import json
data = pd.read_csv('DATA_DEVREV_72_tt_splitted/test_DATA_multi_class_3.csv').iloc[:10,: ]
path = 'prediction_data/test_DATA_multi_class_3.csv'
if not os.path.exists(path):
prediction_df = pd.DataFrame(columns=['query' , 'groundJson' , 'predicted_json' , 'latency (in seconds)' , 'queryCost' , 'queryTokens'])
prediction_df.to_csv(path, index=False)
prediction_df = pd.read_csv(path)
agent_executor.eval()
ct = 0
for i in range(len(data)):
print("\033[1;32m {}\033[00m" .format('QUERY COUNT : {i}'.format(i=i)))
query, ground_json = data.iloc[i , 0] , data.iloc[i , 1]
print("\033[1;32m {}\033[00m" .format('QUERY : ') , "\033[93m {}\033[00m" .format(query))
print("\033[1;32m {}\033[00m" .format('Ground JSON :') , "\033[93m {}\033[00m" .format(ground_json))
with get_openai_callback() as cb:
start = time.time()
x = agent_executor(inputs={"input": query})
try:
predict_json = json.dumps(agent_executor.return_schema)
print(type(predict_json))
latency = time.time() - start
query_cost = cb.total_cost
query_tokens = cb.total_tokens
prediction_df.loc[len(prediction_df)] = [query , ground_json , predict_json , round(latency,2) , round(query_cost,5) , query_tokens]
except:
print("Skipping query ....")
print("\033[91m {}\033[00m".format('---------------- QUERY_COST : $ {cost}---------------- QUERY TOKENS : {tokens}-----------------'.format(cost = round(cb.total_cost, 5) ,
tokens = cb.total_tokens)))
prediction_df.to_csv(path, index=False)
import pandas as pd
# df1 = pd.read_csv('prediction_data/test_DATA_multi_class_1.csv')
# df2 = pd.read_csv('prediction_data/test_DATA_multi_class_2.csv')
# df3 = pd.read_csv('prediction_data/test_DATA_multi_class_3.csv')
# df4 = pd.read_csv('prediction_data/test_DATA_single.csv')
# df = pd.concat([df1, df2, df3, df4], axis = 0)
# print(df.shape)
# df.to_csv('prediction_data/FINAL_CSV.csv') | [] |
2024-01-10 | Sar2580P/Dev-Rev-Inter-IIT | utils~get_args.py | import sys , os
sys.path.append(os.getcwd())
from typing import List, Union, Any, Dict
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from utils.llm_utility import llm, small_llm
from memory.tool_memory import retrieve_tool_experience
from utils.templates_prompts import TOOLS_PROMPT_EXAMPLES, ARG_FILTER_PROMPT
import re, ast
from utils.tool_output_parser import parser
from icecream import ic
from utils.parsers import arg_filter_parser
from langchain.output_parsers import StructuredOutputParser, ResponseSchema
from langchain.output_parsers import OutputFixingParser
from langchain.output_parsers import RetryWithErrorOutputParser
#-------------------------------------------------------------------------------------------------------------------------------------------------
# response_schemas = [
# ResponseSchema(name="argument name", description="the name of the argument"),
# ResponseSchema(name="argument value", description="The value of the argument extracted from the query. Don't write anything else here.")
# ]
# output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
arg_extraction_prompt = PromptTemplate(template=TOOLS_PROMPT_EXAMPLES ,
input_variables=['arg_description','arg_dtype' ,'user_query'] , # ,'memory_examples'
# partial_variables= {"format_instructions" : output_parser.get_format_instructions()}
)
signature_chain = LLMChain(llm = small_llm, prompt = arg_extraction_prompt , verbose=False)
arg_filter_prompt = PromptTemplate(template=ARG_FILTER_PROMPT,
input_variables=['query', 'arg_description'],
partial_variables={"format_instructions": arg_filter_parser.get_format_instructions()}
)
arg_filter_chain = LLMChain(llm = small_llm, prompt = arg_filter_prompt , verbose=False)
# new_parser = OutputFixingParser.from_llm(parser=output_parser, llm=llm)
#-------------------------------------------------------------------------------------------------------------------------------------------------
def fill_signature(query:str, arg_name:str , arg_dtype: dict , arg_descr :dict, tool_name:str)->Dict[str,Any] :
if(len(query.strip('\n').strip().split()) == 1):
return query
extracted_args = signature_chain.run({'arg_description':arg_descr,'arg_dtype':arg_dtype, 'user_query':query})
extracted_args = extracted_args.strip('\n').strip(' ')
extracted_args = re.sub(r'""', '"',extracted_args)
if arg_dtype['argument_value'] == List[str]:
if extracted_args[0] != '[':
extracted_args = '['+extracted_args+']'
if arg_dtype['argument_value'] == str:
if extracted_args[0]=='[':
extracted_args= extracted_args[1:-1]
return extracted_args.strip('\n').strip(' ')
#-------------------------------------------------------------------------------------------------------------------------------------------------
def filter_arguments(query:str, arg_name , arg_descr :dict)->List[str] :
argument_input = '\n'.join(['{name} : {descr}'.format(name = arg , descr = arg_descr[arg]) for arg in arg_name])
response = arg_filter_chain.run({'query':query, 'arg_description':argument_input})
x = None
try :
output = arg_filter_parser.parse(response)
print(output)
x = output['Arguments']
except Exception as e:
new_parser = OutputFixingParser.from_llm(parser=arg_filter_parser, llm=llm)
output = new_parser.parse(response)
print(output)
x = output['Arguments']
final_args = []
if type(x) is str:
x = x.split(',')
for arg in x:
arg = arg.strip().strip('\n')
if arg in arg_name:
final_args.append(arg)
return final_args
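#-------------------------------------------------------------------------------------------------------------------------------------------------
# Illustrative usage sketch (not from the original project): a hypothetical call into fill_signature.
# It goes through the LLM chain above, so an OpenAI key must be configured for it to actually run.
if __name__ == "__main__":
    value = fill_signature(
        query="Add work items $$PREV[1] to sprint_id $$PREV[2]",
        arg_name="work_ids",
        arg_dtype={"argument_name": "work_ids", "argument_value": List[str]},
        arg_descr={"argument_name": "work_ids", "argument_value": "A list of work item IDs to be added to the sprint"},
        tool_name="add_work_items_to_sprint",
    )
    print(value)  # expected to come back as a list-style string such as ["$$PREV[1]"]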
| [
"user_query",
"arg_description",
"format_instructions"
] |
2024-01-10 | Sar2580P/Dev-Rev-Inter-IIT | tools~who_am_i.py | from langchain.tools import BaseTool
from typing import Optional, List, Any
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from utils.llm_utility import llm
from utils.get_args import fill_signature
class WhoAmI(BaseTool):
name = "who_am_i"
description = '''
- Use this tool when person uses personal pronouns like "my", "mine" ,"I" , "his" , "her", "him" in the query
- This tool returns the user_id of the person being referred in query with personal pronoun.
- The user_id can then be used by other tools like works_list
'''
bag_of_words = set(["my", "me", "mine", " i ", "myself", "who am i", "whoami"])
def _run(
self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> Any:
print('\ninside who_am_i tool...')
return list()
async def _arun(
self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("custom_search does not support async")
| [
"\n - Use this tool when person uses personal pronouns like \"my\", \"mine\" ,\"I\" , \"his\" , \"her\", \"him\" in the query \n - This tool returns the user_id of the person being referred in query with personal pronoun.\n - The user_id can then be used by other tools like works_list\n \n "
] |
2024-01-10 | Sar2580P/Dev-Rev-Inter-IIT | utils~llm_utility.py | from langchain.llms.openai import OpenAI
import yaml
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv()) # read local .env file
from langchain.callbacks import StdOutCallbackHandler, WandbCallbackHandler
from datetime import datetime
from langchain.embeddings import OpenAIEmbeddings
from chromadb.api.types import Documents, Embeddings
# session_group = datetime.now().strftime("%m.%d.%Y_%H.%M.%S")
# wandb_callback = WandbCallbackHandler(
# job_type="inference",
# project="langchain_callback_demo",
# group=f"minimal_{session_group}",
# name="llm",
# tags=["test"],
# )
# callbacks = [StdOutCallbackHandler(), wandb_callback]
#_________________________________________________________________________________________
# small_llm = OpenAI(temperature=0.0 ,frequency_penalty = 0.1 ,n = 5 ,max_tokens=1000, model="gpt-3.5-turbo-instruct")
from langchain.chat_models import ChatOpenAI
llm = ChatOpenAI(temperature=0, model = 'gpt-4-1106-preview')
small_llm = ChatOpenAI(temperature=0, model = 'gpt-4-1106-preview')
# llm = OpenAI(temperature=0.00 ,frequency_penalty = 0.1 ,n = 5 ,max_tokens=1000, model="gpt-3.5-turbo-instruct")
embedding_func = OpenAIEmbeddings()
#_________________________________________________________________________________________
def load_config(CONFIG_PATH):
with open(CONFIG_PATH, 'r') as f:
config = yaml.safe_load(f)
return config
# config = load_config('backendPython/config.yaml')
#_________________________________________________________________________________________ | [] |
2024-01-10 | Sar2580P/Dev-Rev-Inter-IIT | tools~work_list.py | from langchain.tools import BaseTool
from typing import Optional, Type, List, Any, Tuple
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from utils.get_args import *
from utils.llm_utility import llm
from langchain.output_parsers import StructuredOutputParser, ResponseSchema
class WorkList(BaseTool):
name = "works_list"
description = '''
- This tool can search and return the relevant work-items on the basis of various search filters.
- Below are the arguments present which can be used to filter the work-items .
- Whenever the query contains below arguments as keywords, give this tool a try.
Following are the possible arguments with their description that the tool can take -->
- 'applies_to_part': for accessing work items applicable to a particular part of the project.
- 'created_by': for accessing the work items created by a particular person.
- 'issue.priority': For accessing work_items with issues of a particular priority. Can be either of types --> "p0" , "p1" , "p2".
- 'issue.rev_orgs': For accessing the work-items with issues of provided rev_orgs.
- 'limit' : Limiting the maximum no. of work items to return back, DEFAULT : "all"
- 'owned_by': Accessing the work items owned by a particular user id
- 'stage.name': Accessing work items belonging to a particular stage
- 'ticket.needs_response': Accessing work_items with tickets that needs response or not, must be either True or False,
- 'ticket.rev_org': Accessing work_items with ticket belonging to a particular rev_org
- 'ticket.severity': Accessing work items on the basis of ticket severity. MUST BE ONE OF --> 'blocker' , 'high' , 'medium' , 'low',
- 'ticket.source_channel': Accessing the work-items with tickets belonging to the provided source channel
- 'type': Accessing work-items on the basis of type, MUST BE one of --> 'issues', 'ticket' , 'task'
'''
def _run(
self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> Any:
print('\ninside work_list tool...')
filtered_signature , filtered_arg_description = self._filtered_arguments(query)
li = []
for key, value in filtered_signature.items():
arg_dtype = {
'argument_name': key,
'argument_value': value,
}
arg_descr = {
'argument_name': key,
'argument_value': filtered_arg_description[key],
}
x = fill_signature(query = query, arg_name = key , arg_dtype = arg_dtype , arg_descr = arg_descr, tool_name = self.name)
if 'NONE' in x:
continue
# if filtered_signature[key] == List[str]:
# print('ooooooooooooooooooooo')
# if x[0] != '[':
# x = '[' + x + ']'
# if x.strip('\n').strip() != 'NONE':
li.append({
'argument_name': key,
'argument_value': x,
})
print('Extracted arguments are : ',li)
return li
#____________________________________________________________________________________________________________________________________
def _filtered_arguments(self, query: str) -> Tuple[dict, dict]:
"""Returns the filtered arguments and their descriptions."""
signature = {
'applies_to_part' : List[str] ,
'created_by': List[str] ,
'issue.rev_orgs': List[str] ,
'owned_by': List[str] ,
'ticket.needs_response': bool ,
'ticket.rev_org': List[str] ,
'ticket.source_channel': str ,
}
arg_description = {
'applies_to_part': 'for accessing work items applicable to a particular part of the project.',
'created_by': 'for accessing the work items created by a particular person.',
'issue.priority':' For accessing work_items with issues of a particular priority. Can be either of types --> "p0" , "p1" , "p2".' ,
'issue.rev_orgs': 'For accessing the work-items with issues of provided rev_orgs.',
'limit' : 'Limiting the maximum no. of work items to return back, DEFAULT : "all" ',
'owned_by': 'Accessing the work items owned by a particular user id',
'stage.name':' Accessing work items belonging to a particular stage',
'ticket.needs_response': 'Accessing work_items with tickets that needs response or not, must be either "True" or "False"',
'ticket.rev_org':' Accessing work_items with ticket belonging to a particular rev_org',
'ticket.severity': "Accessing work items on the basis of ticket severity. MUST BE ONE OF --> 'blocker' , 'high' , 'medium' , 'low'," ,
'ticket.source_channel':' Accessing the work-items with tickets belonging to the provided source channel',
'type': "Accessing work-items on the basis of type, MUST BE one of --> 'issues', 'ticket' , 'task'"
}
filtered_signature = {}
filtered_arg_description = {}
query = query.lower().strip()
arguments = arg_description.keys()
if 'p0'in query or 'p1'in query or 'p2'in query:
filtered_signature['issue.priority'] = str
filtered_arg_description['issue.priority'] = arg_description['issue.priority']
if 'all' in query or 'limit..\b' in query:
filtered_signature['limit'] = int
filtered_arg_description['limit'] = arg_description['limit']
if 'issues' in query or 'ticket' in query or 'task' in query:
filtered_signature['type'] = List[str]
filtered_arg_description['type'] = arg_description['type']
if 'blocker' in query or 'high' in query or 'medium' in query or 'low' in query:
filtered_signature['ticket.severity'] = List[str]
filtered_arg_description['ticket.severity'] = arg_description['ticket.severity']
if 'stage' in query:
filtered_signature['stage.name'] = List[str]
filtered_arg_description['stage.name'] = arg_description['stage.name']
if 'channel' in query.lower():
filtered_signature['ticket.source_channel'] = str
filtered_arg_description['ticket.source_channel'] = arg_description['ticket.source_channel']
x = set(filtered_signature.keys())
x.add('issue.priority')
x.add('limit')
x.add('ticket.source_channel')
x.add('type')
x.add('ticket.severity')
x.add('stage.name')
remaining_arguments = set(arguments) - x
remaining_arguments = filter_arguments(query, remaining_arguments, arg_description)
for arg in remaining_arguments:
filtered_signature[arg] = signature[arg]
filtered_arg_description[arg] = arg_description[arg]
print(filtered_arg_description)
return filtered_signature, filtered_arg_description
#____________________________________________________________________________________________________________________________________
async def _arun(
self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("custom_search does not support async")
| [
"\n - This tool can search and return the relevant work-items on the basis of various search filters.\n - Below are the arguments present which can be used to filter the work-items .\n - Whenever the query contains below arguments as keywords, give this tool a try.\n \n Following are the possible arguments with their description that the tool can take -->\n - 'applies_to_part': for accessing work items applicable to a particular part of the project.\n - 'created_by': for accessing the work items created by a particular person.\n - 'issue.priority': For accessing work_items with issues of a particular priority. Can be either of types --> \"p0\" , \"p1\" , \"p2\".\n - 'issue.rev_orgs': For accessing the work-items with issues of provided rev_orgs.\n - 'limit' : Limitig the maximum no. of work itmes to return back, DEFAULT : \"all\" \n - 'owned_by': Accessing the work items owned by a particular user id\n - 'stage.name': Accessing work items belonging to a particular stage\n - 'ticket.needs_response': Accessing work_items with tickets that needs response or not, must be either True or False,\n - 'ticket.rev_org': Accessing work_items with ticket belonging to a particular rev_org\n - 'ticket.severity': Accessing work items on the basis of ticket severity. MUST BE ONE OF --> 'blocker' , 'high' , 'medium' , 'low',\n - 'ticket.source_channel': Accessing the work-items with tickets belonging to the provided source channel\n - 'type': Accessing work-items on the basis of type, MUST BE one of --> 'issues', 'ticket' , 'task'\n "
] |
2024-01-10 | Sar2580P/Dev-Rev-Inter-IIT | agent~tool_collection.py | from langchain.tools import BaseTool
from typing import Optional, List, Any
import sys, os
sys.path.append(os.getcwd())
from tools.work_list import WorkList
from tools.summarize_objects import Summarize
from tools.add_work_items_to_sprint import AddWorkItemsToSprint
from tools.create_actionable_tasks_from_text import CreateActionableTasksFromText
from tools.get_similar_work_items import GetSimilarWorkItems
from tools.get_sprint_id import GetSprintId
from tools.prioritize_objects import Prioritize
from tools.search_object_by_name import SearchObjectByName
from tools.who_am_i import WhoAmI
from utils.llm_utility import llm
from tools.logic_tool import LogicalTool
import icecream as ic
task_tools = [
WhoAmI(),
SearchObjectByName(),
GetSprintId(),
AddWorkItemsToSprint(),
GetSimilarWorkItems(),
WorkList() ,
Summarize() ,
# LogicalTool(),
CreateActionableTasksFromText(),
Prioritize(),
]
def get_relevant_tools(query: str ) -> List[BaseTool]:
"""Returns the list of relevant tools for the query."""
relevant_tools = []
for tool in task_tools:
if not hasattr(tool, "bag_of_words"):
relevant_tools.append(tool)
continue
# if tool.name == 'search_object_by_name':
# relevant_tools.append(tool)
# continue
tool_bag_of_words = tool.bag_of_words
for word in tool_bag_of_words:
if word in query.lower().strip():
relevant_tools.append(tool)
break
return relevant_tools
# x = get_relevant_tools("Summarize all tickets needing a response in the 'support' rev organization.")
# print(x)
| [] |
2024-01-10 | LLNL/AutoCog | autocog~lm~__init__.py |
from .llama import Llama
from .openai import OpenAI
from .transformers import TfLM
| [] |
2024-01-10 | LLNL/AutoCog | share~mmlu-exams~utility.py |
import os
import sys
import json
import time
import tqdm
import random
import asyncio
import itertools
from autocog import CogArch
from autocog.lm import OpenAI, TfLM, Llama
mcq_checkers = {
'select' : lambda r,d,i: int(r["answer"]) == i+1,
'repeat' : lambda r,d,i: r["answer"] == d['choices'][i]
}
def mmlu_create_arch(library_path, patterns):
arch = CogArch()
scorers = {}
for (tag,(pattern,mode,kwargs)) in patterns.items():
arch.load(tag=tag, filepath=f'{library_path}/{pattern}-{mode}.sta', **kwargs)
scorers.update({ tag : mcq_checkers[mode] })
return (arch, scorers)
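# Illustrative usage sketch (the tag, pattern file name, and empty kwargs below are assumptions,
# not taken from the repository): `patterns` maps a tag to (pattern, mode, kwargs), where `mode`
# must be a key of `mcq_checkers` and `{library_path}/{pattern}-{mode}.sta` must exist on disk.
#   arch, scorers = mmlu_create_arch('./library', {'baseline': ('mcq', 'select', {})})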
def mmlu_register_openai(arch, length, **kwargs):
arch.orchestrator.LMs.update({ k : OpenAI(max_tokens=l, **kwargs) for (k,l) in length.items() })
def mmlu_register_tflm(arch, length, model_path='gpt2-medium', device='auto', **kwargs):
model_kwargs = TfLM.create(model_path=model_path, device=device)
arch.orchestrator.LMs.update({ k : TfLM(completion_kwargs={ 'max_new_tokens' : l }, **model_kwargs, **kwargs) for (k,l) in length.items() })
def mmlu_register_llama_cpp(arch, length, model_path='/workspace/models/llama/7B/ggml-model-q4_0.bin', n_ctx=2048, **kwargs):
model_kwargs = Llama.create(model_path=model_path, n_ctx=n_ctx)
arch.orchestrator.LMs.update({ k : Llama(completion_kwargs={ 'max_tokens' : l }, **model_kwargs, **kwargs) for (k,l) in length.items() })
def mmlu_register_local(arch, model, length, size=None, quant=None, use_path_length_normalization=False, model_basedir='/workspace/models', **kwargs):
text_length = '-'.join([ str(l) for (k,l) in sorted(length.items(), key=lambda x: x[0]) ])
if quant is None:
mmlu_register = mmlu_register_tflm
if size is None:
label = f'{model}-{text_length}'
model_path = model
else:
label = f'{model}-{size}-{text_length}'
model_path = f'{model_basedir}/{model}/{size}'
assert os.path.exists(model_path), f"model_path={model_path}"
else:
mmlu_register = mmlu_register_llama_cpp
if size is None:
label = f'{model}-{quant}-{text_length}'
model_path = f'{model_basedir}/{model}/ggml-model-{quant}.bin'
else:
label = f'{model}-{size}-{quant}-{text_length}'
model_path = f'{model_basedir}/{model}/{size}/ggml-model-{quant}.bin'
assert os.path.exists(model_path), f"model_path={model_path}"
label = label.replace('/','_')
if use_path_length_normalization:
label += '-norm'
mmlu_register(arch, model_path=model_path, use_path_length_normalization=use_path_length_normalization, length={ 'text' : 0, 'thought' : length['thought'], 'justification' : length['justification']}, **kwargs)
return label
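# Illustrative call sketch (model/size/quant/length values are assumptions): passing `quant`
# selects the llama.cpp backend, and the returned label encodes the model and completion lengths.
#   label = mmlu_register_local(arch, model='llama', size='7B', quant='q4_0',
#                               length={'text': 0, 'thought': 20, 'justification': 50})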
def mmlu_data(dataset_path=None):
if os.path.exists('mmlu-data.json'):
return json.load(open('mmlu-data.json'))
if dataset_path is None:
dataset_path = './mmlu-data'
if not os.path.exists(dataset_path):
raise NotImplementedError("Download MMLU dataset")
raise NotImplementedError("Extract MMLU dataset to single JSON")
def mmlu_list(data):
modes = list(set([ d['mode'] for d in data ]))
print(f"By modes: ({len(modes)})")
for mode in modes:
mmlu_subset = [ d for d in data if d['mode'] == mode ]
print(f" - {mode}: {len(mmlu_subset)}")
topics = list(set([ d['topic'] for d in data ]))
print(f"By topics: ({len(topics)})")
for topic in topics:
mmlu_subset = [ d for d in data if d['topic'] == topic ]
print(f" - {topic}: {len(mmlu_subset)}")
for mode in modes:
mmlu_subsubset = [ d for d in mmlu_subset if d['mode'] == mode ]
if len(mmlu_subsubset) > 0:
print(f" - {mode}: {len(mmlu_subsubset)}")
def mmlu_subset(dataset, topic=None, mode=None, limit=None, shuffle=True):
data = []
for d in dataset:
on_topic = topic is None or ( isinstance(topic,str) and d['topic'] == topic ) or ( isinstance(topic,list) and d['topic'] in topic )
on_mode = mode is None or ( isinstance(mode, str) and d['mode' ] == mode ) or ( isinstance(mode, list) and d['mode' ] in mode )
if on_topic and on_mode:
data.append(d)
if shuffle:
random.shuffle(data)
if limit is not None:
data = data[:limit]
return data
def mmlu_exec(arch, scorers, dataset, max_retry=3):
versions = scorers.keys()
results = { v : [] for v in versions }
workload = list(itertools.product(versions,dataset))
random.shuffle(workload)
for (version,data) in tqdm.tqdm(workload):
idx = ord(data["answer"])-ord('A')
res = None
cnt = 0
while res is None and cnt < max_retry:
try:
res = asyncio.run(arch(version, question=data['question'], choices=data['choices']))
res = (data, res, scorers[version](res,data,idx))
except Exception as e:
cnt += 1
print(f"EXCEPTION[{cnt}]: {e}")
time.sleep(1.)
res = None
results[version].append(res)
return results | [] |
2024-01-10 | LLNL/AutoCog | autocog~utility~args2arch.py |
import os
import sys
import json
import argparse
from ..config import version
from ..architecture.base import CognitiveArchitecture
from ..architecture.orchestrator import Serial, Async
from ..architecture.utility import PromptTee
from ..lm import OpenAI, TfLM, Llama
LMs = { 'OpenAI' : OpenAI, 'TfLM' : TfLM, 'LLaMa' : Llama }
def argparser():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--version', action='version', version=f'AutoCog {version}') # TODO `autocog.version:str=read('VERSION')`
parser.add_argument('--orch', help="""Type of orchestrator: `serial` or `async`.""", default='serial')
parser.add_argument('--lm', action='append', help="""Inlined JSON or path to a JSON file: `{ "text" : { "cls" : "OpenAI", ... } }` see TODO for details.""")
parser.add_argument('--program', action='append', help="""Inlined JSON or path to a JSON file: `{ "writer" : { "filepath" : "./library/writer/simple.sta", ... } }` see TODO for details.""")
parser.add_argument('--tool', action='append', help="""Inlined JSON or path to a JSON file: `{ "search" : { "cls" : "SerpApi", ... } }` see TODO for details.""")
parser.add_argument('--prefix', help="""String to identify this instance of AutoCog (used when displaying and saving the prompts)""", default='autocog')
parser.add_argument('--tee', help="""Filepath or `stdout` or `stderr`. If present, prompts will be append to that file as they are executed.""")
parser.add_argument('--fmt', help="""Format string used to save individual prompts to files. If present but empty (or `default`), `{p}/{c}/{t}-{i}.txt` is used. `p` is the prefix. `c` is the sequence id of the call. `t` is the prompt name. `i` is the prompt sequence id. WARNING! This will change as the schema is obsolete!""")
parser.add_argument('--serve', help="""Whether to launch the flask server.""", action='store_true')
parser.add_argument('--host', help="""Host for flask server.""", default='localhost')
parser.add_argument('--port', help="""Port for flask server.""", default='5000')
parser.add_argument('--debug', help="""Whether to run the flask server in debug mode.""", action='store_true')
parser.add_argument('--command', action='append', help="""Inlined JSON or path to a JSON file: `{ 'callee' : 'writer', ... }` see TODO for details.""")
parser.add_argument('--opath', help="""Directory where results are stored.""", default=os.getcwd())
return parser
def parse_json(arg):
if os.path.exists(arg):
return json.load(open(arg))
else:
return json.loads(arg)
def parse_lm(cls:str, **kwargs):
global LMs
if cls in LMs:
cls = LMs[cls]
else:
raise Exception(f"Unknown LM class: {cls} (should be one of {','.join(LMs.keys())})")
model_kwargs = kwargs['model'] if 'model' in kwargs else {}
model_kwargs = cls.create(**model_kwargs)
if 'config' in kwargs:
model_kwargs.update({ "completion_kwargs" : kwargs['config'] })
return cls(**model_kwargs)
def parse_lms(lms):
return { fmt : parse_lm(**lm) for (fmt,lm) in lms.items() }
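# Sketch of the JSON accepted by `--lm` (field values are assumptions and depend on the chosen
# backend class): one LM per prompt-format tag, e.g.
#   { "text": { "cls": "TfLM", "model": { "model_path": "gpt2-medium" },
#               "config": { "max_new_tokens": 20 } } }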
def parseargs(argv):
parser = argparser()
args = parser.parse_args(argv)
pipe_kwargs = { 'prefix' : args.prefix }
if args.tee is not None:
if args.tee == 'stdout':
pipe_kwargs.update({ 'tee' : sys.stdout })
elif args.tee == 'stderr':
pipe_kwargs.update({ 'tee' : sys.stderr })
else:
pipe_kwargs.update({ 'tee' : open(args.tee,'w') })
if args.fmt is not None:
if args.fmt == '' or args.fmt == 'default':
pipe_kwargs.update({ 'fmt' : '{p}/{c}/{t}-{i}.txt' })
else:
pipe_kwargs.update({ 'fmt' : args.fmt })
if args.orch == 'serial':
Orch = Serial
elif args.orch == 'async':
Orch = Async
else:
raise Exception(f"Unknown Orchestrator: {args.orch}")
arch = CognitiveArchitecture(Orch=Orch, pipe=PromptTee(**pipe_kwargs))
if args.lm is not None:
for lm in args.lm:
arch.orchestrator.LMs.update(parse_lms(parse_json(lm)))
programs = {}
if args.program is not None:
for prog in args.program:
programs.update(parse_json(prog))
for (tag,program) in programs.items():
arch.load(tag=tag, **program)
tools = {}
if args.tool is not None:
for tool in args.tool:
tools.update(parse_json(tool))
for (tag,tool) in tools.items():
raise NotImplementedError()
return {
'arch' : arch, 'serve' : args.serve, 'opath' : args.opath,
'host' : args.host, 'port' : int(args.port), 'debug' : args.debug,
'commands' : None if args.command is None else [ parse_json(cmd) for cmd in args.command ]
}
| [] |
2024-01-10 | g-emarco/llm-agnets | utils~callbacks.py | from typing import Dict, Any, List, Optional
from uuid import UUID
from langchain.callbacks.base import BaseCallbackHandler
class LLMInstrumentationHandler(BaseCallbackHandler):
def on_agent_action(self, action: str, **kwargs) -> None:
print(f"My custom handler, action: {action}")
def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
**kwargs: Any,
) -> Any:
print(
f"My custom handler, prompts: {prompts}," f" \n {kwargs=} \n {serialized=}"
)
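# Illustrative attachment sketch (assumption, not part of this module): langchain LLMs, chains
# and agents accept handlers through their `callbacks` argument, e.g.
#   llm = VertexAI(callbacks=[LLMInstrumentationHandler()])  # VertexAI import assumed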
| [] |
2024-01-10 | g-emarco/llm-agnets | agents~get_agents.py | from langchain.llms import VertexAI
from tools.tools import get_google_search, scrape_linkedin_profile
from langchain.agents import initialize_agent, Tool, AgentExecutor
from langchain.agents import AgentType
from langchain.agents.agent_toolkits import GmailToolkit
gmail_toolkit = GmailToolkit()
gmail_tools = [gmail_toolkit.get_tools()[0]]
def get_gmail_agent() -> AgentExecutor:
llm = VertexAI(temperature=0.5, verbose=True, max_output_tokens=1000)
agent = initialize_agent(
gmail_tools,
llm,
agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
return agent
def get_search_agent() -> AgentExecutor:
llm = VertexAI(temperature=0, verbose=True, max_output_tokens=1000)
tools_for_agent = [
Tool(
name="GoogleSearch",
func=get_google_search,
description="useful for when you need get a google search result",
),
Tool(
name="scrape_linkedin_profile",
func=scrape_linkedin_profile,
description="useful for getting information on a Linkedin profile url",
),
]
agent = initialize_agent(
tools_for_agent,
llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
return agent
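# Illustrative usage sketch (the query text is an assumption):
#   search_agent = get_search_agent()
#   search_agent.run("Find the LinkedIn profile page of <full name> and summarize it")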
| [] |
2024-01-10 | DavidMChan/ArXiV-Notify | arxivnotify.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ArXiV Notify script
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# Copyright David Chan, 2018
from __future__ import unicode_literals, print_function
# HTML Request sending and parsing
import urllib
from xml.etree import ElementTree
import requests
# Import time utilities for handling the time values
import datetime
import dateutil.parser
import time
# Import the config parser
import configparse
# Import anthropic for summarization
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
## Build an ArXiV API Query which will query for the key
def build_query(queries, page, num_elements):
query = "http://export.arxiv.org/api/query?search_query="
search_element = ""
if len(queries) == 0:
search_element = '""'
for i in range(len(queries)):
search_element = search_element + '"{}"'.format(urllib.parse.quote(str(queries[i])))
if i + 1 != len(queries):
search_element = search_element + "+OR+"
suffix = "&sortBy=lastUpdatedDate&sortOrder=descending&start={}&max_results={}".format(str(page), str(num_elements))
return query + search_element + suffix
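# For example (illustrative keyword), build_query(["deep learning"], 0, 30) yields roughly:
#   http://export.arxiv.org/api/query?search_query="deep%20learning"&sortBy=lastUpdatedDate&sortOrder=descending&start=0&max_results=30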
## Fetch the articles which are up to date
# that is, have been updated in the last day
def fetch_queries(queries, query_time):
do_continue = True
current_page = 0 # Which current page we are on
pager_interval = 30 # How many articles to fetch at once
fetched_data = [] # Each of the articles, their abstracts, and links
while do_continue:
# Fetch the next page of articles
q = build_query(queries, current_page * pager_interval, pager_interval)
query_page = urllib.request.urlopen(q)
# Convert to a string and parse
query_bytes = query_page.read()
query_data = query_bytes.decode("utf8")
query_page.close()
page_root = ElementTree.fromstring(query_data)
articles = page_root.findall("{http://www.w3.org/2005/Atom}entry")
oldest_query_time = dateutil.parser.parse(
page_root.findtext("{http://www.w3.org/2005/Atom}updated")
) - datetime.timedelta(days=int(query_time))
# Break the loop if no articles found!
if not articles:
do_continue = False
break
# We put this sleep in to coform to the ArXiV bot standards
time.sleep(3)
# Build up the dataset of articles that we fetched
for article in articles:
link = article.findtext("{http://www.w3.org/2005/Atom}id")
title = article.findtext("{http://www.w3.org/2005/Atom}title")
abstract = article.findtext("{http://www.w3.org/2005/Atom}summary")
date = article.findtext("{http://www.w3.org/2005/Atom}updated")
authors = ", ".join([name.text for name in article.iter("{http://www.w3.org/2005/Atom}name")])
datetime_obj = dateutil.parser.parse(date)
# If the published articles is too old - we're done looking.
if datetime_obj < oldest_query_time:
do_continue = False
break
# Otherwise add the article
fetched_data.append((title, link, abstract, datetime_obj, authors))
current_page += 1
return fetched_data
def _summarize(queries, topics):
"""Summarize the queries using anthropic"""
# Get the abstracts
abstracts = [q[2] for q in queries]
titles = [q[0] for q in queries]
abstracts_and_titles = "\n\n".join([f"{t}: {a}" for t, a in zip(titles, abstracts)])
prompt = f"""The following are the titles and abstracts of the papers that you have been reading in the last time period. Briefly summarize them (while retaining the necessary detail) as if you were giving a report on the following topics: {topics}.
{abstracts_and_titles}"""
anthropic = Anthropic(api_key=CFG["CLAUDE_API_KEY"])
completion = anthropic.completions.create(
model="claude-2",
max_tokens_to_sample=512,
prompt=f"{HUMAN_PROMPT} {prompt} {AI_PROMPT}",
)
return completion.completion
if __name__ == "__main__":
## 1. Parse the Config File
CFG = configparse.parse("arxivnotify.cfg")
# Check to see if any confiuration values are missing
if "KEYWORD" not in CFG:
raise ValueError(
"No keywords in the configuration file! Add one or more keywords using the 'KEYWORD' field in the config file"
)
if type(CFG["KEYWORD"]) is not list:
# If there is only one keyword, make it into a list
CFG["KEYWORD"] = [CFG["KEYWORD"]]
if "HISTORY_DAYS" not in CFG:
print("WARNING: No history length set in the configuration. Setting to default of 1 day.")
CFG["HISTORY_DAYS"] = "1"
if "MAILGUN_ROOT" not in CFG:
raise ValueError(
"No mailgun root specified! Specity the mailgun root using the 'MAILGUN_ROOT' field in the config file"
)
if "MAILGUN_API_KEY" not in CFG:
raise ValueError(
"No mailgun API key specified! Specity the mailgun root using the 'MAILGUN_API_KEY' field in the config file"
)
if "MAILGUN_FROM" not in CFG:
raise ValueError(
"No 'From Email' specified! Specity the 'From Email' using the 'MAILGUN_FROM' field in the config file"
)
if "MAILGUN_TO" not in CFG:
raise ValueError(
"No destination emails specified! Specity one or more destination emails using the 'MAILGUN_TO' field in the config file"
)
if type(CFG["MAILGUN_TO"]) is not list:
# If there is only one destination meail, make it into a list
CFG["MAILGUN_TO"] = [CFG["MAILGUN_TO"]]
## 2. Build the HTML email by quering ArXiV
all_results = []
try:
num_articles = 0
html_output = ""
for keyword in CFG["KEYWORD"]:
print("Parsing Keyword: {}".format(keyword))
queries = fetch_queries([keyword], CFG["HISTORY_DAYS"])
all_results.extend(queries)
html_output += "<h3>" + keyword + "</h3>\n"
html_output += "<ul>\n"
for q in queries:
num_articles += 1
html_output += "<li>\n"
html_output += '\t<b><u><a href="{}">{}</a></u></b>'.format(q[1], q[0])
html_output += "<br>\n"
html_output += "<i>{}</i>".format(q[4])
html_output += "<br>\n"
html_output += "{}\n".format(str(q[3]))
# html_output += "<br>\n"
# html_output += "{}\n".format(q[2])
html_output += "</li>\n"
html_output += "<br>\n"
html_output += "</ul>\n"
mail_subject = "ArXiVAI Bot Email - {} - {} New Articles".format(
datetime.date.today().strftime("%B %d, %Y"), num_articles
)
except Exception:
raise RuntimeError(
"There was an error fetching data from the ArXiV server! Check to make sure you are connected to the internet!"
)
## Add the summary:
summary = _summarize(all_results, CFG["KEYWORD"])
html_output = f"""<h2> ArXiVAI Bot Email - {datetime.date.today().strftime("%B %d, %Y")}</h2>
<h2> Your Research Summary </h2>
{summary}
{html_output}
"""
## 3. Send the Emails
RETURN_VAL = None
try:
for email in CFG["MAILGUN_TO"]:
RETURN_VAL = requests.post(
CFG["MAILGUN_ROOT"] + "/messages",
auth=("api", CFG["MAILGUN_API_KEY"]),
data={
"from": CFG["MAILGUN_FROM"],
"to": email,
"subject": mail_subject,
# "text": html_output,
"html": html_output,
},
)
if RETURN_VAL.status_code != 200:
raise RuntimeError("Mail Error: ", RETURN_VAL.text)
except:
raise RuntimeError(
"Arxiv notifier bot wasn't able to send an email! Check your mailgun API key and Root. HTML ERROR: {} {}".format(
RETURN_VAL.status_code, RETURN_VAL.text
)
)
| [
"The following are the titles and abstracts of the papers that you have been reading in the last time period. Briefly summarize them (while retaining the necessary detail) as if you were giving a report on the following topics: PLACEHOLDER.\n PLACEHOLDER",
"PLACEHOLDER PLACEHOLDER PLACEHOLDER"
] |
2024-01-10 | Omar-Eses/mcq_with_AI | logic.py | # essential imports in program
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
# take user input -> add it to the prompt -> chain -> pass to the model (chatgpt) -> return the model output -> show it on streamlit
# create quiz prompt
def generate_questions(quiz_topic="programming in python", num_of_questions=1):
llm = OpenAI()
# prompt to involve the user in the model
prompt_template = PromptTemplate(
input_variables=["quiz_topic", "num_of_questions"],
template="""
you are an expert making multiple choice questions in the following topic: {quiz_topic}.
create a multiple choice quiz that consists of {num_of_questions} questions,
each question should have four options one of them is correct and you should send it to me.
the correct answer should be marked with ^
format it in the known multiple choice quiz template; example:
Q1. what is the most famous programming language in the world?
a. javascript
b. Java
c. C++
d. Python
Q2. what is the most famous language of the following?
a. arabic
b. english
c. german
d. russian
- Answers:
<Answer1>: a
<Answer2>: b
"""
)
# Chaining the prompt so that it can be used to generate questions
questions_chain = LLMChain(llm=llm, prompt=prompt_template, output_key="question_format")
response = questions_chain({'quiz_topic': quiz_topic, 'num_of_questions': num_of_questions})
# # Parse the response to get questions and answers
# parser = StrOutputParser()
# parsed_response = parser(response)
#
# # Extract questions and answers
# questions = [item['content'] for item in parsed_response['output']['content']]
# answers = [item['answer'] for item in parsed_response['output']['content']]
return response
if __name__ == "__main__":
print(generate_questions("muscle hypertrophy", 2))
| [
"quiz_topic",
"num_of_questions",
"\n you are an expert making multiple choice questions in the following topic: {quiz_topic}.\n create a multiple choice quiz that consists of {num_of_questions} questions,\n each question should have four options one of them is correct and you should send it to me.\n the correct answer should be marked with ^\n format it in the known multiple choice quiz template; example:\n Q1. what is the most famous programming language in the world?\n a. javascript\n b. Java\n c. C++\n d. Python\n Q2. what is the most famous language of the following?\n a. arabic\n b. english\n c. german\n d. russian\n - Answers:\n <Answer1>: a\n <Answer2>: b\n "
] |
2024-01-10 | andy3278/CopyMyDesk | raw-data-to-openai.py | import pandas as pd
from datetime import datetime
import os
from dotenv import load_dotenv, find_dotenv
import openai
# read csv file
df = pd.read_csv('./data/youtube-desk-setup-raw-data.csv')
print(df.shape)
# do some data cleaning
df['transcript_text'] = df['transcript_text'].str.replace('\n', ' ')
df['transcript_text'] = df['transcript_text'].str.replace('\t', ' ')
df['transcript_text'] = df['transcript_text'].str.replace('[Music]', '')
df['transcript_text'] = df['transcript_text'].str.replace('[Applause]', '')
# drop rows whose transcript has 500 words or fewer
df['word_count'] = df['transcript_text'].apply(lambda x: len(x.split()))
df = df[df['word_count'] > 500]
# remove rows published before 2023
df['release_date'] = df['release_date'].apply(lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S'))
df = df[df['release_date'] >= datetime(2023, 1, 1)]
print(df.shape)
df.to_csv('./data/cleaned-youtube-desk-setup.csv', index=False)
# openai api
load_dotenv(find_dotenv())
openai.api_key = os.environ.get("OPENAI_API_KEY")
def openai_api(text:str) -> str:
# openai api
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{
"role": "system",
"content": """You will be provided with Desk setup youtube video transcripts, and your task is to extract what items are mentioned in video:
1. you must only extract items mentioned in the video
2. you must find 3 main items in the video, computer, mouse and keyboard, if its not found in video, say \"NA\"
3. if other desk items is mentioned also put them in the output, monitor, lights, desk, charger, computer dock etc.
4. if same category have multiple items put them in a string with comma separated.
5. your output format should be in json\n\n
here is one example of respond:
```
{"computer": "14 inch MacBook Pro", "mouse": "Logitech MX Master 3s", "keyboard": "Logitech MX mechanical mini", "monitor": "Apple Studio display, BenQ PD 3220u", "lights": "Elgato key light air", "desk": "Ikea countertop, Alex drawers, Carly kitchen countertop", "charger": "CalDigit TS4 Thunderbolt Hub", "computer dock": "Book Arc by 12 South", "neon sign": "custom neon sign by Illusion Neon", "acoustic panels": "gig Acoustics panels", "desk chair": "Autonomous Ergo chair plus", "scanner": "Fujitsu scanner", "charging stand": "Pataka charging stand", "pen": "Grovemade pen", "sticky notes": "sticky notes", "webcam": "Opal C1", "microphone": "Shure MV7", "audio interface": "Apollo twin X", "speakers": "Yamaha HS5", "headphones": "Rode NTH100s", "mic arm": "Rode PSA1 Plus", "controller": "Tour Box Elite", "light control": "Elgato Stream Deck Plus", "tablet": "iPad Pro", "tablet arm": "Cooks you desk arm", "monitor mount": "BenQ monitor mount", "travel charger": "ESR travel charger", "desk mat": "Grovemade felt mat", "smart home device": "Amazon Alexa Show", "security cameras": "UV security cameras", "Mac Mini": "Mac Mini Pro"}
```
"""
},
{
"role":"user",
"content": text
}
],
temperature=0,
max_tokens=1024,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
return response.choices[0]['message']['content']
# the openai api has a rate limit of 3 requests per minute, so sleep 20s every few requests
import time
# for index, row in df.iterrows():
# if index % 3 == 0:
# time.sleep(20)
# df.loc[index, 'items'] = openai_api(row['transcript_text'])
#df['items'] = df['transcript_text'].apply(openai_api)
# save results to csv
#df.to_csv('youtube-desk-setup.csv', index=False)
import requests
hf_api_key = os.environ.get("HF_API_KEY")
API_URL = "https://api-inference.huggingface.co/models/distilbert-base-cased-distilled-squad"
headers = {"Authorization": f"Bearer {hf_api_key}"}
def query(payload):
response = requests.post(API_URL, headers=headers, json=payload)
return response.json()
# for index, row in df.iterrows():
# ans = {"answer": "NA"}
# try:
# ans = query({
# "inputs": {
# "question": "What is Operating System used? Windows, MacOS or Linux?",
# "context": row['transcript_text']
# }
# })
# except:
# pass
# df.loc[index, 'OS'] = ans['answer']
# print(f"row : {index} done")
# print(ans)
# if index >= 300:
# break
#print(df['OS'].value_counts())
#df.to_csv('./data/after-LLM-data.csv', index=False)
# output = query({
# "inputs": {
# "question": "What is my name?",
# "context": "My name is Clara and I live in Berkeley."
# },
# })
| [
"You will be provided with Desk setup youtube video transcripts, and your task is to extract what items are mentioned in video: \n 1. you must only extract items mentioned in the video \n 2. you must find 3 main items in the video, computer, mouse and keyboard, if its not found in video, say \"NA\" \n 3. if other desk items is mentioned also put them in the output, monitor, lights, desk, charger, computer dock etc.\n 4. if same category have multiple items put them in a string with comma separated.\n 5. your output format should be in json\n\n\n here is one example of respond:\n ```\n {\"computer\": \"14 inch MacBook Pro\", \"mouse\": \"Logitech MX Master 3s\", \"keyboard\": \"Logitech MX mechanical mini\", \"monitor\": \"Apple Studio display, BenQ PD 3220u\", \"lights\": \"Elgato key light air\", \"desk\": \"Ikea countertop, Alex drawers, Carly kitchen countertop\", \"charger\": \"CalDigit TS4 Thunderbolt Hub\", \"computer dock\": \"Book Arc by 12 South\", \"neon sign\": \"custom neon sign by Illusion Neon\", \"acoustic panels\": \"gig Acoustics panels\", \"desk chair\": \"Autonomous Ergo chair plus\", \"scanner\": \"Fujitsu scanner\", \"charging stand\": \"Pataka charging stand\", \"pen\": \"Grovemade pen\", \"sticky notes\": \"sticky notes\", \"webcam\": \"Opal C1\", \"microphone\": \"Shure MV7\", \"audio interface\": \"Apollo twin X\", \"speakers\": \"Yamaha HS5\", \"headphones\": \"Rode NTH100s\", \"mic arm\": \"Rode PSA1 Plus\", \"controller\": \"Tour Box Elite\", \"light control\": \"Elgato Stream Deck Plus\", \"tablet\": \"iPad Pro\", \"tablet arm\": \"Cooks you desk arm\", \"monitor mount\": \"BenQ monitor mount\", \"travel charger\": \"ESR travel charger\", \"desk mat\": \"Grovemade felt mat\", \"smart home device\": \"Amazon Alexa Show\", \"security cameras\": \"UV security cameras\", \"Mac Mini\": \"Mac Mini Pro\"}\n ```\n "
] |
2024-01-10 | andy3278/CopyMyDesk | Youtube-data-api-v3.py | import pandas as pd
from datetime import datetime
import os
from googleapiclient.discovery import build
from dotenv import load_dotenv, find_dotenv
from youtube_transcript_api import YouTubeTranscriptApi
import openai
# load secret from env
load_dotenv(find_dotenv())
openai.api_key = os.environ.get("OPENAI_API_KEY")
youtube_key = os.environ.get('YOUTUBE_KEY')
api_key = youtube_key
youtube = build('youtube', 'v3', developerKey=api_key)
# Replace 'your_keyword' with the keyword you want to search
max_results = 1000
serach_keyword = 'Desk setup 2023'
video_id = []
title = []
channel = []
release_date = []
request = youtube.search().list(
part='snippet',
maxResults=max_results,
q=serach_keyword,
publishedAfter='2023-01-01T00:00:00Z', # get only result after 2023
relevanceLanguage = 'en',
type = 'video'
)
# for item in response['items']:
# print('Video ID: ', item['id']['videoId'])
# print('Title: ', item['snippet']['title'])
# print('Channel: ', item['snippet']['channelTitle'])
# print('---------------------------')
# store results in a dataframe
# use a for loop to fetch 50 results at a time
for _ in range(max_results // 50):
response = request.execute()
next_page_token = response.get('nextPageToken')
request = youtube.search().list(
part='snippet',
maxResults=max_results,
q=serach_keyword,
publishedAfter='2023-01-01T00:00:00Z', # get only result after 2023
relevanceLanguage = 'en',
type = 'video',
pageToken = next_page_token
)
# append result in lists
for item in response['items']:
if item['id']['videoId'] not in video_id:
video_id.append(item['id']['videoId'])
title.append(item['snippet']['title'])
channel.append(item['snippet']['channelTitle'])
release_date.append(item['snippet']['publishedAt'])
# create dataframe
df = pd.DataFrame({'video_id': video_id, 'title':title, 'channel':channel, 'release_date':release_date})
# fotmat the release date
date_formatter = lambda x: datetime.strptime(x, '%Y-%m-%dT%H:%M:%SZ').strftime('%Y-%m-%d %H:%M:%S')
df['release_date'] = df['release_date'].apply(date_formatter)
print(df.shape)
# get video transcript
def get_transcripts(video_ids:str) -> list:
count = 0
transcripts = []
for video_id in video_ids:
print(f'Getting transcript for video {count}')
count += 1
try:
transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=['en'])
transcripts.append(transcript)
except:
transcripts.append(None)
return transcripts
# get transcript for each video_id in df
df['transcript'] = get_transcripts(df['video_id'])
# clean transcript column
# if transcript is None, drop the row
df = df.dropna(subset=['transcript'])
print(df.shape)
# remove duplicate video ids
df = df.drop_duplicates(subset='video_id')
print(df.shape)
# get transcript text
df['transcript_text'] = df['transcript'].apply(lambda x: ' '.join([item['text'] for item in x]))
# drop transcript column
df = df.drop('transcript', axis=1)
# pass transcript text to openai api and get desk items from transcript
# save df to csv first
df.to_csv('./data/youtube-desk-setup-raw-data.csv', index=False) | [] |
2024-01-10 | SocialGouv/llama-index-test | build_retrieval.py | from llama_index.response.notebook_utils import display_response
from llama_index.query_engine import SubQuestionQueryEngine, RouterQueryEngine
from typing import Callable, Optional
import shutil
from llama_index import VectorStoreIndex, StorageContext, load_index_from_storage
from llama_index.storage.docstore import SimpleDocumentStore
from llama_index.tools import QueryEngineTool, ToolMetadata
from llama_index.retrievers import AutoMergingRetriever
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.chat_engine import SimpleChatEngine
import openai
import os
import sys
from llama_index import SimpleDirectoryReader, Document
from llama_docs_bot.markdown_docs_reader import MarkdownDocsReader
from llama_index.schema import MetadataMode
from llama_index.node_parser import HierarchicalNodeParser, SimpleNodeParser, get_leaf_nodes
from llama_index.llms import OpenAI
from llama_index import ServiceContext, set_global_service_context
from llama_index import QueryBundle
from llama_index.utils import globals_helper
import logging
logger = logging.getLogger()
logging.basicConfig(level=logging.DEBUG)
if 'OPENAI_API_KEY' not in os.environ:
raise Exception("OPENAI_API_KEY environment variable not set")
openai.api_key = os.environ['OPENAI_API_KEY']
def load_service_context(model="gpt-3.5-turbo-16k", max_tokens=512, temperature=0.1):
# Use local embeddings + gpt-3.5-turbo-16k
service_context = ServiceContext.from_defaults(
llm=OpenAI(model, max_tokens=max_tokens, temperature=temperature),
# embed_model="local:BAAI/bge-base-en"
)
return service_context
def load_markdown_docs(filepath, hierarchical=True):
"""Load markdown docs from a directory, excluding all other file types."""
loader = SimpleDirectoryReader(
input_dir=filepath,
required_exts=[".md"],
file_extractor={".md": MarkdownDocsReader()},
recursive=True
)
documents = loader.load_data()
if hierarchical:
# combine all documents into one
documents = [
Document(text="\n\n".join(
document.get_content(metadata_mode=MetadataMode.ALL)
for document in documents
)
)
]
# chunk into 3 levels
# majority means 2/3 are retrieved before using the parent
large_chunk_size = 1536
node_parser = HierarchicalNodeParser.from_defaults(
chunk_sizes=[
large_chunk_size,
large_chunk_size // 3,
]
)
nodes = node_parser.get_nodes_from_documents(documents)
return nodes, get_leaf_nodes(nodes)
else:
node_parser = SimpleNodeParser.from_defaults()
nodes = node_parser.get_nodes_from_documents(documents)
return nodes
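# Illustrative call sketch (directory name reused from the mapping defined further below):
#   nodes, leaf_nodes = load_markdown_docs("./content/standup-fabrique", hierarchical=True)
#   # with hierarchical=False a flat list of nodes is returned instead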
def get_query_engine_tool(directory, description, hierarchical=True, postprocessors=None):
try:
storage_context = StorageContext.from_defaults(
persist_dir=f"./data_{os.path.basename(directory)}"
)
index = load_index_from_storage(storage_context)
if hierarchical:
retriever = AutoMergingRetriever(
index.as_retriever(similarity_top_k=6),
storage_context=storage_context
)
else:
retriever = index.as_retriever(similarity_top_k=12)
except:
if hierarchical:
nodes, leaf_nodes = load_markdown_docs(
directory, hierarchical=hierarchical)
docstore = SimpleDocumentStore()
docstore.add_documents(nodes)
storage_context = StorageContext.from_defaults(docstore=docstore)
index = VectorStoreIndex(
leaf_nodes, storage_context=storage_context)
index.storage_context.persist(
persist_dir=f"./data_{os.path.basename(directory)}")
retriever = AutoMergingRetriever(
index.as_retriever(similarity_top_k=12),
storage_context=storage_context
)
else:
nodes = load_markdown_docs(directory, hierarchical=hierarchical)
index = VectorStoreIndex(nodes)
index.storage_context.persist(
persist_dir=f"./data_{os.path.basename(directory)}")
retriever = index.as_retriever(similarity_top_k=12)
query_engine = RetrieverQueryEngine.from_args(
retriever,
node_postprocessors=postprocessors or [],
)
return QueryEngineTool(query_engine=query_engine, metadata=ToolMetadata(name=directory, description=description))
class LimitRetrievedNodesLength:
def __init__(self, limit: int = 3000, tokenizer: Optional[Callable] = None):
self._tokenizer = tokenizer or globals_helper.tokenizer
self.limit = limit
def postprocess_nodes(self, nodes, query_bundle):
included_nodes = []
current_length = 0
for node in nodes:
current_length += len(self._tokenizer(
node.node.get_content(metadata_mode=MetadataMode.LLM)))
if current_length > self.limit:
break
included_nodes.append(node)
return included_nodes
def build_final_query_engine(service_context):
# Here we define the directories we want to index, as well as a description for each
# NOTE: these descriptions are hand-written based on my understanding. We could have also
# used an LLM to write these, maybe a future experiment.
docs_directories = {
# "./docs/community": "Useful for information on community integrations with other libraries, vector dbs, and frameworks.",
# "./docs/core_modules/agent_modules": "Useful for information on data agents and tools for data agents.",
# "./docs/core_modules/data_modules": "Useful for information on data, storage, indexing, and data processing modules.",
# "./docs/core_modules/model_modules": "Useful for information on LLMs, embedding models, and prompts.",
# "./docs/core_modules/query_modules": "Useful for information on various query engines and retrievers, and anything related to querying data.",
# "./docs/core_modules/supporting_modules": "Useful for information on supporting modules, like callbacks, evaluators, and other supporting modules.",
# "./docs/getting_started": "Useful for information on getting started with LlamaIndex.",
# "./docs/development": "Useful for information on contributing to LlamaIndex development.",
"./content/standup-fabrique": "Pour consulter l'actualité et les chiffres d'une startup.",
"./content/support-sre-fabrique": "Pour les questions techniques et développement et déploiement."
}
# Build query engine tools
query_engine_tools = [
get_query_engine_tool(
directory,
description,
hierarchical=True,
postprocessors=[LimitRetrievedNodesLength(limit=3000)]
) for directory, description in docs_directories.items()
]
# build top-level router -- this will route to multiple sub-indexes and aggregate results
# query_engine = SubQuestionQueryEngine.from_defaults(
# query_engine_tools=query_engine_tools,
# service_context=service_context,
# verbose=False
# )
query_engine = RouterQueryEngine.from_defaults(
query_engine_tools=query_engine_tools,
service_context=service_context,
select_multi=True
)
return query_engine
if __name__ == "__main__":
service_context = load_service_context()
set_global_service_context(service_context)
query_engine = build_final_query_engine(service_context)
response = query_engine.query("Quelle lib utiliser pour accéder à ma base de données ?")
display_response(response)
| [
"Pour consulter l'actualité et les chiffres d'une startup.",
"Pour les questions techniques et développement et déploiement."
] |
2024-01-10 | Anticiparte/chatbot-pdf | app_fixed.py | import streamlit as st
from PyPDF2 import PdfReader
from dotenv import load_dotenv
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
from langchain.vectorstores import FAISS
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from htmlTemplates import css, bot_template, user_template
from langchain.llms import HuggingFaceHub
import glob
def get_pdf_text(pdf_docs):
text = ""
for pdf in pdf_docs:
pdf_reader = PdfReader(pdf)
for page in pdf_reader.pages:
text += page.extract_text()
return text
def get_text_chunks(raw_text):
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=1000,
chunk_overlap=200,
length_function=len)
chunks = text_splitter.split_text(raw_text)
return chunks
def get_vector_store(text_chunks):
# embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl")
embeddings = OpenAIEmbeddings()
vector_store = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
return vector_store
def get_conversation_chain(vector_store):
llm = ChatOpenAI(temperature=0.75)
# llm = HuggingFaceHub(repo_id="google/flan-t5-xxl",
# model_kwargs={"temperature": 0.25, "max_length": 512})
memory = ConversationBufferMemory(
memory_key='chat_history', return_messages=True)
conversation_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=vector_store.as_retriever(),
memory=memory
)
return conversation_chain
def handle_user_input(user_question):
response = st.session_state.conversation({'question': user_question})
st.session_state.chat_history = response['chat_history']
for i, message in enumerate(st.session_state.chat_history):
if i % 2 == 0:
st.write(user_template.replace(
"{{MSG}}", message.content), unsafe_allow_html=True)
else:
st.write(bot_template.replace(
"{{MSG}}", message.content), unsafe_allow_html=True)
def main():
load_dotenv()
if "conversation" not in st.session_state:
st.session_state.conversation = None
st.set_page_config(
page_title="Chatea con nuestro experto f", page_icon=":books:")
st.write(css, unsafe_allow_html=True)
if "conversation" not in st.session_state:
st.session_state.conversation = None
if "chat_history" not in st.session_state:
st.session_state.chat_history = None
st.header("Chatea con nuestro experto :books:")
user_question = st.text_input(
"Haz una pregunta acerca de nuestra base de conocimiento en los documentos")
if user_question:
handle_user_input(user_question)
st.write(user_template.replace(
"{{MSG}}", "Hola Robot"), unsafe_allow_html=True)
st.write(bot_template.replace(
"{{MSG}}", "Hola Humano"), unsafe_allow_html=True)
with st.sidebar:
st.subheader("Los documentos")
pdf_docs = glob.glob('./reglamentos/*.pdf')
if st.button("Proceso"):
with st.spinner("Procesando, paciencia por favor"):
# get pdf text:
raw_text = get_pdf_text(pdf_docs)
st.write(raw_text)
# get the text chunks
text_chunks = get_text_chunks(raw_text)
st.write(text_chunks)
# create vector database / vector store
vector_store = get_vector_store(text_chunks)
# create conversation chain
# conversation = get_conversation_chain(vector_store)
st.session_state.conversation = get_conversation_chain(
vector_store)
if __name__ == '__main__':
main()
| [] |
2024-01-10 | RosettaTechnologies/AnkiBrain | ChatAI~ChatAIWithDocuments.py | import json
import os
from os import path
from typing import Tuple, List
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import TextLoader, PyPDFLoader, Docx2txtLoader, UnstructuredPowerPointLoader, \
UnstructuredHTMLLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.memory import ConversationBufferMemory
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from ChatInterface import ChatInterface
def get_file_extension(file_name: str) -> str:
return path.splitext(file_name)[1]
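# e.g. get_file_extension('lecture_notes.pdf') -> '.pdf' (illustrative file name)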
def rewrite_json_file(new_data: dict, f):
"""
Helper function to rewrite json root object to .json file.
:param new_data:
:param f:
:return:
"""
f.seek(0)
json.dump(new_data, f)
f.truncate()
user_data_dir = path.join(
path.abspath(path.dirname(__file__)),
'..',
'user_files'
)
default_documents_dir = path.join(user_data_dir, 'documents')
db_dir = path.join(user_data_dir, 'db')
if not path.isdir(db_dir):
os.mkdir(db_dir)
documents_json_path = path.join(user_data_dir, 'documents.json') # Not inside the documents dir.
persist_dir = path.join(db_dir, 'chroma-persist')
settings_path = path.join(user_data_dir, 'settings.json')
class ChatAIWithDocuments(ChatInterface):
def __init__(self, documents_dir_path: str = default_documents_dir, persist_directory=persist_dir):
if not path.isdir(persist_dir):
os.mkdir(persist_dir)
self.documents_dir_path = documents_dir_path
self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100, length_function=len)
temperature = 0
model_name = 'gpt-3.5-turbo'
with open(settings_path, 'r') as f:
data = json.load(f)
temperature = data['temperature']
model_name = data['llmModel']
self.llm = ChatOpenAI(temperature=temperature, model_name=model_name)
self.vectorstore = Chroma(embedding_function=HuggingFaceEmbeddings(), persist_directory=persist_directory)
self.memory = ConversationBufferMemory(memory_key="chat_history", output_key='answer',
return_messages=True)
self.qa = ConversationalRetrievalChain.from_llm(
self.llm,
self.vectorstore.as_retriever(),
memory=self.memory,
return_source_documents=True
)
if not path.isfile(settings_path):
with open(settings_path, 'w') as f:
json.dump({}, f)
with open(settings_path, 'r+') as f:
settings = json.load(f)
if 'documents_saved' not in settings:
settings['documents_saved'] = []
rewrite_json_file(settings, f)
# self.scan_documents_folder()
def clear_memory(self):
self.memory.clear()
def scan_documents_folder(self):
"""
Looks through user documents to find any new docs. Will then add these
new docs to the vectorstore. We apparently cannot delete specific documents from the vectorstore
as an unfortunate limitation of the underlying Chroma database.
:return:
"""
with open(settings_path, 'r+') as f:
"""
Save all file paths found in user_files/documents.
For all the files that have been found:
if the file name and path is not already saved in documents.json, save it to documents.json.
Also, will persist it to vectorstore.
"""
data = json.load(f)
for dirName, subdirList, fileNames in os.walk(self.documents_dir_path):
for fileName in fileNames:
full_path = path.join(dirName, fileName)
if full_path not in data['documents_saved']:
data['documents_saved'].append(full_path)
self.add_document_from_path(full_path)
# Write changes to file using helper fn.
rewrite_json_file(data, f)
def add_document(self, document: Document):
self.add_documents([document])
def add_documents(self, documents: List[Document]):
self.vectorstore.add_documents(documents)
self.vectorstore.persist()
def split_document(self, docpath: str):
# Set up the loader based on file type.
ext = get_file_extension(docpath)
loader = None
documents: List[Document] = []
if ext == '.txt':
loader = TextLoader(docpath, encoding='utf-8')
documents = loader.load()
documents = self.text_splitter.split_documents(documents)
elif ext == '.pdf':
loader = PyPDFLoader(docpath)
documents = loader.load()
documents = self.text_splitter.split_documents(documents)
elif ext == '.docx':
loader = Docx2txtLoader(docpath)
documents = loader.load()
documents = self.text_splitter.split_documents(documents)
elif ext == '.pptx':
loader = UnstructuredPowerPointLoader(docpath)
documents = loader.load()
documents = self.text_splitter.split_documents(documents)
elif ext == '.html':
loader = UnstructuredHTMLLoader(docpath)
documents = loader.load()
documents = self.text_splitter.split_documents(documents)
else:
raise Exception(
'Document type not supported at this time.\n'
'Please import a document with a supported extension.\n'
)
return documents
def add_document_from_path(self, docpath: str):
"""
Takes in a path to a file, applies the splitter to create document(s), then
adds the document(s) to the vectorstore.
:param docpath:
:return:
"""
docs = self.split_document(docpath)
self.add_documents(docs)
def clear_documents(self):
self.vectorstore.delete_collection()
self.vectorstore.persist()
def human_message(self, query: str) -> Tuple[str, list[dict[str, str]]]:
result = self.qa({'question': query})
answer = result['answer']
source_documents: List[Document] = result['source_documents']
source_documents_output: List[dict[str, str]] = []
for doc in source_documents:
source_documents_output.append({
'page_content': doc.page_content,
'source': doc.metadata['source']
})
return answer, source_documents_output
| [] |
2024-01-10 | RosettaTechnologies/AnkiBrain | ChatAI~ChatInterface.py | from abc import ABC, abstractmethod
from typing import Tuple, Optional, List, TypedDict
from langchain.schema import Document
def extract_json_array(s):
start = s.find('[')
    end = s.rfind(']')
    if start != -1 and end != -1:
        return s[start:end + 1]  # +1 to include the closing bracket
else:
return None
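# e.g. (illustrative) extract_json_array('noise [{"front": "Q", "back": "A"}] trailing')
# returns '[{"front": "Q", "back": "A"}]'; None is returned when no brackets are present.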
class BadOutputGenerateCardsException(Exception):
def __init__(self, data):
super().__init__()
self.data = data
class ChatInterface(ABC):
@abstractmethod
def clear_memory(self):
pass
@abstractmethod
def human_message(self, query: str) -> Tuple[str, Optional[List[Document]]]:
pass
def single_query_resets_memory(self, query: str):
self.clear_memory()
response, _ = self.human_message(query)
self.clear_memory()
return response
class ExplainTopicOptions(TypedDict):
level_of_detail: str
level_of_expertise: str
def explain_topic(self, topic: str, options: ExplainTopicOptions = None) -> str:
if options is None:
options = {'custom_prompt': '', 'level_of_detail': 'EXTREME', 'level_of_expertise': 'EXPERT', 'language': 'English'}
custom_prompt = options['custom_prompt']
level_of_detail = options['level_of_detail']
level_of_expertise = options['level_of_expertise']
language = options['language']
query = f'''
Explain X using the following parameters:
X = {topic}
LEVEL OF DETAIL = {level_of_detail}
LEVEL OF EXPERTISE = {level_of_expertise}
LANGUAGE = {language}
{'When finished, make sure your response is in ' +
            language + ' only.' if language != 'English' else ''}
{custom_prompt}
'''
explanation = self.single_query_resets_memory(query)
return explanation
class GenerateCardsOptions(TypedDict):
type: str
def generate_cards(self, text: str, options: GenerateCardsOptions = None) -> str:
if options is None:
options = {'type': 'basic', 'language': 'English'}
custom_prompt = options['custom_prompt']
card_type = options['type']
language = options['language']
query = ''
if card_type == 'basic':
query = f'''
Please read the {language} text below in quotes:
"{text}"
From the text above, I want you to create flash cards in {language}. Output in JSON format, using the following as a strict template for the format.
[
{{
"front": "This is an example of the front of a card generated by ChatGPT to query the material. You can be creative about the best way to ask a question.",
"back": "This is the back of the card that is the answer to the front."
}},
{{
"front": "This is the front of another card.",
"back": "This is the back of another card."
}}
]
{"The example given above is in English, but remember to translate the final cards into " +
language + ". The front text and the back text should be in " + language +
"!. The names of the JSON fields themselves ('front' and 'back') should remain in English."
            if language != 'English' else ''
}
{custom_prompt}
Do not output any other text besides JSON. Begin output now as the template above.
'''
elif card_type == 'cloze':
query = f'''
Please read the {language} text below in quotes.
"{text}"
From the text above, I want you to create flash cards in {language}.
These are special cards where you omit key words or phrases.
You can use asterisks *like this* to indicate that a word or phrase
should be hidden for whoever is studying the card.
You can create multiple deletions (omissions) per card.
Please decide to hide key words or phrases depending on how important they are to the context.
If a word or phrase is very important, you should definitely hide it using *this notation*!
Output in JSON format, using the following as a strict template for your response format:
[
{{
"text": "This is an example of a *flash card* made by you."
}},
{{
"text": "This is the *second* flash *card*, this time containing *three deletions*."
}},
{{
"text": "Please omit key *words* or *an entire phrase* using asterisks."
}}
]
I want each card to be relatively small - that means your "text" field should not be more than one sentence.
You MUST have at least ONE deletion per flash card using *this notation*.
{'The example given above is in English, but remember to translate the final cards into ' +
language + "! The name of the JSON field itself ('text') should remain in English."
            if language != 'English' else ''
}
{custom_prompt}
Always produce cards that have at least one *deletion* using the *asterisk notation* as I have shown you.
Hiding words or phrases using asterisks is critical to the flash card learning process.
Do not output any other text besides JSON. Begin output now following the template above.
'''
else:
raise Exception('Invalid card type')
cards_raw_str = self.single_query_resets_memory(query).strip()
cards_raw_str = extract_json_array(cards_raw_str) # ???
return cards_raw_str
# try:
# cards = json.loads(cards_json_str)
# for card in cards:
# card['tags'] = []
# card['type'] = card_type
# return cards
# except Exception as e:
# raise BadOutputGenerateCardsException({'message': 'Malformed JSON output', 'json': cards_json_str})
| [
"custom_prompt"
] |
2024-01-10 | RosettaTechnologies/AnkiBrain | ChatAI~ChatAIWithoutDocuments.py | import json
from os import path
from typing import Tuple
from langchain import ConversationChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from ChatInterface import ChatInterface
user_data_dir = path.join(
path.abspath(path.dirname(__file__)),
'..',
'user_files'
)
settings_path = path.join(user_data_dir, 'settings.json')
class ChatAIWithoutDocuments(ChatInterface):
def __init__(self, verbose=False):
temperature = 0
model_name = 'gpt-3.5-turbo'
with open(settings_path, 'r') as f:
data = json.load(f)
temperature = data['temperature']
model_name = data['llmModel']
self.llm = ChatOpenAI(temperature=temperature, model_name=model_name)
self.memory = ConversationBufferMemory()
self.conversationChain = ConversationChain(llm=self.llm, memory=self.memory, verbose=verbose)
def human_message(self, query: str) -> Tuple[str, None]:
return self.conversationChain.predict(input=query), None
def clear_memory(self):
self.memory.clear()
| [] |
2024-01-10 | RosettaTechnologies/AnkiBrain | AnkiBrain.py | import asyncio
import json
import platform
import signal
import threading
from anki.hooks import addHook
from aqt import mw, gui_hooks
from aqt.qt import *
from aqt.utils import showInfo
from dotenv import set_key, load_dotenv
from ChatAIModuleAdapter import ChatAIModuleAdapter
from ExplainTalkButtons import ExplainTalkButtons
from InterprocessCommand import InterprocessCommand as IC
from OpenAIAPIKeyDialog import OpenAIAPIKeyDialog
from PostUpdateDialog import PostUpdateDialog
from SidePanel import SidePanel
from UserModeDialog import show_user_mode_dialog
from card_injection import handle_card_will_show
from changelog import ChangelogDialog
from project_paths import dotenv_path
from util import run_win_install, run_macos_install, run_linux_install, UserMode
class GUIThreadSignaler(QObject):
"""
Required class for calling UI updates from the non-UI thread.
"""
resetUISignal = pyqtSignal()
openFileBrowserSignal = pyqtSignal(int) # takes commandId so we can resolve the request
showNoAPIKeyDialogSignal = pyqtSignal()
sendToJSFromAsyncThreadSignal = pyqtSignal(dict)
def __init__(self):
super().__init__()
self.resetUISignal.connect(self.reset_ui)
self.openFileBrowserSignal.connect(self.open_file_browser)
self.showNoAPIKeyDialogSignal.connect(self.show_no_API_key_dialog)
self.sendToJSFromAsyncThreadSignal.connect(self.send_to_js_from_async_thread)
def send_to_js_from_async_thread(self, json_dict: dict):
mw.ankiBrain.sidePanel.webview.send_to_js(json_dict)
def show_no_API_key_dialog(self):
showInfo('AnkiBrain has loaded. There is no API key detected, please set one before using the app.')
def reset_ui(self):
mw.reset()
def open_file_browser(self, commandId):
print(f'Opening file browser with commandId {commandId}')
dialog = QFileDialog()
full_paths, _ = dialog.getOpenFileNames()
# No files selected (empty array).
if not full_paths:
mw.ankiBrain.reactBridge.trigger(IC.DID_CLOSE_DOCUMENT_BROWSER_NO_SELECTIONS, commandId=commandId)
return
documents = []
for path in full_paths:
file_name_with_extension = os.path.basename(path)
file_name, extension = os.path.splitext(file_name_with_extension)
documents.append({
'file_name_with_extension': os.path.basename(path),
'file_name': file_name,
'extension': extension,
'path': path,
'size': os.path.getsize(path)
})
print(f'Selected documents: {json.dumps(documents)}')
# user_mode = mw.settingsManager.get_user_mode()
# if user_mode == UserMode.SERVER:
mw.ankiBrain.reactBridge.send_cmd(
IC.DID_SELECT_DOCUMENTS,
data={'documents': documents},
commandId=commandId
)
# elif user_mode == UserMode.LOCAL:
# mw.ankiBrain.reactBridge.trigger(IC.ADD_DOCUMENTS, documents=documents)
class AnkiBrain:
def __init__(self, user_mode: UserMode = UserMode.LOCAL):
self.user_mode = user_mode
self.loop = None
self.sidePanel = SidePanel("AnkiBrain", mw)
self.sidePanel.webview.page().loadFinished.connect(self.on_webengine_load_finished)
self.webview_loaded = False
self.explainTalkButtons = None
self.selectedText = ''
self.chatAI = ChatAIModuleAdapter() # Requires async starting by calling .start
self.chatReady = False
self.openai_api_key_dialog = OpenAIAPIKeyDialog()
self.openai_api_key_dialog.hide()
        # Should go last because this object takes self and can call into it.
        # Otherwise there is a risk of using members that have not finished setup.
from ReactBridge import ReactBridge
self.reactBridge = ReactBridge(self)
self.guiThreadSignaler = GUIThreadSignaler()
self.setup_ui()
def __del__(self):
self.sidePanel.deleteLater()
asyncio.run(self.chatAI.stop())
def setup_ui(self):
mw.addDockWidget(Qt.DockWidgetArea.RightDockWidgetArea, self.sidePanel)
self.sidePanel.resize(500, mw.height())
# Set up api key dialog.
self.openai_api_key_dialog.on_key_save(self.handle_openai_api_key_save)
# Hook for injecting custom javascript into Anki cards.
addHook("prepareQA", handle_card_will_show)
# Hook for Anki's card webview JS function `pycmd`
gui_hooks.webview_did_receive_js_message.append(self.handle_anki_card_webview_pycmd)
add_ankibrain_menu_item('Show/Hide AnkiBrain', self.toggle_panel)
add_ankibrain_menu_item('Switch User Mode...', show_user_mode_dialog)
if self.user_mode == UserMode.LOCAL:
add_ankibrain_menu_item('Restart AI...', self.restart_async_members_from_sync)
add_ankibrain_menu_item('Set OpenAI API Key...', self.show_openai_api_key_dialog)
add_ankibrain_menu_item('Reinstall...', reinstall)
# Check if AnkiBrain has been updated.
has_updated = mw.settingsManager.has_ankibrain_updated()
if has_updated:
# If updated, need to have the user reinstall python dependencies.
# Show PostUpdateDialog.
mw.updateDialog = PostUpdateDialog(mw)
mw.updateDialog.show()
add_ankibrain_menu_item('Show Changelog', show_changelog)
self.main()
def on_webengine_load_finished(self):
print('Webview finished loading.')
self.webview_loaded = True
async def load_user_settings(self):
settings = mw.settingsManager.settings
print('Sending DID_LOAD_USER_FILES')
self.reactBridge.send_cmd(IC.DID_LOAD_SETTINGS, settings)
async def _start_async_members(self):
"""
Start up all async members here.
:return:
"""
# Make sure webview is loaded.
while not self.webview_loaded:
print('Webview is not loaded yet, sleeping async...')
await asyncio.sleep(0.1)
if self.user_mode == UserMode.LOCAL:
self.reactBridge.send_cmd(IC.SET_WEBAPP_LOADING_TEXT, {'text': 'Starting AI Engine...'})
print('Starting AnkiBrain...')
await self.chatAI.start()
self.chatReady = True
print('AnkiBrain ChatAI loaded. App is ready.')
self.reactBridge.send_cmd(IC.SET_WEBAPP_LOADING_TEXT, {'text': 'Loading your settings...'})
await self.load_user_settings()
self.reactBridge.send_cmd(IC.DID_FINISH_STARTUP)
# Check for key in .env file in user_files
if self.user_mode == UserMode.LOCAL:
load_dotenv(dotenv_path, override=True)
if os.getenv('OPENAI_API_KEY') is None or os.getenv('OPENAI_API_KEY') == '':
print('No API key detected')
self.guiThreadSignaler.showNoAPIKeyDialogSignal.emit()
else:
print(f'Detected API Key: {os.getenv("OPENAI_API_KEY")}')
async def _stop_async_members(self):
"""
Stop all async members here.
:return:
"""
if self.user_mode == UserMode.LOCAL:
print('Stopping AnkiBrain...')
await self.chatAI.stop()
self.chatReady = False
async def restart_async_members(self):
print('Restarting AnkiBrain...')
print('Setting web app loading: True')
self.reactBridge.set_webapp_loading(True)
await self._stop_async_members()
await self._start_async_members()
print('Setting web app loading: False')
self.reactBridge.set_webapp_loading(False)
self.reactBridge.send_cmd(IC.STOP_LOADERS)
def restart_async_members_from_sync(self):
"""
Restart AnkiBrain from a synchronous thread.
This dispatches a task in the async event loop that runs AnkiBrain.
This is a synchronous function but is a non-blocking operation.
:return:
"""
asyncio.run_coroutine_threadsafe(self.restart_async_members(), mw.ankiBrain.loop)
async def ask_dummy(self, query: str):
output = await self.chatAI.ask_dummy(query)
return output
def handle_openai_api_key_save(self, key):
self.openai_api_key_dialog.hide()
set_key(dotenv_path, 'OPENAI_API_KEY', key)
os.environ['OPENAI_API_KEY'] = key
self.restart_async_members_from_sync()
def _handle_process_signal(self, signal, frame):
try:
self.chatAI.scriptManager.terminate_sync()
except Exception as e:
print(str(e))
exit(0)
def main(self):
"""
Runs AnkiBrain's async members in an asyncio event loop in a separate thread to not block Anki's UI.
:return:
"""
# Set up signal handling in main thread.
signal.signal(signal.SIGINT, self._handle_process_signal)
signal.signal(signal.SIGTERM, self._handle_process_signal)
def start_async_loop(_loop):
asyncio.set_event_loop(_loop)
_loop.run_forever()
loop = asyncio.new_event_loop()
self.loop = loop
t = threading.Thread(target=start_async_loop, args=(loop,))
t.daemon = True
t.start()
try:
asyncio.run_coroutine_threadsafe(self._start_async_members(), loop)
except Exception as e:
print(e)
def stop_main(self):
asyncio.run_coroutine_threadsafe(self._stop_async_members(), self.loop)
# Cancel all tasks on the loop
for task in asyncio.all_tasks(self.loop):
task.cancel()
# Stop the loop
mw.ankiBrain.loop.call_soon_threadsafe(self.loop.stop)
def toggle_panel(self):
if self.sidePanel.isVisible():
self.sidePanel.hide()
mw.settingsManager.edit('showSidePanel', False)
else:
self.sidePanel.show()
mw.settingsManager.edit('showSidePanel', True)
def show_openai_api_key_dialog(self):
self.openai_api_key_dialog.show()
def handle_anki_card_webview_pycmd(self, handled, cmd, context):
try:
data = json.loads(cmd)
if data['cmd'] == 'selectedText':
print('detected text selection')
self.handle_text_selected(text=data['text'], position=data['position'])
return True, None
elif data['cmd'] == 'mousedown':
print('detected mousedown')
self.handle_mousedown()
return True, None
else:
return handled
except Exception as e:
print(e)
return handled
def handle_text_selected(self, text='', position=None):
if self.explainTalkButtons is not None:
self.explainTalkButtons.destroy()
self.selectedText = text
self.explainTalkButtons = ExplainTalkButtons(mw, position)
self.explainTalkButtons.on_explain_button_click(self.handle_explain_text_pressed)
self.explainTalkButtons.on_talk_button_click(self.handle_talk_text_pressed)
# Basically detecting highlight release.
def handle_mousedown(self):
if self.explainTalkButtons is not None:
self.explainTalkButtons.destroy()
self.selectedText = ''
def handle_explain_text_pressed(self):
self.sidePanel.webview.send_to_js({
'cmd': 'explainSelectedText',
'text': self.selectedText
})
self.explainTalkButtons.destroy()
self.selectedText = ''
def handle_talk_text_pressed(self):
self.sidePanel.webview.send_to_js({
'cmd': 'talkSelectedText',
'text': self.selectedText
})
self.explainTalkButtons.destroy()
self.selectedText = ''
def reinstall():
system = platform.system()
if system == 'Windows':
run_win_install()
elif system == 'Darwin':
run_macos_install()
elif system == 'Linux':
run_linux_install()
showInfo('Terminal updater has been launched. Restart Anki after install is completed.')
def show_changelog():
mw.changelog = ChangelogDialog(mw)
mw.changelog.show()
def add_ankibrain_menu_item(name: str, fn):
action = mw.ankibrain_menu.addAction(name)
qconnect(action.triggered, fn)
# Keep track of added actions for removal later if needed.
mw.menu_actions.append(action)
def remove_ankibrain_menu_actions():
for action in mw.menu_actions:
print(f'Removing menu action: {str(action)}')
mw.form.menubar.removeAction(action)
| [] |
2024-01-10 | harshalbhatia/osh | osh~models~curie.py | import os
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_type = os.getenv("OPENAI_API_TYPE")
openai.api_key = os.getenv("OPENAI_API_KEY")
openai.api_base = os.getenv("OPENAI_API_BASE")
openai.api_version = os.getenv("OPENAI_API_VERSION")
OPENAI_API_DEPLOYMENT_ID = os.getenv("OPENAI_API_DEPLOYMENT_ID")
def predict(data: dict):
response = openai.Completion.create(
engine="vchar-curie",
prompt=f"""You are a helpful assistant that helps users of Unix systems (linux/mac and the like) with their terminal execution issues. The user will provide input/error info and you're supposed to reply with suggestions to help them fix the problem. It could be alternate commands, missing flags, and the like. If you're not sure, suggest the user to read more about a particular term. Please suggest a correct command if you have one and then a brief explanation.n\nDetails: MaxTokens = 150+{"; ".join(list(map(lambda i: ": ".join(i), data.items())))} \n\nExplanation: """,
temperature=0.7,
max_tokens=250,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
best_of=1,
stop=None)
return response["choices"][0]["text"]
| [
"; ",
": "
] |
2024-01-10 | harshalbhatia/osh | osh~models~turbo.py | import os
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_type = os.getenv("OPENAI_API_TYPE")
openai.api_key = os.getenv("OPENAI_API_KEY")
openai.api_base = os.getenv("OPENAI_API_BASE")
openai.api_version = os.getenv("OPENAI_API_VERSION")
OPENAI_API_DEPLOYMENT_ID = os.getenv("OPENAI_API_DEPLOYMENT_ID")
def predict(data: dict):
response = openai.ChatCompletion.create(
deployment_id=OPENAI_API_DEPLOYMENT_ID,
messages=[
{
"role":
"system",
"content":
"You are a helpful assistant that helps users of Unix systems (linux/mac and the like) with their terminal execution issues. The user will provide input/error info and you're supposed to reply with suggestions to help them fix the problem. It could be alternate commands, missing flags, and the like. If you're not sure, suggest the user to read more about a particular term. Please suggest a correct command if you have one and then a brief explanation."
},
{
"role": "user",
# TODO: escaping logic and add ""
"content": ";".join(list(map(lambda i: ": ".join(i), data.items())))
},
],
temperature=0
)
return response['choices'][0]['message']['content']
| [
": ",
"You are a helpful assistant that helps users of Unix systems (linux/mac and the like) with their terminal execution issues. The user will provide input/error info and you're supposed to reply with suggestions to help them fix the problem. It could be alternate commands, missing flags, and the like. If you're not sure, suggest the user to read more about a particular term. Please suggest a correct command if you have one and then a brief explanation."
] |
2024-01-10 | gitgithan/uplimit | podcast_backend.py | import modal
def download_whisper():
# Load the Whisper model
import os
import whisper
print ("Download the Whisper model")
# Perform download only once and save to Container storage
whisper._download(whisper._MODELS["medium"], '/content/podcast/', False)
stub = modal.Stub("corise-podcast-project")
corise_image = modal.Image.debian_slim().pip_install("feedparser",
"https://github.com/openai/whisper/archive/9f70a352f9f8630ab3aa0d06af5cb9532bd8c21d.tar.gz",
"requests",
"ffmpeg",
"openai",
"tiktoken",
"wikipedia",
"ffmpeg-python").apt_install("ffmpeg").run_function(download_whisper)
@stub.function(image=corise_image, gpu="any", timeout=600)
def get_transcribe_podcast(rss_url, local_path):
print ("Starting Podcast Transcription Function")
print ("Feed URL: ", rss_url)
print ("Local Path:", local_path)
# Read from the RSS Feed URL
import feedparser
intelligence_feed = feedparser.parse(rss_url)
podcast_title = intelligence_feed['feed']['title']
  episode_title = intelligence_feed.entries[0]['title']
episode_image = intelligence_feed['feed']['image'].href
for item in intelligence_feed.entries[0].links:
if (item['type'] == 'audio/mpeg'):
episode_url = item.href
episode_name = "podcast_episode.mp3"
print ("RSS URL read and episode URL: ", episode_url)
# Download the podcast episode by parsing the RSS feed
from pathlib import Path
p = Path(local_path)
p.mkdir(exist_ok=True)
print ("Downloading the podcast episode")
import requests
with requests.get(episode_url, stream=True) as r:
r.raise_for_status()
episode_path = p.joinpath(episode_name)
with open(episode_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
print ("Podcast Episode downloaded")
# Load the Whisper model
import os
import whisper
# Load model from saved location
print ("Load the Whisper model")
model = whisper.load_model('medium', device='cuda', download_root='/content/podcast/')
# Perform the transcription
print ("Starting podcast transcription")
result = model.transcribe(local_path + episode_name)
# Return the transcribed text
print ("Podcast transcription completed, returning results...")
output = {}
output['podcast_title'] = podcast_title
output['episode_title'] = episode_title
output['episode_image'] = episode_image
output['episode_transcript'] = result['text']
return output
@stub.function(image=corise_image, secret=modal.Secret.from_name("my-openai-secret"))
def get_podcast_summary(podcast_transcript):
import openai
instructPrompt = """
Summarize the podcast in bullet points on the topics discussed and references to people, places, books, websites.
"""
request = instructPrompt + podcast_transcript
chatOutput = openai.ChatCompletion.create(model="gpt-3.5-turbo",
messages=[{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": request}
]
)
podcastSummary = chatOutput.choices[0].message.content
return podcastSummary
@stub.function(image=corise_image, secret=modal.Secret.from_name("my-openai-secret"))
def get_podcast_guest(podcast_transcript):
import openai
import wikipedia
import json
podcastGuest = ''
return podcastGuest
@stub.function(image=corise_image, secret=modal.Secret.from_name("my-openai-secret"))
def get_podcast_highlights(podcast_transcript):
import openai
podcastHighlights = ''
return podcastHighlights
@stub.function(image=corise_image, secret=modal.Secret.from_name("my-openai-secret"), timeout=1200)
def process_podcast(url, path):
output = {}
podcast_details = get_transcribe_podcast.call(url, path)
podcast_summary = get_podcast_summary.call(podcast_details['episode_transcript'])
podcast_guest = get_podcast_guest.call(podcast_details['episode_transcript'])
podcast_highlights = get_podcast_highlights.call(podcast_details['episode_transcript'])
output['podcast_details'] = podcast_details
output['podcast_summary'] = podcast_summary
output['podcast_guest'] = podcast_guest
output['podcast_highlights'] = podcast_highlights
return output
@stub.local_entrypoint()
def test_method(url, path):
output = {}
podcast_details = get_transcribe_podcast.call(url, path)
print ("Podcast Summary: ", get_podcast_summary.call(podcast_details['episode_transcript']))
print ("Podcast Guest Information: ", get_podcast_guest.call(podcast_details['episode_transcript']))
print ("Podcast Highlights: ", get_podcast_highlights.call(podcast_details['episode_transcript'])) | [
"You are a helpful assistant.",
"\n Summarize the podcast in bullet points on the topics discussed and references to people, places, books, websites.\n ",
"\n Summarize the podcast in bullet points on the topics discussed and references to people, places, books, websites.\n PLACEHOLDER"
] |
2024-01-10 | tencent-ailab/TPolicies | tpolicies~layers.py | """layers extension in the style of tf.layers/slim.layers"""
from collections import namedtuple
from functools import partial
import tensorflow as tf
from tensorflow.contrib.framework import add_arg_scope
import tensorflow.contrib.layers as tfc_layers
from tensorflow.contrib.layers.python.layers import utils as lutils
from tensorflow.contrib.layers import xavier_initializer
import tpolicies.ops as tp_ops
from tpolicies import ops as tp_ops
from tpolicies.ops import INF, cat_sample_from_logits, ortho_init
from tpolicies.ops import one_step_lstm_op
from tpolicies.utils.distributions import CategoricalPdType, BernoulliPdType
from tpolicies.utils.distributions import MaskSeqCategoricalPdType
from tpolicies.utils.distributions import DiagGaussianPdType
@add_arg_scope
def identity_layer(inputs, outputs_collections=None, scope=None):
"""Identity layer.
Args:
inputs: A Tensor
outputs_collections:
scope:
Returns:
    An output `Tensor`.
"""
with tf.variable_scope(scope, default_name='identity_layer') as sc:
outputs = tf.identity(inputs)
return lutils.collect_named_outputs(outputs_collections, sc.name, outputs)
@add_arg_scope
def glu(inputs, context, output_size, outputs_collections=None, scope=None):
"""Gated Linear Units.
The impl follows the GLU described in the Supplementary Material of AlphaStar
Nature paper.
Args:
inputs: (bs, M), the input `Tensor`
context: (bs, N), the context `Tensor`
output_size: int, output size
outputs_collections:
scope:
Returns:
An output `Tensor`.
"""
inputs_shape = inputs.get_shape().as_list()
assert len(inputs_shape) == 2
inputs_size = inputs_shape[1]
with tf.variable_scope(scope, default_name='glu') as sc:
# NOTE(pengsun): activation_fn must be None
gate = tf.nn.sigmoid(
tfc_layers.fully_connected(context, inputs_size, activation_fn=None)
)
gated_inputs = tf.math.multiply(gate, inputs) # elementwise times
# NOTE(pengsun): activation_fn must be None
outputs = tfc_layers.fully_connected(gated_inputs, output_size,
activation_fn=None)
return lutils.collect_named_outputs(outputs_collections, sc.name, outputs)
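# A tiny shape-check sketch (added for illustration, not original tpolicies
# code), assuming the TF1-style graph mode used throughout this module.
def _example_glu():
  x = tf.placeholder(tf.float32, [None, 64])    # inputs: (bs, M=64)
  ctx = tf.placeholder(tf.float32, [None, 32])  # context: (bs, N=32)
  return glu(x, ctx, output_size=128)           # -> (bs, 128)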
# sparse embedding stuff
@add_arg_scope
def linear_embed(inputs,
vocab_size,
enc_size,
inverse_embed=False,
outputs_collections=None,
weights_initializer=ortho_init(scale=1.0),
weights_regularizer=None,
scope=None):
"""Linear embedding layer, simply tf.nn.embedding_lookup or inverse embedding.
  In the case of linear embedding (inverse_embed=False), the inputs is a
  Tensor of indices (a discrete-to-dense embedding); in the case of inverse
  embedding (inverse_embed=True), the inputs is a dense Tensor that is mapped
  back to vocab_size logits.
Args:
inputs: when inverse_embed=False, (bs, d1, ...), each is an index in
range(vocab_size); when inverse_embed=True, (bs, enc_size)
vocab_size:
enc_size:
inverse_embed: True: a "enc_size -> vocab_size" query; False: a
"vocab_size -> enc_size" embedding
weights_initializer:
weights_regularizer:
scope:
Returns:
An outputs `Tensor`.
"""
with tf.variable_scope(scope, default_name='linear_embed') as sc:
weights = tf.get_variable('weights', (vocab_size, enc_size),
initializer=weights_initializer,
regularizer=weights_regularizer)
if not inverse_embed:
assert inputs.dtype in [tf.int32, tf.int64], 'inputs must be index'
outputs = tf.nn.embedding_lookup(weights, inputs)
outputs_alias = sc.name
else:
assert inputs.dtype in [tf.float16, tf.float32, tf.float64], (
'inputs must be a dense tensor')
outputs = tf.matmul(inputs, weights, transpose_b=True)
outputs_alias = sc.name + '_inverse'
return lutils.collect_named_outputs(outputs_collections, outputs_alias,
outputs)
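# A usage sketch (illustration only): embed token ids to dense vectors, then
# map a dense feature back to vocab_size logits with the same weights. The
# weight sharing via an outer AUTO_REUSE scope is an assumption about how a
# caller would wire it, not code from the original repo.
def _example_linear_embed():
  ids = tf.placeholder(tf.int32, [None, 16])      # (bs, 16) token ids
  h = tf.placeholder(tf.float32, [None, 64])      # (bs, 64) dense feature
  with tf.variable_scope('tok_embed', reuse=tf.AUTO_REUSE):
    emb = linear_embed(ids, vocab_size=100, enc_size=64, scope='le')
    logits = linear_embed(h, vocab_size=100, enc_size=64,
                          inverse_embed=True, scope='le')
  return emb, logits                              # (bs, 16, 64), (bs, 100)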
# normalization stuff
@add_arg_scope
def ln(inputs,
epsilon=1e-8,
begin_norm_axis=-1,
activation_fn=None,
enable_openai_impl=False,
scope=None):
"""Applies layer normalization.
See https://arxiv.org/abs/1607.06450.
  CAUTION: presumes the last dim (shape[-1]) is the feature dim.
TODO(pengsun): doc from where this impl is borrowed
Args:
inputs: A tensor with 2 or more dimensions, where the first dimension
has `batch_size`.
epsilon: A floating number. A very small number for preventing ZeroDivision
Error.
begin_norm_axis: beginning dim
activation_fn: activation function. None means no activation.
enable_openai_impl:
scope: Optional scope for `variable_scope`.
Returns:
A tensor with the same shape and data dtype as `inputs`.
"""
with tf.variable_scope(scope, default_name="ln"):
inputs_shape = inputs.get_shape()
params_shape = inputs_shape[-1:]
inputs_rank = inputs_shape.ndims
if begin_norm_axis < 0:
begin_norm_axis = inputs_rank + begin_norm_axis
if begin_norm_axis >= inputs_rank:
raise ValueError('begin_norm_axis (%d) must be < rank(inputs) (%d)' %
(begin_norm_axis, inputs_rank))
norm_axes = list(range(begin_norm_axis, inputs_rank))
mean, variance = tf.nn.moments(inputs, norm_axes, keep_dims=True)
beta = tf.get_variable("beta", params_shape,
initializer=tf.zeros_initializer())
gamma = tf.get_variable("gamma", params_shape,
initializer=tf.ones_initializer())
if enable_openai_impl:
normalized = (inputs - mean) / tf.sqrt(variance + epsilon)
outputs = normalized * gamma + beta
else:
normalized = (inputs - mean) / ((variance + epsilon) ** (.5))
outputs = gamma * normalized + beta
if activation_fn is not None:
outputs = activation_fn(outputs)
return outputs
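# A minimal usage sketch (illustration only): normalize the last (feature)
# dim of a (bs, T, C) tensor, as the attention blocks below do.
def _example_ln():
  x = tf.placeholder(tf.float32, [None, 10, 256])
  return ln(x, activation_fn=tf.nn.relu)  # same shape, normalized over C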
@add_arg_scope
def inst_ln(inputs,
epsilon=1e-8,
enable_openai_impl=False,
activation_fn=None,
scope=None):
"""Applies Instance normalization.
See https://arxiv.org/pdf/1607.08022.pdf.
  CAUTION: presumes the last dim (shape[-1]) is the feature dim.
Args:
inputs: A tensor with 2 or more dimensions, where the first dimension
has `batch_size`.
epsilon: A floating number. A very small number for preventing ZeroDivision
Error.
activation_fn: activation function. None means no activation.
enable_openai_impl:
scope: Optional scope for `variable_scope`.
Returns:
A tensor with the same shape and data dtype as `inputs`.
"""
with tf.variable_scope(scope, default_name="inst_ln"):
inputs_shape = inputs.get_shape()
params_shape = inputs_shape[-1:]
inputs_rank = inputs_shape.ndims
norm_axes = list(range(1, inputs_rank - 1))
mean, variance = tf.nn.moments(inputs, norm_axes, keep_dims=True)
beta = tf.get_variable("beta", params_shape,
initializer=tf.zeros_initializer())
gamma = tf.get_variable("gamma", params_shape,
initializer=tf.ones_initializer())
if enable_openai_impl:
normalized = (inputs - mean) / tf.sqrt(variance + epsilon)
outputs = normalized * gamma + beta
else:
normalized = (inputs - mean) / ((variance + epsilon) ** (.5))
outputs = gamma * normalized + beta
if activation_fn is not None:
outputs = activation_fn(outputs)
return outputs
# dense-/res- net stuff
@add_arg_scope
def dense_sum_blocks(inputs, n, enc_dim, layer_norm: bool = True,
outputs_collections=None, scope=None):
"""Dense-sum blocks with fully connected layers.
Args:
inputs:
    n: Number of blocks. Each block has exactly one fully connected layer.
enc_dim: output encoding dim.
layer_norm: Whether to use layer norm
outputs_collections:
scope:
Returns:
An outputs `Tensor`.
"""
with tf.variable_scope(scope, default_name='densesum_blks') as sc:
embed = inputs
pre_embeds_sum = None
for i in range(n):
embed = tfc_layers.fully_connected(embed, enc_dim)
if i == 0:
pre_embeds_sum = embed
else:
pre_embeds_sum += embed
embed = pre_embeds_sum
if layer_norm:
embed = ln(embed, epsilon=1e-8, scope='ln_'+str(i))
return lutils.collect_named_outputs(outputs_collections, sc.name, embed)
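# A shape sketch (illustration only): n fully connected layers whose running
# sum feeds the next layer, ending with enc_dim features.
def _example_dense_sum_blocks():
  x = tf.placeholder(tf.float32, [None, 32])
  return dense_sum_blocks(x, n=3, enc_dim=64)  # -> (bs, 64)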
@add_arg_scope
def dense_sum_conv_blocks(inputs, n, ch_dim, k_size,
mode: str = '2d',
layer_norm: bool = True,
outputs_collections=None,
scope=None):
"""Dense-sum blocks with 1D or 2D convolutional layers.
Args:
inputs:
    n: Number of blocks. Each block has exactly one convolutional layer.
ch_dim: output channel dim.
k_size: int or tuple, kernel size
mode: str, '1d' or '2d'
layer_norm: Whether to use layer norm
outputs_collections:
scope:
Returns:
An outputs `Tensor`.
"""
with tf.variable_scope(
scope,
default_name='densesum_conv{}_blks'.format(mode)) as sc:
embed = inputs
pre_embeds_sum = None
if mode == '2d':
conv_layer = tfc_layers.conv2d
kernel_size = [k_size, k_size]
elif mode == '1d':
conv_layer = tfc_layers.conv1d
kernel_size = k_size
else:
raise ValueError('Unknown mode {}.'.format(mode))
for i in range(n):
embed = conv_layer(embed, ch_dim, kernel_size)
if i == 0:
pre_embeds_sum = embed
else:
pre_embeds_sum += embed
embed = pre_embeds_sum
if layer_norm:
embed = ln(embed, epsilon=1e-8, scope='ln_'+str(i))
return lutils.collect_named_outputs(outputs_collections, sc.name, embed)
@add_arg_scope
def res_sum_blocks(inputs,
n_blk: int,
n_skip: int,
enc_dim: int,
layer_norm: bool = False,
relu_input: bool = False,
outputs_collections=None,
scope=None):
"""Residual sum blocks with fully connected layers.
The res blocks are more usual with conv layers, some refs are:
ref1: https://github.com/deepmind/scalable_agent/blob/master/experiment.py#L152-L173
ref2: https://github.com/google-research/tf-slim/blob/master/tf_slim/nets/resnet_v1.py#L107-L126
Our impl here is similar to [ref2], where we add a "shortcut connection" with
the "residual connection" whose enc_dim are the same. [ref1] uses an
extra layer to enforce the same end_dim and then takes the sum.
Args:
inputs: a Tensor, (batch_size, inputs_dim)
n_blk: int, how many block
n_skip: int, how many conv layers to skip inside a block
enc_dim: int, output encoding dim
layer_norm: Whether to use layer norm
relu_input: Whether to relu the inputs
outputs_collections: str, outputs collections
scope: scope or scope
Returns:
A Tensor, the outputs, (batch_size, enc_dim)
"""
embed = inputs
with tf.variable_scope(scope, default_name='res_sum_blks') as sc:
for i in range(n_blk):
blk_inputs_dim = embed.shape[-1].value # last dim as feature dim
# shortcut connection
shortcut_scope = 'blk{}_shortcut'.format(i)
if blk_inputs_dim == enc_dim:
shortcut = identity_layer(embed, scope=shortcut_scope)
else:
shortcut = tfc_layers.fully_connected(embed, enc_dim,
activation_fn=None,
scope=shortcut_scope)
# residual connection
if relu_input:
embed = tf.nn.relu(embed)
for j in range(n_skip-1):
embed = tfc_layers.fully_connected(embed, enc_dim,
activation_fn=tf.nn.relu,
scope='blk{}_fc{}'.format(i, j))
embed = tfc_layers.fully_connected(embed, enc_dim, activation_fn=None,
scope='blk{}_fc{}'.format(
i, n_skip - 1)
)
# shortcut + residual
combined = shortcut + embed
if layer_norm:
combined = ln(combined, epsilon=1e-8, scope='ln_'+str(i))
embed = tf.nn.relu(combined)
return lutils.collect_named_outputs(outputs_collections,
sc.original_name_scope, embed)
@add_arg_scope
def res_sum_blocks_v2(inputs,
n_blk: int,
n_skip: int,
enc_dim: int,
layer_norm: bool = False,
outputs_collections=None,
scope=None):
"""Residual sum blocks with fully connected layers, v2.
Our impl is much like:
https://github.com/google-research/tf-slim/blob/8f0215e924996d7287392241bc8d8b1133d0c5ca/tf_slim/nets/resnet_v2.py#L102-L131
and the cited papers therein.
In our impl, the basic block looks:
when n_skip = 1,
- relu - weight -
then input adds to output.
when n_skip = 2,
- relu - weight - relu - weight -
then input adds to output, etc.
An optional layer_norm can be inserted *BEFORE* relu.
NOTE: add a leading layer WITHOUT activation and normalization to enforce the
channel size, see:
https://github.com/google-research/tf-slim/blob/8f0215e924996d7287392241bc8d8b1133d0c5ca/tf_slim/nets/resnet_v2.py#L213-L215
NOTE: add normalization + relu to the outputs when used as last layer, see:
https://github.com/google-research/tf-slim/blob/8f0215e924996d7287392241bc8d8b1133d0c5ca/tf_slim/nets/resnet_v2.py#L221-L223
Args:
inputs: a Tensor, (batch_size, inputs_dim)
n_blk: int, how many blocks
n_skip: int, how many weight layers to skip inside a block (i.e., how many
weight layers in the residual branch)
enc_dim: int, output encoding dim
layer_norm: Whether to use layer norm
outputs_collections: str, outputs collections
scope: scope or scope
Returns:
A Tensor, the outputs, (batch_size, enc_dim)
"""
embed = inputs
with tf.variable_scope(scope, default_name='res_sum_blks_v2') as sc:
for i in range(n_blk):
blk_inputs_dim = embed.shape[-1].value # last dim as feature dim
# shortcut connection (simply checking the channel dim)
shortcut = identity_layer(embed, scope='blk{}_shortcut'.format(i))
assert blk_inputs_dim == enc_dim, """
input dim {} must == enc dim {}. Otherwise, use a preceding layer WITHOUT
activation to enforce this.
""".format(blk_inputs_dim, enc_dim)
# residual connection
for j in range(n_skip):
if layer_norm:
embed = ln(embed, epsilon=1e-8, scope='ln_blk{}_fc{}'.format(i, j))
embed = tf.nn.relu(embed)
embed = tfc_layers.fully_connected(
embed, enc_dim,
activation_fn=None,
biases_initializer=None if layer_norm else tf.zeros_initializer(),
scope='blk{}_fc{}'.format(i, j)
)
# combine: shortcut + residual
embed = shortcut + embed
return lutils.collect_named_outputs(outputs_collections,
sc.original_name_scope, embed)
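# A wiring sketch for the two NOTEs above (an assumption about intended use,
# not code from the original repo): a leading fc WITHOUT activation enforces
# enc_dim, and a trailing norm + relu closes the pre-activation stack.
def _example_res_sum_blocks_v2():
  x = tf.placeholder(tf.float32, [None, 32])
  h = tfc_layers.fully_connected(x, 64, activation_fn=None)  # enforce enc_dim
  h = res_sum_blocks_v2(h, n_blk=2, n_skip=2, enc_dim=64, layer_norm=True)
  return tf.nn.relu(ln(h, scope='post_ln'))                  # final norm + relu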
@add_arg_scope
def res_sum_conv_blocks(inputs,
n_blk: int,
n_skip: int,
ch_dim: int,
k_size: int,
mode: str = '2d',
layer_norm: bool = False,
outputs_collections=None,
scope=None):
"""Residual sum blocks with 1D or 2D convolutional layer.
ref1: https://github.com/deepmind/scalable_agent/blob/master/experiment.py#L152-L173
ref2: https://github.com/google-research/tf-slim/blob/master/tf_slim/nets/resnet_v1.py#L107-L126
Our impl here is similar to [ref2], where we add a "shortcut connection" with
the "residual connection" whose ch_dim are the same. [ref1] uses an
extra layer to enforce the same ch_dim and then takes the sum.
Args:
inputs: a Tensor, NHWC format (batch_size, H, W, inputs_dim)
n_blk: int, how many blocks
n_skip: int, how many conv layers to skip inside a block
ch_dim: int, channel dim
    k_size: int, kernel size for 1D or 2D conv
    mode: str, '1d' or '2d'
layer_norm: Whether to use layer norm
outputs_collections: str, outputs collection
scope: scope or scope
Returns:
A Tensor, the outputs, (batch_size, H, W, ch_dim)
"""
embed = inputs
# Note the tfc_layers.convXd padding defaults to SAME
if mode == '2d':
conv_layer = tfc_layers.conv2d
shortcut_k_size = [1, 1]
k_size = [k_size, k_size]
elif mode == '1d':
conv_layer = tfc_layers.conv1d
shortcut_k_size = 1
k_size = k_size
else:
raise ValueError('Unknown mode {}'.format(mode))
with tf.variable_scope(scope,
default_name='res_conv{}_blks'.format(mode)) as sc:
for i in range(n_blk):
blk_inputs_dim = embed.shape[-1].value # last dim as channel dim
# shortcut connection
shortcut_scope = 'blk{}_shortcut'.format(i)
if blk_inputs_dim == ch_dim:
shortcut = identity_layer(embed, scope=shortcut_scope)
else:
shortcut = conv_layer(embed, ch_dim, shortcut_k_size,
activation_fn=None, scope=shortcut_scope)
# residual connection
for j in range(n_skip-1):
embed = conv_layer(embed, ch_dim, k_size, activation_fn=tf.nn.relu,
scope='blk{}_conv{}'.format(i, j))
embed = conv_layer(embed, ch_dim, k_size, activation_fn=None,
scope='blk{}_conv{}'.format(i, n_skip - 1))
# shortcut + residual
combined = shortcut + embed
if layer_norm:
combined = ln(combined, epsilon=1e-8, scope='ln_'+str(i))
embed = tf.nn.relu(combined)
return lutils.collect_named_outputs(outputs_collections,
sc.original_name_scope, embed)
@add_arg_scope
def res_sum_bottleneck_blocks(inputs,
n_blk: int,
n_skip: int,
ch_dim: int,
bottleneck_ch_dim: int,
k_size: int,
mode: str = '2d',
layer_norm: bool = False,
layer_norm_type='inst_ln',
outputs_collections=None,
scope=None):
"""Residual sum blocks with 1D or 2D convolutional bottleneck layer.
ref1: https://github.com/deepmind/scalable_agent/blob/master/experiment.py#L152-L173
ref2: https://github.com/google-research/tf-slim/blob/master/tf_slim/nets/resnet_v1.py#L107-L126
Our impl here is similar to [ref2], where we add a "shortcut connection" with
the "residual connection" whose ch_dim are the same. [ref1] uses an
extra layer to enforce the same ch_dim and then takes the sum.
Args:
inputs: a Tensor, NHWC format (batch_size, H, W, inputs_dim)
    n_blk: int, how many bottleneck blocks
    n_skip: int, how many conv layers to skip inside a bottleneck block
    ch_dim: int, channel dim
    bottleneck_ch_dim: int, bottleneck channel dim, usually < ch_dim
    k_size: int, kernel size for 1D or 2D conv
    mode: str, '1d' or '2d'
    layer_norm: Whether to use layer norm
    layer_norm_type: str, type of layer norm
outputs_collections: str, outputs collection
scope: scope or scope
Returns:
A Tensor, the outputs, (batch_size, H, W, ch_dim)
"""
embed = inputs
# Note the tfc_layers.convXd padding defaults to SAME
if mode == '2d':
conv_layer = tfc_layers.conv2d
one_size = [1, 1]
k_size = [k_size, k_size]
elif mode == '1d':
conv_layer = tfc_layers.conv1d
one_size = 1
k_size = k_size
else:
raise ValueError('Unknown mode {}'.format(mode))
with tf.variable_scope(scope,
default_name='res_conv{}_blks'.format(mode)) as sc:
for i in range(n_blk):
blk_inputs_dim = embed.shape[-1].value # last dim as channel dim
# shortcut connection
shortcut_scope = 'blk{}_shortcut'.format(i)
if blk_inputs_dim == ch_dim:
shortcut = identity_layer(embed, scope=shortcut_scope)
else:
shortcut = conv_layer(embed, ch_dim, one_size,
activation_fn=None, scope=shortcut_scope)
# residual connection
embed = conv_layer(embed, bottleneck_ch_dim, one_size,
activation_fn=tf.nn.relu,
scope='blk{}_conv{}'.format(i, 0))
for j in range(n_skip):
embed = conv_layer(embed, bottleneck_ch_dim, k_size,
activation_fn=tf.nn.relu,
scope='blk{}_conv{}'.format(i, j+1))
embed = conv_layer(embed, ch_dim, one_size, activation_fn=None,
scope='blk{}_conv{}'.format(i, n_skip + 1))
# shortcut + residual
combined = shortcut + embed
if layer_norm:
if layer_norm_type == 'ln':
combined = ln(combined, begin_norm_axis=1, epsilon=1e-8,
scope='ln_'+str(i))
elif layer_norm_type == 'inst_ln':
combined = inst_ln(combined, epsilon=1e-8, scope='inst_ln' + str(i))
else:
raise KeyError('Unknown layer_norm_type {}'.format(layer_norm_type))
embed = tf.nn.relu(combined)
return lutils.collect_named_outputs(outputs_collections,
sc.original_name_scope, embed)
def res_sum_bottleneck_blocks_v2(inputs,
n_blk: int,
n_skip: int,
ch_dim: int,
bottleneck_ch_dim: int,
k_size: int,
mode: str = '2d',
layer_norm_type='inst_ln',
outputs_collections=None,
scope=None):
"""Residual sum blocks with 1D or 2D convolutional bottleneck layer, v2.
Our impl is much like:
https://github.com/google-research/tf-slim/blob/8f0215e924996d7287392241bc8d8b1133d0c5ca/tf_slim/nets/resnet_v2.py#L102-L131
which is the so-called "pre-activation res block". See also the cited papers
therein.
In our impl, the basic block looks:
when n_skip = 1
- relu - weight -
then input adds to output.
when n_skip = 2
- relu - weight - relu - weight -
then input adds to output, etc.
*EACH* weight layer should be understood as a bottleneck structure that
expands to a narrow-wide-wide three-conv-layer, i.e.,
- weight -
means
- 1x1conv_narrow - kxkconv_narrow - 1x1conv_wide
An optional layer_norm can be inserted *BEFORE* relu.
NOTE: add a leading layer WITHOUT activation and normalization to enforce the
channel size, see:
https://github.com/google-research/tf-slim/blob/8f0215e924996d7287392241bc8d8b1133d0c5ca/tf_slim/nets/resnet_v2.py#L213-L215
NOTE: add normalization + relu to the outputs when used as last layer, see:
https://github.com/google-research/tf-slim/blob/8f0215e924996d7287392241bc8d8b1133d0c5ca/tf_slim/nets/resnet_v2.py#L221-L223
Args:
inputs: a Tensor, NHWC format (batch_size, H, W, inputs_dim)
n_blk: int, how many blocks
n_skip: int, how many weight layers to skip inside a block (i.e., how
many weight layers in the residual branch)
ch_dim: int, channel dim
bottleneck_ch_dim: int, bottleneck channel dim, usually < ch_dim
k_size: int, kernel size for 1D or 2D conv
    mode: str, '1d' or '2d'
layer_norm_type: str, type of layer norm. None means no layer norm.
outputs_collections: str, outputs collection
scope: scope or scope
Returns:
A Tensor, the outputs, (batch_size, H, W, ch_dim)
"""
embed = inputs
# Note the tfc_layers.convXd padding defaults to SAME
if mode == '2d':
conv_layer = tfc_layers.conv2d
k_size_one = [1, 1]
k_size = [k_size, k_size]
elif mode == '1d':
conv_layer = tfc_layers.conv1d
k_size_one = 1
k_size = k_size
else:
raise ValueError('Unknown mode {}'.format(mode))
with tf.variable_scope(
scope,
default_name='res_sum_bottleneck{}_blks_v2'.format(mode)) as sc:
for i in range(n_blk):
blk_inputs_dim = embed.shape[-1].value # last dim as channel dim
# shortcut connection (simply checking the channel dim)
shortcut = identity_layer(embed, scope='blk{}_shortcut'.format(i))
assert blk_inputs_dim == ch_dim, """
input dim {} must == ch dim {}. Otherwise, use a preceding layer WITHOUT
activation to enforce this.
""".format(blk_inputs_dim, ch_dim)
# residual connection
for j in range(n_skip):
# (bs, H, W, C)
if layer_norm_type is not None:
# NOTE(pengsun): a single layer_norm should suffice here
if layer_norm_type == 'ln':
embed = ln(embed, begin_norm_axis=1, epsilon=1e-8,
scope='ln_blk{}_{}'.format(i, j))
elif layer_norm_type == 'inst_ln':
embed = inst_ln(embed, epsilon=1e-8,
scope='inst_ln_blk{}_{}'.format(i, j))
else:
raise KeyError('Unknown layer_norm_type {}'.format(layer_norm_type))
embed = tf.nn.relu(embed)
# (bs, H, W, C)
embed = conv_layer(embed, bottleneck_ch_dim, k_size_one,
activation_fn=tf.nn.relu,
scope='blk{}_conv{}_0'.format(i, j))
# (bs, H, W, BC)
embed = conv_layer(embed, bottleneck_ch_dim, k_size,
activation_fn=tf.nn.relu,
scope='blk{}_conv{}_1'.format(i, j))
# (bs H, W, BC)
embed = conv_layer(
embed, ch_dim, k_size_one,
activation_fn=None,
biases_initializer=(None if layer_norm_type else
tf.zeros_initializer()),
scope='blk{}_conv{}_2'.format(i, j)
)
# (bs, H, W, C)
# combine: shortcut + residual
embed = shortcut + embed
return lutils.collect_named_outputs(outputs_collections,
sc.original_name_scope, embed)
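# A wiring sketch for the NOTEs above (an assumption about intended use, not
# original code): a 1x1 conv WITHOUT activation enforces ch_dim before the
# pre-activation blocks, and a final norm + relu is added when this is the
# last conv stage.
def _example_res_sum_bottleneck_blocks_v2():
  x = tf.placeholder(tf.float32, [None, 16, 16, 3])          # NHWC
  h = tfc_layers.conv2d(x, 64, [1, 1], activation_fn=None)   # enforce ch_dim
  h = res_sum_bottleneck_blocks_v2(h, n_blk=2, n_skip=2, ch_dim=64,
                                   bottleneck_ch_dim=32, k_size=3)
  return tf.nn.relu(inst_ln(h, scope='post_inst_ln'))        # final norm + relu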
def res_sum_bottleneck_blocks_v3(inputs,
n_blk: int,
n_skip: int,
ch_dim: int,
bottleneck_ch_dim: int,
k_size: int,
mode: str = '2d',
layer_norm_type='inst_ln',
outputs_collections=None,
scope=None):
"""Residual sum blocks with 1D or 2D convolutional bottleneck layer, v3.
Our impl is much like:
https://github.com/google-research/tf-slim/blob/8f0215e924996d7287392241bc8d8b1133d0c5ca/tf_slim/nets/resnet_v2.py#L102-L131
which is the so-called "pre-activation res block". See also the cited papers
therein.
In our impl, the basic block looks:
when n_skip = 1
- relu - weight -
then input adds to output.
when n_skip = 2
- relu - weight - relu - weight -
then input adds to output, etc.
*EACH* weight layer should be understood as a bottleneck structure that
expands to a narrow-wide-wide three-conv-layer, i.e.,
- weight -
means
- 1x1conv_narrow - kxkconv_narrow - 1x1conv_wide
An optional layer_norm can be inserted *BEFORE* relu.
NOTE: add a leading layer WITHOUT activation and normalization to enforce the
channel size, see:
https://github.com/google-research/tf-slim/blob/8f0215e924996d7287392241bc8d8b1133d0c5ca/tf_slim/nets/resnet_v2.py#L213-L215
NOTE: add normalization + relu to the outputs when used as last layer, see:
https://github.com/google-research/tf-slim/blob/8f0215e924996d7287392241bc8d8b1133d0c5ca/tf_slim/nets/resnet_v2.py#L221-L223
NOTE: normalization after EACH bottleneck conv weight, different from v2
Args:
inputs: a Tensor, NHWC format (batch_size, H, W, inputs_dim)
n_blk: int, how many blocks
n_skip: int, how many weight layers to skip inside a block (i.e., how
many weight layers in the residual branch)
ch_dim: int, channel dim
bottleneck_ch_dim: int, bottleneck channel dim, usually < ch_dim
k_size: int, kernel size for 1D or 2D conv
    mode: str, '1d' or '2d'
layer_norm_type: str, type of layer norm. None means no layer norm.
outputs_collections: str, outputs collection
scope: scope or scope
Returns:
A Tensor, the outputs, (batch_size, H, W, ch_dim)
"""
embed = inputs
# Note the tfc_layers.convXd padding defaults to SAME
if mode == '2d':
conv_layer = tfc_layers.conv2d
k_size_one = [1, 1]
k_size = [k_size, k_size]
elif mode == '1d':
conv_layer = tfc_layers.conv1d
k_size_one = 1
k_size = k_size
else:
raise ValueError('Unknown mode {}'.format(mode))
with tf.variable_scope(
scope,
default_name='res_sum_bottleneck{}_blks_v3'.format(mode)) as sc:
for i in range(n_blk):
blk_inputs_dim = embed.shape[-1].value # last dim as channel dim
# shortcut connection (simply checking the channel dim)
shortcut = identity_layer(embed, scope='blk{}_shortcut'.format(i))
assert blk_inputs_dim == ch_dim, """
input dim {} must == ch dim {}. Otherwise, use a preceding layer WITHOUT
activation to enforce this.
""".format(blk_inputs_dim, ch_dim)
# residual connection
conv_norm = None
if layer_norm_type is not None:
# TODO(pengsun): refactor the code, combine with pre-act norm stuff
if layer_norm_type == 'ln':
conv_norm = ln
elif layer_norm_type == 'inst_ln':
conv_norm = inst_ln
else:
raise KeyError('Unknown layer_norm_type {}'.format(layer_norm_type))
for j in range(n_skip):
# (bs, H, W, C)
if layer_norm_type is not None:
# pre-activation normalization if any
if layer_norm_type == 'ln':
embed = ln(embed, begin_norm_axis=1, epsilon=1e-8,
scope='ln_blk{}_{}'.format(i, j))
elif layer_norm_type == 'inst_ln':
embed = inst_ln(embed, epsilon=1e-8,
scope='inst_ln_blk{}_{}'.format(i, j))
else:
raise KeyError('Unknown layer_norm_type {}'.format(layer_norm_type))
embed = tf.nn.relu(embed)
# (bs, H, W, C)
embed = conv_layer(embed, bottleneck_ch_dim, k_size_one,
activation_fn=tf.nn.relu,
normalizer_fn=conv_norm,
scope='blk{}_conv{}_0'.format(i, j))
# (bs, H, W, BC)
embed = conv_layer(embed, bottleneck_ch_dim, k_size,
activation_fn=tf.nn.relu,
normalizer_fn=conv_norm,
scope='blk{}_conv{}_1'.format(i, j))
# (bs H, W, BC)
embed = conv_layer(
embed, ch_dim, k_size_one,
activation_fn=None,
normalizer_fn=None,
biases_initializer=(None if layer_norm_type else
tf.zeros_initializer()),
scope='blk{}_conv{}_2'.format(i, j)
)
# (bs, H, W, C)
# combine: shortcut + residual
embed = shortcut + embed
return lutils.collect_named_outputs(outputs_collections,
sc.original_name_scope, embed)
# transformer stuff
def trans_mask(inputs, queries=None, keys=None, mtype=None):
"""Masks paddings on keys or queries to inputs.
TODO: doc where it is from
e.g.,
>> queries = tf.constant([[[1.],
[2.],
[0.]]], tf.float32) # (1, 3, 1)
>> keys = tf.constant([[[4.],
[0.]]], tf.float32) # (1, 2, 1)
>> inputs = tf.constant([[[4., 0.],
[8., 0.],
[0., 0.]]], tf.float32)
>> mask(inputs, queries, keys, "key")
array([[[ 4.0000000e+00, -4.2949673e+09],
[ 8.0000000e+00, -4.2949673e+09],
[ 0.0000000e+00, -4.2949673e+09]]], dtype=float32)
>> inputs = tf.constant([[[1., 0.],
[1., 0.],
[1., 0.]]], tf.float32)
>> mask(inputs, queries, keys, "query")
array([[[1., 0.],
[1., 0.],
[0., 0.]]], dtype=float32)
Args:
inputs: 3d tensor. (N, T_q, T_k)
queries: 3d tensor. (N, T_q, d)
keys: 3d tensor. (N, T_k, d)
mtype: str
Returns:
A `Tensor` representing the output mask.
"""
padding_num = -2 ** 32 + 1
if mtype in ("k", "key", "keys"):
# Generate masks
masks = tf.sign(tf.reduce_sum(tf.abs(keys), axis=-1)) # (N, T_k)
masks = tf.expand_dims(masks, 1) # (N, 1, T_k)
masks = tf.tile(masks, [1, tf.shape(queries)[1], 1]) # (N, T_q, T_k)
# Apply masks to inputs
paddings = tf.ones_like(inputs) * padding_num
outputs = tf.where(tf.equal(masks, 0), paddings, inputs) # (N, T_q, T_k)
elif mtype in ("q", "query", "queries"):
# Generate masks
masks = tf.sign(tf.reduce_sum(tf.abs(queries), axis=-1)) # (N, T_q)
masks = tf.expand_dims(masks, -1) # (N, T_q, 1)
masks = tf.tile(masks, [1, 1, tf.shape(keys)[1]]) # (N, T_q, T_k)
# Apply masks to inputs
outputs = inputs * masks
elif mtype in ("f", "future", "right"):
diag_vals = tf.ones_like(inputs[0, :, :]) # (T_q, T_k)
tril = tf.linalg.LinearOperatorLowerTriangular(
diag_vals).to_dense() # (T_q, T_k)
masks = tf.tile(tf.expand_dims(tril, 0),
[tf.shape(inputs)[0], 1, 1]) # (N, T_q, T_k)
paddings = tf.ones_like(masks) * padding_num
outputs = tf.where(tf.equal(masks, 0), paddings, inputs)
else:
print("Check if you entered mtype correctly!")
raise ValueError('wtf mtype {}?'.format(mtype))
return outputs
def scaled_dot_product_attention(Q, K, V,
causality=False, dropout_rate=0.,
training=True,
pointer=False,
scope=None):
""" Scaled dot product attention, See 3.2.1.
TODO: doc where it is from
  Args:
Q: Packed queries. 3d tensor. [N, T_q, d_k].
K: Packed keys. 3d tensor. [N, T_k, d_k].
V: Packed values. 3d tensor. [N, T_k, d_v].
causality: If True, applies masking for future blinding
dropout_rate: A floating point number of [0, 1].
    training: boolean for controlling dropout
    pointer: If True, return the pre-softmax logits and the softmax attention
      weights instead of the attended values.
    scope: Optional scope for `variable_scope`.
"""
with tf.variable_scope(scope, default_name="scaled_dot_product_attention"):
d_k = Q.get_shape().as_list()[-1]
# dot product
outputs = tf.matmul(Q, tf.transpose(K, [0, 2, 1])) # (N, T_q, T_k)
# scale
outputs /= d_k ** 0.5
# key masking
outputs = trans_mask(outputs, Q, K, mtype="key")
# causality or future blinding masking
if causality:
outputs = trans_mask(outputs, mtype="future")
# softmax
output_logits = outputs
outputs = tf.nn.softmax(outputs) # default axis=-1
if pointer:
return output_logits, outputs
attention = tf.transpose(outputs, [0, 2, 1]) # only used for tf.summary
tf.summary.image("attention", tf.expand_dims(attention[:1], -1))
# query masking
outputs = trans_mask(outputs, Q, K, mtype="query")
# dropout
outputs = tf.layers.dropout(outputs, rate=dropout_rate, training=training)
# weighted sum (context vectors)
outputs = tf.matmul(outputs, V) # (N, T_q, d_v)
return outputs
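# A shape sketch (illustration only): attend T_q=10 queries over T_k=20 keys
# with d_k = d_v = 64; the batch size N is left dynamic.
def _example_scaled_dot_product_attention():
  Q = tf.placeholder(tf.float32, [None, 10, 64])
  K = tf.placeholder(tf.float32, [None, 20, 64])
  V = tf.placeholder(tf.float32, [None, 20, 64])
  return scaled_dot_product_attention(Q, K, V, causality=False,
                                      dropout_rate=0.1, training=True)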
def scaled_dot_product_attention_v2(Q, K, V, matrix_mask,
dropout_rate=0.0,
scope=None):
""" Simplified from scaled_dot_product_attention
Args:
Q: Packed queries. 3d tensor. [N, T_q, d_k].
K: Packed keys. 3d tensor. [N, T_k, d_k].
V: Packed values. 3d tensor. [N, T_k, d_v].
    matrix_mask: A bool tensor with shape of [N, T_q, T_k]; positions that are
      False are masked out of the attention logits.
    dropout_rate: A floating point number of [0, 1].
scope: Optional scope for `variable_scope`.
"""
with tf.variable_scope(scope, default_name="scaled_dot_product_attention"):
d_k = Q.get_shape().as_list()[-1]
# dot product
outputs = tf.matmul(Q, tf.transpose(K, [0, 2, 1])) # (N, T_q, T_k)
# scale
outputs /= d_k ** 0.5
# key masking
outputs = trans_mask(outputs, Q, K, mtype="key")
# entry_mask
outputs = tp_ops.mask_logits(outputs, matrix_mask)
# softmax
outputs = tf.nn.softmax(outputs) # default axis=-1
# attention = tf.transpose(outputs, [0, 2, 1]) # only used for tf.summary
# tf.summary.image("attention", tf.expand_dims(attention[:1], -1))
# query masking
outputs = trans_mask(outputs, Q, K, mtype="query")
# dropout
outputs = tf.layers.dropout(outputs, rate=dropout_rate, training=True)
# weighted sum (context vectors)
outputs = tf.matmul(outputs, V) # (N, T_q, d_v)
return outputs
def multihead_attention(queries, keys, values,
num_heads=8,
dropout_rate=0.0,
training=True,
causality=False,
pointer=False,
scope=None):
"""Applies multihead attention. See 3.2.2
Original Reference: A TensorFlow Implementation of the Transformer: Attention Is All You Need
https://github.com/Kyubyong/transformer
Kyubyong Park's implementation, a highly starred repository besides google's
Args:
queries: A 3d tensor with shape of [N, T_q, d_model].
keys: A 3d tensor with shape of [N, T_k, d_model].
values: A 3d tensor with shape of [N, T_k, d_model].
num_heads: An int. Number of heads.
dropout_rate: A floating point number.
training: Boolean. Controller of mechanism for dropout.
causality: Boolean. If true, units that reference the future are masked.
scope: Optional scope for `variable_scope`.
Returns:
A 3d tensor with shape of (N, T_q, C)
"""
d_model = queries.get_shape().as_list()[-1]
with tf.variable_scope(scope, default_name='multihead_attention'):
# Linear projections
Q = tf.layers.dense(queries, d_model, use_bias=False) # (N, T_q, d_model)
K = tf.layers.dense(keys, d_model, use_bias=False) # (N, T_k, d_model)
V = tf.layers.dense(values, d_model, use_bias=False) # (N, T_k, d_model)
# Split and concat
Q_ = tf.concat(tf.split(Q, num_heads, axis=2),
axis=0) # (h*N, T_q, d_model/h)
K_ = tf.concat(tf.split(K, num_heads, axis=2),
axis=0) # (h*N, T_k, d_model/h)
V_ = tf.concat(tf.split(V, num_heads, axis=2),
axis=0) # (h*N, T_k, d_model/h)
# Attention
if pointer:
# do not use multi-head
logits, pd = scaled_dot_product_attention(Q, K, V, causality,
dropout_rate, training, pointer)
return logits, pd
else:
outputs = scaled_dot_product_attention(Q_, K_, V_, causality,
dropout_rate, training, pointer)
# Restore shape
outputs = tf.concat(tf.split(outputs, num_heads, axis=0),
axis=2) # (N, T_q, d_model)
# Residual connection
outputs += queries
# Normalize
outputs = ln(outputs)
return outputs
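# A self-attention usage sketch (illustration only): queries == keys ==
# values, 8 heads over a (N, T=12, d_model=256) sequence; d_model must be
# divisible by num_heads.
def _example_multihead_attention():
  x = tf.placeholder(tf.float32, [None, 12, 256])
  return multihead_attention(x, x, x, num_heads=8, dropout_rate=0.1,
                             training=True, causality=False)  # (N, 12, 256)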
def multihead_attention_v2(queries, keys, values, entry_mask,
num_heads=8,
dropout_rate=0.0,
scope=None):
"""Simplified from multihead_attention
Args:
queries: A 3d tensor with shape of [N, T_q, d_model].
keys: A 3d tensor with shape of [N, T_k, d_model].
    values: A 3d tensor with shape of [N, T_k, d_model].
    entry_mask: A bool tensor with shape of [N, T_k]; True marks valid entries.
    num_heads: An int. Number of heads.
    dropout_rate: A floating point number.
scope: Optional scope for `variable_scope`.
Returns:
A 3d tensor with shape of (N, T_q, C)
"""
d_model = queries.get_shape().as_list()[-1]
with tf.variable_scope(scope, default_name='multihead_attention'):
# Linear projections
Q = tf.layers.dense(queries, d_model, use_bias=False) # (N, T_q, d_model)
K = tf.layers.dense(keys, d_model, use_bias=False) # (N, T_k, d_model)
V = tf.layers.dense(values, d_model, use_bias=False) # (N, T_k, d_model)
# Split and concat
Q_ = tf.concat(tf.split(Q, num_heads, axis=2),
axis=0) # (h*N, T_q, d_model/h)
K_ = tf.concat(tf.split(K, num_heads, axis=2),
axis=0) # (h*N, T_k, d_model/h)
V_ = tf.concat(tf.split(V, num_heads, axis=2),
axis=0) # (h*N, T_k, d_model/h)
# Duplicate mask
matrix_mask = tf.tile(tf.expand_dims(entry_mask, axis=1), [1, entry_mask.shape[-1], 1])
matrix_mask = tf.math.logical_and(
matrix_mask,
tf.tile(tf.expand_dims(entry_mask, axis=-1), [1, 1, entry_mask.shape[-1]])
)
matrix_mask = tf.tile(matrix_mask, [num_heads, 1, 1])
outputs = scaled_dot_product_attention_v2(Q_, K_, V_, matrix_mask, dropout_rate)
# Restore shape
outputs = tf.concat(tf.split(outputs, num_heads, axis=0),
axis=2) # (N, T_q, d_model)
# Residual connection
outputs += queries
# Normalize
outputs = ln(outputs)
return outputs
def multihead_attention_v3(queries, keys, values, entry_mask,
num_heads=8, enc_dim=128,
dropout_rate=0.0, scope=None):
""" Identical to AStar
Args:
queries: A 3d tensor with shape of [N, T_q, d_model].
keys: A 3d tensor with shape of [N, T_k, d_model].
    values: A 3d tensor with shape of [N, T_k, d_model].
    entry_mask: A bool tensor with shape of [N, T_k]; True marks valid entries.
    num_heads: An int. Number of heads.
    enc_dim: An int. Encoding dim of the per-head projections.
    dropout_rate: A floating point number.
scope: Optional scope for `variable_scope`.
Returns:
A 3d tensor with shape of (N, T_q, C)
"""
# d_model = queries.get_shape().as_list()[-1]
d_model = enc_dim
with tf.variable_scope(scope, default_name='multihead_attention'):
# Linear projections
Q = tf.layers.dense(queries, d_model, use_bias=False) # (N, T_q, d_model)
K = tf.layers.dense(keys, d_model, use_bias=False) # (N, T_k, d_model)
V = tf.layers.dense(values, d_model, use_bias=False) # (N, T_k, d_model)
# Split and concat
Q_ = tf.concat(tf.split(Q, num_heads, axis=2),
axis=0) # (h*N, T_q, d_model/h)
K_ = tf.concat(tf.split(K, num_heads, axis=2),
axis=0) # (h*N, T_k, d_model/h)
V_ = tf.concat(tf.split(V, num_heads, axis=2),
axis=0) # (h*N, T_k, d_model/h)
# Duplicate mask
matrix_mask = tf.tile(tf.expand_dims(entry_mask, axis=1), [1, entry_mask.shape[-1], 1])
matrix_mask = tf.math.logical_and(
matrix_mask,
tf.tile(tf.expand_dims(entry_mask, axis=-1), [1, 1, entry_mask.shape[-1]])
)
matrix_mask = tf.tile(matrix_mask, [num_heads, 1, 1])
outputs = scaled_dot_product_attention_v2(Q_, K_, V_, matrix_mask, dropout_rate)
# fc to double the number of channels to 256 and
# the head results are summed and passed through
# a 2-layer MLP with hidden size 1024 and output
# size 256
outputs = tfc_layers.fully_connected(outputs, 256)
outputs = tf.add_n(tf.split(outputs, num_heads, axis=0))
outputs = tfc_layers.fully_connected(outputs, 1024)
outputs = tfc_layers.fully_connected(outputs, 256, activation_fn=None)
# # Residual connection
# outputs += queries
# # Normalize
# outputs = ln(outputs)
return outputs
def self_attention_ffsum(queries, keys, values, entry_mask,
num_heads=8, enc_dim=128,
dropout_rate=0.0, scope=None):
""" Self Attention + Feed Forward using Sum.
Simplified from multihead_attention and follows the AStar paper impl.
Args:
queries: A 3d tensor with shape of [N, T_q, d_model].
keys: A 3d tensor with shape of [N, T_k, d_model].
    values: A 3d tensor with shape of [N, T_k, d_model].
    entry_mask: A bool tensor with shape of [N, T_k]; True marks valid entries.
    num_heads: An int. Number of heads.
    enc_dim: An int. Encoding dim of the per-head projections; note that the
      feed-forward residual in this impl requires 256-dim features.
    dropout_rate: A floating point number.
scope: Optional scope for `variable_scope`.
Returns:
A 3d tensor with shape of (N, T_q, C)
"""
# d_model = queries.get_shape().as_list()[-1]
d_model = enc_dim
with tf.variable_scope(scope, default_name='self_attention_ffsum'):
# The Self Attention Block
# Linear projections
Q = tf.layers.dense(queries, d_model, use_bias=False) # (N, T_q, d_model)
K = tf.layers.dense(keys, d_model, use_bias=False) # (N, T_k, d_model)
V = tf.layers.dense(values, d_model, use_bias=False) # (N, T_k, d_model)
# Split to num_heads
Q_ = tf.concat(tf.split(Q, num_heads, axis=2),
axis=0) # (h*N, T_q, d_model/h)
K_ = tf.concat(tf.split(K, num_heads, axis=2),
axis=0) # (h*N, T_k, d_model/h)
V_ = tf.concat(tf.split(V, num_heads, axis=2),
axis=0) # (h*N, T_k, d_model/h)
# Duplicate mask
matrix_mask = tf.tile(tf.expand_dims(entry_mask, axis=1),
[1, entry_mask.shape[-1], 1])
matrix_mask = tf.math.logical_and(
matrix_mask,
tf.tile(tf.expand_dims(entry_mask, axis=-1), [1, 1, entry_mask.shape[-1]])
)
matrix_mask = tf.tile(matrix_mask, [num_heads, 1, 1])
# Attention
outputs = scaled_dot_product_attention_v2(Q_, K_, V_, matrix_mask,
dropout_rate)
outputs = tf.concat(tf.split(outputs, num_heads, axis=0),
axis=2) # Restore shape: (N, T_q, d_model)
outputs += queries # Residual connection
outputs = ln(outputs, scope='attention_ln') # normalize
# The ff Block, AStar paper goes:
# In each layer, each self-attention head uses keys, queries, and values of
# size 128, then passes the aggregated values through a Conv1D with kernel
# size 1 to double the number of channels (to 256). The head results are
# summed and passed through a 2-layer MLP with hidden size 1024 and output
# size 256.
skip = outputs
outputs_split = tf.split(outputs, num_heads, axis=-1)
for i in range(num_heads):
# Note(pengsun): equivalent to conv1d with kernel size 1
outputs_split[i] = tfc_layers.fully_connected(outputs_split[i], 256)
outputs = tf.add_n(outputs_split)
outputs = tfc_layers.fully_connected(outputs, 1024)
outputs = tfc_layers.fully_connected(outputs, 256, activation_fn=None)
outputs += skip # Residual connection
outputs = ln(outputs, scope='ff_ln') # normalize
return outputs
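# A usage sketch (illustration only; the 512 entity slots are an assumption):
# entry_mask marks valid entries, and the feed-forward residual in the block
# above requires 256-dim inputs together with enc_dim=256.
def _example_self_attention_ffsum():
  x = tf.placeholder(tf.float32, [None, 512, 256])  # entity embeddings
  mask = tf.placeholder(tf.bool, [None, 512])       # True for valid slots
  return self_attention_ffsum(x, x, x, mask, num_heads=8, enc_dim=256)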
def ff(inputs, num_units, scope=None):
"""position-wise feed forward net. See 3.3
Args:
inputs: A 3d tensor with shape of [N, T, C].
num_units: A list of two integers.
scope: Optional scope for `variable_scope`.
Returns:
A 3d tensor with the same shape and dtype as inputs
"""
with tf.variable_scope(scope, default_name="positionwise_feedforward"):
# Inner layer
outputs = tf.layers.dense(inputs, num_units[0], activation=tf.nn.relu)
# Outer layer
outputs = tf.layers.dense(outputs, num_units[1])
# Residual connection
outputs += inputs
# Normalize
outputs = ln(outputs)
return outputs
# rnn stuff
@add_arg_scope
def lstm(inputs_x_seq: list,
inputs_terminal_mask_seq: list,
inputs_state,
nh,
forget_bias=1.0,
weights_initializer=ortho_init(1.0),
biases_initializer=tf.constant_initializer(0.0),
weights_regularizer=None,
biases_regularizer=None,
use_layer_norm=False,
scope=None):
""" An lstm layer (cell) tailored for RL task.
It includes a mask that indicates the terminal time step of an unroll.
Borrowed and modified from openai/baselines.
Args:
inputs_x_seq: list of rollout_len Tensors, each sized (nrollout, dim)
inputs_terminal_mask_seq: list of rollout_len Tensors, each sized
(nrollout, 1). A mask that indicates whether it is terminal of an
unroll.
inputs_state: Tensor, (nrollout, 2*nh), initial hidden state of the input
rollout
    nh: int, number of hidden units.
    forget_bias: float, forget gate bias.
    weights_initializer: defaults to ortho_init(1.0).
    biases_initializer: defaults to tf.constant_initializer(0.0).
    use_layer_norm: bool, whether to apply layer normalization.
    scope: Optional scope for `variable_scope`.
Returns:
A list of outputs
A Tensor, the updated hidden state
"""
# shorter names
xs, ms, s = inputs_x_seq, inputs_terminal_mask_seq, inputs_state
nbatch, nin = [v.value for v in xs[0].get_shape()]
with tf.variable_scope(scope, default_name='lstm'):
# weights & biases
# Use xavier_initializer for wx per qingwei's verification
wx = tf.get_variable("wx", [nin, nh * 4], initializer=xavier_initializer(),
regularizer=weights_regularizer)
wh = tf.get_variable("wh", [nh, nh * 4], initializer=weights_initializer,
regularizer=weights_regularizer)
b = tf.get_variable("b", [nh * 4], initializer=biases_initializer,
regularizer=biases_regularizer)
# normalization function
x_nf, h_nf, c_nf = None, None, None
if use_layer_norm:
with tf.variable_scope('x_ln', reuse=tf.AUTO_REUSE) as sc:
x_nf = partial(ln, epsilon=1e-5, enable_openai_impl=True, scope=sc)
with tf.variable_scope('h_ln', reuse=tf.AUTO_REUSE) as sc:
h_nf = partial(ln, epsilon=1e-5, enable_openai_impl=True, scope=sc)
with tf.variable_scope('c_ln', reuse=tf.AUTO_REUSE) as sc:
c_nf = partial(ln, epsilon=1e-5, enable_openai_impl=True, scope=sc)
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c * (1 - m)
h = h * (1 - m)
c, h = one_step_lstm_op(c, h, x, wx, wh, b, forget_bias, x_nf, h_nf, c_nf)
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
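# A usage sketch for `lstm` (illustrative sizes, assumes a TF1 graph). The
# rollout is fed as a list of per-step (nrollout, dim) Tensors plus per-step
# terminal masks; the state packs [c, h] into a (nrollout, 2*nh) Tensor.
def _lstm_usage_sketch(nrollout=4, rollout_len=8, dim=64, nh=32):
  xs = [tf.placeholder(tf.float32, [nrollout, dim]) for _ in range(rollout_len)]
  ms = [tf.placeholder(tf.float32, [nrollout, 1]) for _ in range(rollout_len)]
  s0 = tf.placeholder(tf.float32, [nrollout, 2 * nh])
  hs_seq, s_new = lstm(xs, ms, s0, nh=nh, use_layer_norm=True,
                       scope='lstm_example')
  return hs_seq, s_new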
@add_arg_scope
def k_lstm(inputs_x_seq: list,
inputs_termial_mask_seq: list,
inputs_state,
nh,
k,
forget_bias=0.0,
weights_initializer=ortho_init(1.0),
biases_initializer=tf.constant_initializer(0.0),
weights_regularizer=None,
biases_regularizer=None,
use_layer_norm=False,
scope=None):
""" An skip-k-step lstm layer (cell) tailored for RL task.
It includes a mask that indicates the terminal time step of an unroll.
Borrowed and modified from openai/baselines.
Args:
inputs_x_seq: list of rollout_len Tensors, each sized (nrollout, dim)
inputs_terminal_mask_seq: list of rollout_len Tensors, each sized
(nrollout, 1). A mask that indicates whether it is terminal of an
unroll.
inputs_state: Tensor, (nrollout, 2*nh+1), initial hidden state of the
input rollout. The last dim stores cyclic step count information in
{0, 1, ... , k-1}
    nh: int, number of hidden units.
    k: int, number of steps to skip.
    forget_bias: float, forget gate bias.
    weights_initializer: defaults to ortho_init(1.0).
    biases_initializer: defaults to tf.constant_initializer(0.0).
    scope: Optional scope for `variable_scope`.
  Returns:
    A list of output Tensors, one per time step.
    A Tensor, the updated hidden state.
"""
# shorter names
xs, ms, s = inputs_x_seq, inputs_termial_mask_seq, inputs_state
nbatch, nin = [v.value for v in xs[0].get_shape()]
with tf.variable_scope(scope, default_name='k_lstm'):
# weights & biases
wx = tf.get_variable("wx", [nin, nh * 4], initializer=weights_initializer,
regularizer=weights_regularizer)
wh = tf.get_variable("wh", [nh, nh * 4], initializer=weights_initializer,
regularizer=weights_regularizer)
b = tf.get_variable("b", [nh * 4], initializer=biases_initializer,
regularizer=biases_regularizer)
# normalization function
x_nf, h_nf, c_nf = None, None, None
if use_layer_norm:
with tf.variable_scope('x_ln', reuse=tf.AUTO_REUSE) as sc:
x_nf = partial(ln, scope=sc)
with tf.variable_scope('h_ln', reuse=tf.AUTO_REUSE) as sc:
h_nf = partial(ln, scope=sc)
with tf.variable_scope('c_ln', reuse=tf.AUTO_REUSE) as sc:
c_nf = partial(ln, scope=sc)
nh = (s.shape[1].value - 1) // 2
c, h, cyclic_step_count = tf.split(axis=1, value=s,
num_or_size_splits=[nh, nh, 1])
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c * (1 - m)
h = h * (1 - m)
c_lstm, h_lstm = one_step_lstm_op(c, h, x, wx, wh, b, forget_bias, x_nf,
h_nf, c_nf)
mod_mask = tf.equal(cyclic_step_count, 0)
mod_mask = tf.cast(mod_mask, tf.float32)
c = tf.multiply(mod_mask, c_lstm) + tf.multiply(1 - mod_mask, c)
h = tf.multiply(mod_mask, h_lstm) + tf.multiply(1 - mod_mask, h)
xs[idx] = h
# prepare for the next time step
cyclic_step_count = tf.mod(cyclic_step_count + 1, k)
s = tf.concat(axis=1, values=[c, h, cyclic_step_count])
return xs, s
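# A usage sketch for `k_lstm` (illustrative sizes). Compared with `lstm`, the
# state Tensor carries one extra trailing scalar holding the cyclic step
# count, hence the 2*nh + 1 width.
def _k_lstm_usage_sketch(nrollout=4, rollout_len=8, dim=64, nh=32, k=4):
  xs = [tf.placeholder(tf.float32, [nrollout, dim]) for _ in range(rollout_len)]
  ms = [tf.placeholder(tf.float32, [nrollout, 1]) for _ in range(rollout_len)]
  s0 = tf.placeholder(tf.float32, [nrollout, 2 * nh + 1])
  hs_seq, s_new = k_lstm(xs, ms, s0, nh=nh, k=k, scope='k_lstm_example')
  return hs_seq, s_new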
# Action stuff
ActionHead = namedtuple('ActionHead', [
'flatparam',
'argmax',
'sam', # sampled action
'neglogp', # negative log-likelihood, i.e., -log(p)
'pd', # probabilistic distribution
'ent' # entropy
])
@add_arg_scope
def to_action_head(flatparam, pdtype_cls, temperature=1.0,
nseq=None, mask=None, labels=None, sample=None, scope=None):
"""Convert logits to ActionHead.
Args:
    flatparam: (batch_size, pdtype_cls.param_shape()), the flat distribution
      parameters (e.g., the logits)
    pdtype_cls: distribution type class
    temperature: float, sampling temperature
    nseq, mask, labels: only used by MaskSeqCategoricalPdType
    sample: optional outer-fed action sample that overrides head_pd.sample()
    scope: for tf.variable_scope
  Returns:
    An ActionHead namedtuple instance.
"""
if pdtype_cls == DiagGaussianPdType:
n_actions = int(flatparam.shape[-1]//2)
# logstd -> logstd + 0.5 * log(T)
if temperature != 1.0:
mean, logstd = tf.split(axis=-1, num_or_size_splits=2,
value=flatparam)
flatparam = tf.concat(
[mean, logstd + 0.5 * tf.log(float(temperature))], axis=-1)
else:
flatparam /= temperature
n_actions = flatparam.shape[-1]
if pdtype_cls == CategoricalPdType:
pdtype = pdtype_cls(ncat=n_actions)
elif pdtype_cls == BernoulliPdType:
pdtype = pdtype_cls(size=n_actions)
elif pdtype_cls == MaskSeqCategoricalPdType:
pdtype = pdtype_cls(nseq=nseq, ncat=n_actions, mask=mask, labels=labels)
flatparam = tf.reshape(flatparam, shape=(-1, nseq*n_actions))
elif pdtype_cls == DiagGaussianPdType:
pdtype = pdtype_cls(size=n_actions)
else:
    raise NotImplementedError('Unknown pdtype_cls {}'.format(pdtype_cls))
with tf.variable_scope(scope, default_name='to_action_head'):
head_pd = pdtype.pdfromflat(flatparam)
head_argmax = head_pd.mode()
# Note(pengsun): we cannot write `head_sam = sample or head_pd.sample()`,
# as it is interpreted as `Tensor or Tensor` and raises an error
if sample is not None:
head_sam = sample
else:
head_sam = head_pd.sample()
head_neglogp = head_pd.neglogp(head_sam)
head_entropy = head_pd.entropy()
return ActionHead(flatparam, head_argmax, head_sam, head_neglogp, head_pd,
head_entropy)
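# A usage sketch for `to_action_head` with a categorical distribution
# (CategoricalPdType is the pd type already used by this module; shapes are
# illustrative).
def _to_action_head_usage_sketch():
  logits = tf.placeholder(tf.float32, [None, 10])  # (bs, n_actions)
  head = to_action_head(logits, CategoricalPdType, temperature=1.0)
  # head.sam is a sampled action, head.neglogp its -log p, head.ent the entropy
  return head.sam, head.neglogp, head.ent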
@add_arg_scope
def discrete_action_head(inputs,
n_actions,
pdtype_cls,
mask=None,
enc_dim=None,
embed_scope=None,
temperature=1.0,
scope=None):
"""Layer that makes an action head.
The embedding layer is created or reused by taking the embed_scope.
Args:
    inputs: input Tensor, (bs, input_dim)
    n_actions: int, number of discrete actions
    pdtype_cls: distribution type class
    mask: optional (bs, n_actions) mask of available actions
    enc_dim: optional int, action embedding dim for the "offset-add" trick
    embed_scope: optional scope from which the action embedding is reused
    temperature: float, sampling temperature
    scope:
  Returns:
    An ActionHead instance.
"""
with tf.variable_scope(scope, default_name='discrete_action_head'):
head_logits = tfc_layers.fully_connected(inputs,
n_actions,
activation_fn=None,
normalizer_fn=None,
scope='logits')
if enc_dim is not None and embed_scope is not None:
# get the action embedding to do the "offset-add" (invented by lxhan)
# TODO(pengsun): double-check the two-layer size, why the n_actions for
# the first layer?
head_h = tfc_layers.fully_connected(inputs, n_actions, scope='bfc1')
# [bs, n_actions]
head_h_branch = tfc_layers.fully_connected(head_h,
enc_dim,
activation_fn=None,
normalizer_fn=None,
scope='bfc2')
# [bs, enc_dim]
offset = linear_embed(head_h_branch,
vocab_size=n_actions,
enc_size=enc_dim,
inverse_embed=True,
scope=embed_scope)
# [bs, n_actions]
# do the offset-adding
head_logits += offset
if mask is not None:
head_logits = tp_ops.mask_logits(head_logits, mask)
return to_action_head(head_logits, pdtype_cls, temperature=temperature)
@add_arg_scope
def discrete_action_head_v2(inputs,
n_actions,
pdtype_cls,
context=None,
mask=None,
temperature=1.0,
scope=None):
"""Layer that makes an action head, v2.
Convert the inputs to logits. Can pass in an optional context for GLU, and an
optional mask for available actions.
Args:
inputs: input Tensor
n_actions: int, number of actions
    pdtype_cls: distribution type class
    mask: (bs, n_actions), mask for available actions. Default None means "not
      use"
    context: (bs, M), context Tensor. For GLU. Default None means "not use"
    temperature: float, sampling temperature
    scope:
  Returns:
    An ActionHead instance.
"""
with tf.variable_scope(scope, default_name='discrete_action_head_v2'):
if context is None:
head_logits = tfc_layers.fully_connected(inputs,
n_actions,
activation_fn=None,
normalizer_fn=None,
scope='logits')
else:
head_logits = glu(inputs, context, n_actions, scope='gated_logits')
if mask is not None:
head_logits = tp_ops.mask_logits(head_logits, mask)
return to_action_head(head_logits, pdtype_cls, temperature=temperature)
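# A usage sketch for `discrete_action_head_v2` (illustrative shapes; the
# context width is arbitrary). The boolean mask marks available actions and
# the optional context switches on the GLU path.
def _discrete_action_head_v2_usage_sketch():
  x = tf.placeholder(tf.float32, [None, 256])
  ctx = tf.placeholder(tf.float32, [None, 128])
  avail = tf.placeholder(tf.bool, [None, 12])
  head = discrete_action_head_v2(x, 12, CategoricalPdType,
                                 context=ctx, mask=avail)
  return head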
@add_arg_scope
def loc_action_head(inputs,
pdtype_cls,
mask=None,
temperature=1.0,
logits_mode='1x1',
scatter_ind=None,
scatter_bs=None,
scope=None):
"""Layer that makes a location action (one-hot of a 2D map) head.
Args:
inputs: [bs, H, W, C]
    pdtype_cls: distribution type class
    mask: [bs, H, W]
    logits_mode: '1x1' or '3x3up2', i.e., whether to scale up the output map
      size by 2x via a transposed conv
    temperature: float, sampling temperature
    scope:
  Returns:
    An action head for the flattened logits, [bs, HH*WW]. Cases are:
      logits_mode == '1x1': HH = H, WW = W
      logits_mode == '3x3up2': HH = 2*H, WW = 2*W
"""
with tf.variable_scope(scope, default_name='loc_action_head'):
# [bs, H, W, C]
if logits_mode == '3x3up2':
loc_logits = tfc_layers.conv2d_transpose(inputs, 1, [3, 3],
stride=2,
activation_fn=None,
normalizer_fn=None,
scope='3x3up2mapping')
elif logits_mode == '1x1':
loc_logits = tfc_layers.conv2d(inputs, 1, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='1x1mapping')
else:
raise ValueError('Unknown logits_mode {}'.format(logits_mode))
# [bs, HH, WW, 1]
loc_logits = tf.squeeze(loc_logits, axis=-1)
# [bs, HH, WW]
if mask is not None:
loc_logits = tp_ops.mask_logits(loc_logits, mask)
# [bs, HH, WW]
loc_logits_flat = tfc_layers.flatten(loc_logits)
if scatter_ind is not None and scatter_bs is not None:
loc_logits_flat = tf.scatter_nd(
tf.expand_dims(scatter_ind, axis=-1),
loc_logits_flat,
shape=[scatter_bs] + loc_logits_flat.shape[1:]
)
# [bs, H*W]
return to_action_head(loc_logits_flat, pdtype_cls, temperature=temperature)
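# A usage sketch for `loc_action_head` on a spatial feature map (illustrative
# shapes). With logits_mode='1x1' the head is a flat categorical over the H*W
# map locations.
def _loc_action_head_usage_sketch():
  fmap = tf.placeholder(tf.float32, [None, 64, 64, 16])  # (bs, H, W, C)
  head = loc_action_head(fmap, CategoricalPdType, logits_mode='1x1')
  return head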
def _ptr_decode(y, memory, num_dec_blocks, ff_dim, enc_dim, training=True):
with tf.variable_scope("decoder"):
dec_logits, dec_pd = [], []
dec = y
# Blocks
for i in range(num_dec_blocks):
with tf.variable_scope("num_blocks_{}".format(i)):
if i < num_dec_blocks - 1:
# Vanilla attention
dec = multihead_attention(queries=dec,
keys=memory,
values=memory,
dropout_rate=0.0,
training=training,
causality=False,
scope="vanilla_attention")
# Feed Forward
dec = ff(dec, num_units=[ff_dim, enc_dim])
else:
# pointer attention
dec_logits, dec_pd = multihead_attention(
queries=dec,
keys=memory,
values=memory,
dropout_rate=0.0,
training=training,
causality=False,
pointer=True,
scope="pointer_attention")
return dec_logits, dec_pd
@add_arg_scope
def ptr_action_head(inputs_query,
inputs_ptr_mask,
inputs_entity_embed,
ptr_out_dim,
num_dec_blocks,
ff_dim,
enc_dim,
pdtype_cls,
temperature=1.0,
scatter_ind=None,
scatter_bs=None,
scope=None):
""" Pointer-Network action head.
Args:
inputs_query: [bs, some dim]
inputs_ptr_mask: [bs, 600]
inputs_entity_embed: [bs, some dim]
ptr_out_dim:
num_dec_blocks:
ff_dim:
enc_dim:
pdtype_cls:
scope:
Returns:
    An ActionHead instance.
"""
with tf.variable_scope(scope, default_name='ptr_head'):
select_logits, select_prob = _ptr_decode(
y=inputs_query,
memory=inputs_entity_embed,
num_dec_blocks=num_dec_blocks,
ff_dim=ff_dim,
enc_dim=enc_dim,
training=False
)
select_logits = tf.reshape(select_logits, [-1, ptr_out_dim])
select_logits = tp_ops.mask_logits(logits=select_logits, mask=inputs_ptr_mask)
if scatter_ind is not None and scatter_bs is not None:
select_logits = tf.scatter_nd(tf.expand_dims(scatter_ind, axis=-1), select_logits,
shape=[scatter_bs] + select_logits.shape[1:])
return to_action_head(select_logits, pdtype_cls, temperature=temperature)
@add_arg_scope
def ptr_action_head_v2(inputs_query,
inputs_ptr_mask,
inputs_entity_embed,
inputs_func_embed,
ptr_out_dim,
pdtype_cls,
temperature=1.0,
scatter_ind=None,
scatter_bs=None,
scope=None):
""" Pointer-Network action head.
Args:
inputs_query: [bs, some dim]
inputs_ptr_mask: [bs, 600]
    inputs_entity_embed: [bs, some dim]
    inputs_func_embed: [bs, some dim], the function embedding added to the
      query
    ptr_out_dim:
    pdtype_cls:
    scope:
  Returns:
    An ActionHead instance.
"""
with tf.variable_scope(scope, default_name='ptr_head'):
inputs_query = tfc_layers.fully_connected(inputs_query, 256, activation_fn=None)
inputs_query += tf.expand_dims(inputs_func_embed, axis=1)
inputs_query = tf.nn.relu(inputs_query)
inputs_query = tfc_layers.fully_connected(inputs_query, 32, activation_fn=None) # per AStar
projected_keys = tfc_layers.fully_connected(inputs_entity_embed, 32, activation_fn=None)
# attentions (= queries * keys) as logits
tar_logits = tf.reduce_sum(inputs_query * projected_keys, axis=-1)
tar_logits = tp_ops.mask_logits(logits=tar_logits, mask=inputs_ptr_mask)
if scatter_ind is not None and scatter_bs is not None:
tar_logits = tf.scatter_nd(tf.expand_dims(scatter_ind, axis=-1), tar_logits,
shape=[scatter_bs] + tar_logits.shape[1:])
return to_action_head(tar_logits, pdtype_cls, temperature=temperature)
@add_arg_scope
def multinomial_action_head(inputs,
inputs_select_mask,
temperature=1.0,
scatter_ind=None,
scatter_bs=None,
scope=None):
"""Multinominal action head. (i.e., lxhan's make_multi_bi_head)
Args:
inputs: [bs, some dim]
inputs_select_mask: [bs, 600]
pdtype_cls:
scope:
Returns:
An outputs `Tensor`.
"""
n_action_states = 2 # whether the action is executed or not
with tf.variable_scope(scope, default_name='multinomial_action_head'):
# this code block should not be here
# query_h = dense_sum_blocks(inputs=inputs_query, n=4, enc_dim=enc_dim,
# scope='q_res_blk')
# query_h = tf.expand_dims(query_h, axis=1)
# query_h = tf.tile(query_h, multiples=[
# 1, tf.shape(inputs_entity_embed)[1], 1])
# head_h = tf.concat([inputs_entity_embed, query_h], axis=-1)
# head_h = dense_sum_blocks(inputs=head_h, n=4, enc_dim=enc_dim,
# scope='eq_res_blk')
head_logits = tfc_layers.fully_connected(inputs,
n_action_states,
scope='logits',
activation_fn=None,
normalizer_fn=None)
# modify the logits that unavailable position will be -inf
neginf = tf.zeros_like(inputs_select_mask, dtype=tf.float32) - INF
offset = tf.where(inputs_select_mask,
tf.zeros_like(inputs_select_mask, dtype=tf.float32),
neginf)
offset = tf.expand_dims(offset, axis=-1)
offset = tf.concat([tf.zeros_like(offset), offset], axis=-1)
head_logits += offset
# hack to flatten ms's logits and reduce_sum neglogp, entropy;
# otherwise, should use MultiCategoricalPd, which, however, is
    # slow as it creates multiple CategoricalPd instances
## TODO: hack for backward compatibility, remove this later
head_logits = head_logits[:, :, 1] - head_logits[:, :, 0]
if scatter_ind is not None and scatter_bs is not None:
head_logits = tf.scatter_nd(tf.expand_dims(scatter_ind, axis=-1), head_logits,
shape=[scatter_bs] + head_logits.shape[1:])
ms_head = to_action_head(head_logits, BernoulliPdType, temperature=temperature)
# ms_head = ActionHead(
# logits=tf.reshape(ms_head.logits, [-1, tf.reduce_prod(tf.shape(ms_head.logits)[1:])]),
# argmax=ms_head.argmax,
# sam=ms_head.sam,
# neglogp=tf.reduce_sum(ms_head.neglogp, axis=-1),
# pd=ms_head.pd,
# ent=tf.reduce_sum(ms_head.ent, axis=-1))
return ms_head
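# A usage sketch for `multinomial_action_head`. The logits slicing above
# ([:, :, 1] - [:, :, 0]) implies a per-unit input, so this sketch assumes a
# (bs, n_units, dim) embedding whose n_units matches the select mask width.
def _multinomial_action_head_usage_sketch():
  unit_embed = tf.placeholder(tf.float32, [None, 600, 64])
  select_mask = tf.placeholder(tf.bool, [None, 600])
  head = multinomial_action_head(unit_embed, select_mask)
  return head  # a Bernoulli head: one select/ignore decision per unit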
@add_arg_scope
def sequential_selection_head(inputs,
inputs_select_mask,
input_keys,
input_selections,
max_num=64,
temperature=1.0,
forget_bias=0.0,
weights_initializer=ortho_init(1.0),
biases_initializer=tf.constant_initializer(0.0),
weights_regularizer=None,
biases_regularizer=None,
use_layer_norm=False,
scope=None):
"""Sequential Selection head using lstm and pointer network.
Args:
inputs: [bs, some dim], the input embeddings
inputs_select_mask: [bs, unit_num], the last item is end_selection
input_keys: [bs, unit_num, key_dim]
input_selections: outer-fed selection samples, i.e., the labels or ground
truths
Returns:
A head structure.
An updated `selected units embedding` with the same shape of the inputs.
"""
# make pre embedding
nbatch, nin = inputs.get_shape().as_list()
n_embed = 256
_, unit_num, nh = input_keys.get_shape().as_list()
  # unit_num serves as the End Of Selection <EOS> token
# nh is the dim of key and lstm's hidden state
expand_mask = tf.cast(tf.expand_dims(inputs_select_mask, -1), tf.float32)
mean_key = (tf.reduce_sum(input_keys * expand_mask, axis=[1])
/ (tf.reduce_sum(expand_mask, axis=[1]) + 1e-8))
# make keys with end key
end_key = tf.get_variable("end_key", [1, 1, nh],
initializer=tf.constant_initializer(0.2),
regularizer=None)
input_keys = tf.concat([input_keys,
tf.tile(end_key, [nbatch, 1, 1])], axis=1)
# make mask with terminal state
inputs_select_mask = tf.concat(
[inputs_select_mask, tf.constant([[True]] * nbatch, tf.bool)], axis=1)
with tf.variable_scope(scope, default_name='sequential_selection_head'):
with tf.variable_scope(scope, default_name='lstm'):
# weights & biases
wx = tf.get_variable("wx", [n_embed, nh * 4], initializer=weights_initializer,
regularizer=weights_regularizer)
wh = tf.get_variable("wh", [nh, nh * 4], initializer=weights_initializer,
regularizer=weights_regularizer)
b = tf.get_variable("b", [nh * 4], initializer=biases_initializer,
regularizer=biases_regularizer)
wkey = tf.get_variable("wkey", [nh, nin], initializer=weights_initializer,
regularizer=weights_regularizer)
with tf.variable_scope('embed', reuse=tf.AUTO_REUSE) as sc_embed:
pass
# normalization function
x_nf, h_nf, c_nf = None, None, None
if use_layer_norm:
with tf.variable_scope('x_ln', reuse=tf.AUTO_REUSE) as sc:
x_nf = partial(ln, epsilon=1e-5, enable_openai_impl=True, scope=sc)
with tf.variable_scope('h_ln', reuse=tf.AUTO_REUSE) as sc:
h_nf = partial(ln, epsilon=1e-5, enable_openai_impl=True, scope=sc)
with tf.variable_scope('c_ln', reuse=tf.AUTO_REUSE) as sc:
c_nf = partial(ln, epsilon=1e-5, enable_openai_impl=True, scope=sc)
c = tf.constant(0.0, shape=[nbatch, nh], dtype=tf.float32)
h = tf.constant(0.0, shape=[nbatch, nh], dtype=tf.float32)
samples = []
logits_list = []
outer_fed_selection = input_selections is not None
inputs_list = [inputs]
select_masks = []
for idx in range(max_num):
select_masks.append(inputs_select_mask)
s_embed = tfc_layers.fully_connected(inputs, n_embed, scope=sc_embed)
c, h = one_step_lstm_op(c, h, s_embed, wx, wh, b,
forget_bias, x_nf, h_nf, c_nf)
# attentions (= queries * keys) as logits
logits = tf.reduce_sum(tf.expand_dims(h, axis=1) * input_keys, axis=-1)
masked_logits = tp_ops.mask_logits(logits, inputs_select_mask)/temperature
logits_list.append(masked_logits)
if outer_fed_selection:
sample = input_selections[:, idx]
else:
# sample from logits
sample = cat_sample_from_logits(masked_logits)
samples.append(sample)
# update the input embedding
index = tf.stack([tf.range(nbatch), sample], axis=1)
inputs += tf.matmul(tf.gather_nd(input_keys, index) - mean_key, wkey)
inputs_list.append(inputs)
# update the selection mask
inputs_select_mask = tf.tensor_scatter_nd_update(inputs_select_mask,
index, [False]*nbatch)
# Mask all the units except <EOS> if the selection already ends
with tf.xla.experimental.jit_scope(compile_ops=False):
end_ind = tf.cast(tf.where(tf.equal(sample, unit_num)), tf.int32)
inputs_select_mask = tf.tensor_scatter_update(
inputs_select_mask, end_ind,
tf.concat([tf.zeros_like(end_ind, dtype=tf.bool)] * unit_num
+ [tf.ones_like(end_ind, dtype=tf.bool)], axis=1)
)
samples = tf.stack(samples, axis=1)
select_masks = tf.stack(select_masks, axis=1)
# finding the first <EOS> unit
mask = tf.not_equal(samples, tf.constant(unit_num, tf.int32))
end_indices = tf.math.minimum(
tf.reduce_sum(tf.cast(mask, tf.int32), axis=1), max_num-1)
loss_mask = tf.tensor_scatter_nd_update(
mask, tf.stack([tf.range(nbatch), end_indices], axis=1), [True]*nbatch)
# update the input embedding using the output selected units
embed = tf.gather_nd(inputs_list,
tf.stack([end_indices, tf.range(nbatch)], axis=1))
logits = tf.stack(logits_list, axis=1)
labels = None
if outer_fed_selection:
x = tf.one_hot(samples, unit_num+1)
s = tf.reduce_sum(x, axis=1, keepdims=True)
select_labels = tf.concat([s[:, :, :-1], tf.zeros([nbatch, 1, 1])],
axis=-1) * tf.cast(select_masks, tf.float32)
end_labels = tf.concat([tf.zeros([nbatch, max_num, unit_num]),
tf.ones([nbatch, max_num, 1])], axis=-1)
labels = tf.where_v2(tf.expand_dims(mask, axis=-1),
select_labels, end_labels)
labels = labels / tf.reduce_sum(labels, axis=-1, keepdims=True)
samples = None
head = to_action_head(logits, MaskSeqCategoricalPdType, nseq=max_num,
mask=loss_mask, labels=labels, sample=samples)
return head, embed
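# A usage sketch for `sequential_selection_head` (illustrative sizes; the
# batch size must be static because the loop builds per-batch constants).
# Passing input_selections=None lets the head sample its own selections.
def _sequential_selection_head_usage_sketch(nbatch=8, unit_num=600, key_dim=32):
  x = tf.placeholder(tf.float32, [nbatch, 256])
  sel_mask = tf.placeholder(tf.bool, [nbatch, unit_num])
  keys = tf.placeholder(tf.float32, [nbatch, unit_num, key_dim])
  head, embed = sequential_selection_head(x, sel_mask, keys,
                                          input_selections=None, max_num=8)
  return head, embed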
@add_arg_scope
def sequential_selection_head_v2(inputs,
inputs_select_mask,
input_keys,
input_selections,
input_func_embed,
max_num=64,
temperature=1.0,
forget_bias=0.0,
weights_initializer=ortho_init(1.0),
biases_initializer=tf.constant_initializer(0.0),
weights_regularizer=None,
biases_regularizer=None,
use_layer_norm=False,
scope=None):
"""Sequential Selection head using lstm and pointer network.
Args:
inputs: [bs, some dim], the input embeddings
inputs_select_mask: [bs, unit_num], the last item is end_selection
input_keys: [bs, unit_num, key_dim]
input_selections: outer-fed selection samples, i.e., the labels or ground
truths
Returns:
A head structure.
An updated `selected units embedding` with the same shape of the inputs.
"""
# make pre embedding
nbatch, nin = inputs.get_shape().as_list()
n_embed = 256
_, unit_num, nh = input_keys.get_shape().as_list()
  # unit_num serves as the End Of Selection <EOS> token
# nh is the dim of key and lstm's hidden state
expand_mask = tf.cast(tf.expand_dims(inputs_select_mask, -1), tf.float32)
mean_key = (tf.reduce_sum(input_keys * expand_mask, axis=[1])
/ (tf.reduce_sum(expand_mask, axis=[1]) + 1e-8))
# make keys with end key
end_key = tf.get_variable("end_key", [1, 1, nh],
initializer=tf.constant_initializer(0.2),
regularizer=None)
input_keys = tf.concat([input_keys,
tf.tile(end_key, [nbatch, 1, 1])], axis=1)
# make mask with terminal state
inputs_select_mask = tf.concat(
[inputs_select_mask, tf.constant([[True]] * nbatch, tf.bool)], axis=1)
with tf.variable_scope(scope, default_name='sequential_selection_head'):
with tf.variable_scope(scope, default_name='lstm'):
# weights & biases
wx = tf.get_variable("wx", [32, nh * 4], initializer=weights_initializer,
regularizer=weights_regularizer)
wh = tf.get_variable("wh", [nh, nh * 4], initializer=weights_initializer,
regularizer=weights_regularizer)
b = tf.get_variable("b", [nh * 4], initializer=biases_initializer,
regularizer=biases_regularizer)
wkey = tf.get_variable("wkey", [nh, nin], initializer=weights_initializer,
regularizer=weights_regularizer)
with tf.variable_scope('embed_fc1', reuse=tf.AUTO_REUSE) as sc_embed_fc1:
pass
with tf.variable_scope('embed_fc2', reuse=tf.AUTO_REUSE) as sc_embed_fc2:
pass
# normalization function
x_nf, h_nf, c_nf = None, None, None
if use_layer_norm:
with tf.variable_scope('x_ln', reuse=tf.AUTO_REUSE) as sc:
x_nf = partial(ln, epsilon=1e-5, enable_openai_impl=True, scope=sc)
with tf.variable_scope('h_ln', reuse=tf.AUTO_REUSE) as sc:
h_nf = partial(ln, epsilon=1e-5, enable_openai_impl=True, scope=sc)
with tf.variable_scope('c_ln', reuse=tf.AUTO_REUSE) as sc:
c_nf = partial(ln, epsilon=1e-5, enable_openai_impl=True, scope=sc)
c = tf.constant(0.0, shape=[nbatch, nh], dtype=tf.float32)
h = tf.constant(0.0, shape=[nbatch, nh], dtype=tf.float32)
samples = []
logits_list = []
outer_fed_selection = input_selections is not None
inputs_list = [inputs]
select_masks = []
for idx in range(max_num):
select_masks.append(inputs_select_mask)
s_embed = tfc_layers.fully_connected(inputs, n_embed, activation_fn=None, scope=sc_embed_fc1)
s_embed += input_func_embed
s_embed = tf.nn.relu(s_embed)
s_embed = tfc_layers.fully_connected(s_embed, 32, activation_fn=None, scope=sc_embed_fc2) # per AStar
c, h = one_step_lstm_op(c, h, s_embed, wx, wh, b,
forget_bias, x_nf, h_nf, c_nf)
# attentions (= queries * keys) as logits
logits = tf.reduce_sum(tf.expand_dims(h, axis=1) * input_keys, axis=-1)
masked_logits = tp_ops.mask_logits(logits, inputs_select_mask)/temperature
logits_list.append(masked_logits)
if outer_fed_selection:
sample = input_selections[:, idx]
else:
# sample from logits
sample = cat_sample_from_logits(masked_logits)
samples.append(sample)
# update the input embedding
index = tf.stack([tf.range(nbatch), sample], axis=1)
inputs += tf.matmul(tf.gather_nd(input_keys, index) - mean_key, wkey)
inputs_list.append(inputs)
# update the selection mask
inputs_select_mask = tf.tensor_scatter_nd_update(inputs_select_mask,
index, [False]*nbatch)
# Mask all the units except <EOS> if the selection already ends
with tf.xla.experimental.jit_scope(compile_ops=False):
end_ind = tf.cast(tf.where(tf.equal(sample, unit_num)), tf.int32)
inputs_select_mask = tf.tensor_scatter_update(
inputs_select_mask, end_ind,
tf.concat([tf.zeros_like(end_ind, dtype=tf.bool)] * unit_num
+ [tf.ones_like(end_ind, dtype=tf.bool)], axis=1)
)
samples = tf.stack(samples, axis=1)
select_masks = tf.stack(select_masks, axis=1)
# finding the first <EOS> unit
mask = tf.not_equal(samples, tf.constant(unit_num, tf.int32))
end_indices = tf.math.minimum(
tf.reduce_sum(tf.cast(mask, tf.int32), axis=1), max_num-1)
loss_mask = tf.tensor_scatter_nd_update(
mask, tf.stack([tf.range(nbatch), end_indices], axis=1), [True]*nbatch)
# update the input embedding using the output selected units
embed = tf.gather_nd(inputs_list,
tf.stack([end_indices, tf.range(nbatch)], axis=1))
logits = tf.stack(logits_list, axis=1)
labels = None
if outer_fed_selection:
x = tf.one_hot(samples, unit_num+1)
s = tf.reduce_sum(x, axis=1, keepdims=True)
select_labels = tf.concat([s[:, :, :-1], tf.zeros([nbatch, 1, 1])],
axis=-1) * tf.cast(select_masks, tf.float32)
end_labels = tf.concat([tf.zeros([nbatch, max_num, unit_num]),
tf.ones([nbatch, max_num, 1])], axis=-1)
labels = tf.where_v2(tf.expand_dims(mask, axis=-1),
select_labels, end_labels)
labels = labels / tf.reduce_sum(labels, axis=-1, keepdims=True)
samples = None
head = to_action_head(logits, MaskSeqCategoricalPdType, nseq=max_num,
mask=loss_mask, labels=labels, sample=samples)
return head, embed
@add_arg_scope
def dot_prod_attention(values, query, mask):
a = tf.stack([tf.reduce_sum(tf.multiply(v, query), axis=-1)
for v in values], -1)
w = tf.nn.softmax(tp_ops.mask_logits(a, mask))
ws = tf.unstack(w, axis=-1)
res = tf.add_n([tf.multiply(v, tf.expand_dims(ww, axis=-1))
for v, ww in zip(values, ws)]) # add_n'n indicates num of values
return res
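# A usage sketch for `dot_prod_attention`: attend over a short list of value
# vectors with a single query (illustrative shapes; the boolean mask marks
# valid entries).
def _dot_prod_attention_usage_sketch():
  query = tf.placeholder(tf.float32, [None, 64])
  values = [tf.placeholder(tf.float32, [None, 64]) for _ in range(3)]
  mask = tf.placeholder(tf.bool, [None, 3])
  return dot_prod_attention(values, query, mask)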
@add_arg_scope
def lstm_embed_block(inputs_x, inputs_hs, inputs_mask, nc,
outputs_collections=None):
""" lstm embedding block.
  Args:
inputs_x: current state - (nrollout*rollout_len, input_dim)
inputs_hs: hidden state - (nrollout*rollout_len, hs_len), NOTE: it's the
states at every time steps of the rollout.
inputs_mask: hidden state mask - (nrollout*rollout_len,)
nc:
  Returns:
    A Tensor, the lstm embedding outputs - (nrollout*rollout_len, out_dim)
A Tensor, the new hidden state - (nrollout, hs_len), NOTE: it's the state at
a single time step.
"""
def consist_seq_dropout(input_seq):
assert isinstance(input_seq, list)
dropout_mask = tf.nn.dropout(tf.ones(shape=[nc.nrollout,
input_seq[0].shape[-1]],
dtype=tf.float32),
keep_prob=1 - nc.lstm_dropout_rate)
return [x * dropout_mask for x in input_seq]
with tf.variable_scope('lstm_embed') as sc:
# to list sequence and call the lstm cell
x_seq = tp_ops.batch_to_seq(inputs_x, nc.nrollout, nc.rollout_len)
# add dropout before LSTM cell TODO(pengsun): use tf.layers.dropout?
if 1 > nc.lstm_dropout_rate > 0 and not nc.test:
x_seq = consist_seq_dropout(x_seq)
hsm_seq = tp_ops.batch_to_seq(tp_ops.to_float32(inputs_mask),
nc.nrollout, nc.rollout_len)
inputs_hs = tf.reshape(inputs_hs, [nc.nrollout, nc.rollout_len, nc.hs_len])
initial_hs = inputs_hs[:, 0, :]
if nc.lstm_cell_type == 'lstm':
lstm_embed, hs_new = lstm(inputs_x_seq=x_seq,
inputs_terminal_mask_seq=hsm_seq,
inputs_state=initial_hs,
nh=nc.nlstm,
forget_bias=nc.forget_bias,
use_layer_norm=nc.lstm_layer_norm,
scope='lstm')
elif nc.lstm_cell_type == 'k_lstm':
lstm_embed, hs_new = k_lstm(inputs_x_seq=x_seq,
inputs_termial_mask_seq=hsm_seq,
inputs_state=initial_hs,
nh=nc.nlstm,
k=nc.lstm_duration,
forget_bias=nc.forget_bias,
use_layer_norm=nc.lstm_layer_norm,
scope='k_lstm')
else:
raise NotImplementedError('unknown cell_type {}'.format(nc.lstm_cell_type))
# add dropout after LSTM cell
if 1 > nc.lstm_dropout_rate > 0 and not nc.test:
lstm_embed = consist_seq_dropout(lstm_embed)
lstm_embed = tp_ops.seq_to_batch(lstm_embed)
return (
lutils.collect_named_outputs(outputs_collections, sc.name + '_out',
lstm_embed),
lutils.collect_named_outputs(outputs_collections, sc.name + '_hs', hs_new)
)
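# A usage sketch for `lstm_embed_block`. `nc` is the net-config object used
# throughout this module; the sketch only assumes it exposes nrollout,
# rollout_len, hs_len, nlstm, forget_bias, lstm_dropout_rate, lstm_duration,
# lstm_cell_type ('lstm' or 'k_lstm'), lstm_layer_norm and test.
def _lstm_embed_block_usage_sketch(nc, input_dim=128):
  x = tf.placeholder(tf.float32, [nc.nrollout * nc.rollout_len, input_dim])
  hs = tf.placeholder(tf.float32, [nc.nrollout * nc.rollout_len, nc.hs_len])
  hs_mask = tf.placeholder(tf.bool, [nc.nrollout * nc.rollout_len])
  embed, hs_new = lstm_embed_block(x, hs, hs_mask, nc)
  return embed, hs_new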