date_collected (stringclasses 1) | repo_name (stringlengths 6-116) | file_name (stringlengths 2-220) | file_contents (stringlengths 13-357k) | prompts (sequence)
---|---|---|---|---|
2024-01-10 | ma-rista/NutriScanPlanner | gpt_chat~gpt_chat.py | import gradio as gr
import openai
import random
import time
# Set the API key for the OpenAI library
openai.api_key = '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'
# Initialize the conversation history as an empty list
initial_history = []
def chatbot_response(message, history):
# Convert the conversation history into OpenAI chat format, keeping user/assistant turns in order
messages = [m for pair in history for m in ({"role": "user", "content": pair[0]}, {"role": "assistant", "content": pair[1]})]
messages.append({"role": "user", "content": message})
# Send the request to OpenAI
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
# Extract the content from the response returned by OpenAI
bot_message = response.choices[0].message.content
return bot_message, history
def chatbot_interface():
chatbot = gr.Chatbot()
msg = gr.Textbox()
def respond(message, chat_history):
bot_message, updated_history = chatbot_response(message, chat_history)
updated_history.append((message, bot_message))
return bot_message, updated_history
with gr.Blocks():
msg.submit(respond, [msg, chatbot], [msg, chatbot])
return gr.Interface(fn=respond, inputs=msg, outputs=chatbot, live=True, title="ChatGPT-based Chatbot")
if __name__ == "__main__":
demo = chatbot_interface()
demo.launch()
| [
"[{'role': 'user', 'content': 'P'}]"
] |
2024-01-10 | ma-rista/NutriScanPlanner | diet_planner~diet_planner_module.py | import gradio as gr
import openai
import deepl
from openai import OpenAI
# Set the API key for the OpenAI library
openai.api_key = ''
client = OpenAI(api_key='')
# Set the DeepL API authentication key
auth_key = "6309462f-ad40-dba2-f27f-e297c462fcd9:fx"
translator = deepl.Translator(auth_key)
def translate_text_with_deepl(text, target_language="KO"):
try:
result = translator.translate_text(text, target_lang=target_language)
return result.text
except deepl.DeepLException as error:
print(error)
return text # Return the original text if translation fails
def generate_diet_plan(calories, ingredients, cuisine, dietary_restrictions, allergies, medical_conditions, meals_per_day, cooking_preference):
# Build the messages in chat format
messages = [
{"role": "system",
# "content": "Use Markdown formatting to create meal plans. You are a nutrition expert. Your task is to develop meal plans that meet the user's specified dietary needs. Your responses should be detailed, structured, and informative, utilizing Markdown tables to present the meal plan. Make sure to consider the user's calorie goals, preferred ingredients, dietary restrictions, and the number of meals per day. Provide a breakdown of each meal with nutritional information such as calorie content and macronutrients."},
"content":"์๋จ ๊ณํ์ ๋งํฌ๋ค์ด ํ์์ผ๋ก ์์ฑํ์ธ์.๋น์ ์ ์์ ์ ๋ฌธ๊ฐ์
๋๋ค.์ฌ์ฉ์๊ฐ ์ง์ ํ ์๋จ ์๊ตฌ ์ฌํญ์ ์ถฉ์กฑ์ํค๋ ์๋จ ๊ณํ์ ๊ฐ๋ฐํ๋ ๊ฒ์ด ๋น์ ์ ์๋ฌด์
๋๋ค.๋ต๋ณ์ ์์ธํ๊ณ ๊ตฌ์กฐํ๋๋ฉฐ ์ ์ตํด์ผ ํ๋ฉฐ,์๋จ ๊ณํ์ ์ ์ํ๋ ๋ฐ ๋งํฌ๋ค์ด ํ๋ฅผ ์ฌ์ฉํด์ผ ํฉ๋๋ค.์ฌ์ฉ์์ ์นผ๋ก๋ฆฌ ๋ชฉํ, ์ ํธ ์ฌ๋ฃ, ์์ด ์ ํ, ํ๋ฃจ ์์ฌ ํ์๋ฅผ ๊ณ ๋ คํ์ธ์.๊ฐ ์์ฌ์ ๋ํ ๋ถ์์ ์ ๊ณตํ๋ฉฐ, ์นผ๋ก๋ฆฌ ํจ๋ ๋ฐ ์ฃผ์์์์์ ๊ฐ์ ์์ ์ ๋ณด๋ฅผ ํฌํจ์ํค์ธ์."},
{"role": "user", "content": f"Create a diet plan with the following requirements:\n{calories}: Your target calorie count for the day.\n{ingredients}: The ingredients that make up your diet (we'll use the best we can, but you're welcome to make other suggestions)\n{cuisine}: Your preferred food style\n{dietary_restrictions}: Food groups you want to limit (dietary restrictions)\n{allergies}: Allergies and intolerances\n{medical_conditions}: Diseases or medical conditions you suffer from.\n{meals_per_day}: Number of meals you want to eat per day\n{cooking_preference}: Preferred cooking time."},
{"role": "assistant", "content": f"""
ํค์ ์ฒด์ค์ ๊ณ ๋ คํ์ฌ ์ด๋์ ์กฐ์ ํ๊ณ , ๋จ๋ฐฑ์ง ์ญ์ทจ๋์ 100~120g์ผ๋ก ๋ง์ถ๊ธฐ ์ํด ์๋จ์ ์กฐ์ ํ๊ฒ ์ต๋๋ค. ์๋๋ ์กฐ์ ๋ ์๋จ์ ์์์
๋๋ค. ์ค์ ์๋จ์ ์ธ๋ถ ์ฌํญ์ ๊ฐ ์์์ ํฌ๊ธฐ, ์กฐ๋ฆฌ ๋ฐฉ๋ฒ์ ๋ฐ๋ผ ๋ฌ๋ผ์ง ์ ์์ต๋๋ค.
|์์ฌ |์์ |์ |์ด๋ (kcal) |
|----|---|---|----|
์์นจ ์์ฌ|์คํฌ๋จ๋ธ ์๊ทธ์ ์ผ์ฑ |2๊ฐ, ์ผ์ฑ ์ถ๊ฐ|300|18|
**ํฉ๊ณ**
- ์ด๋: ์ฝ 2200 kcal
- ๋จ๋ฐฑ์ง: 100~120g (๋ณ๋ ๊ฐ๋ฅ)
"""
},
# Include additional user and assistant messages here if needed.
]
# Call the GPT API
completion = client.chat.completions.create(
model="gpt-4-1106-preview",
messages=messages
)
# Convert the result to Markdown format
diet_plan = completion.choices[0].message.content
# translated_diet_plan = translate_text_with_deepl(diet_plan, "KO")
# markdown_format = f"# Generated meal plan\n\n{translated_diet_plan}"
# markdown_format = f"# Generated meal plan\n\n{diet_plan}"
return gr.Markdown(value = diet_plan)
# Gradio interface definition function
def create_diet_planner_interface():
with gr.Blocks() as demo:
with gr.Row():
with gr.Column():
# Configure the input components
calories = gr.Number(label="Calories from the TDEE calculator")
ingredients = gr.Textbox(label="Ingredients")
cuisine = gr.CheckboxGroup(choices=["Korean", "Chinese", "Western"], label="Category")
dietary_restrictions = gr.CheckboxGroup(choices=["Vegetarian", "Low-carb"], label="Dietary restrictions")
allergies = gr.CheckboxGroup(choices=["Peanuts", "Milk", "Gluten"], label="Allergies and intolerances")
medical_conditions = gr.CheckboxGroup(choices=["Diabetes", "High blood pressure"], label="Medical conditions")
meals_per_day = gr.Radio(choices=["2 meals", "3 meals", "4 meals"], label="Meals per day")
cooking_preference = gr.CheckboxGroup(choices=["Quick cooking", "Longer cooking time"], label="Cooking time and convenience")
submit_button = gr.Button("Generate meal plan")
with gr.Column():
# Output the result
result = gr.Markdown()
submit_button.click(
generate_diet_plan,
inputs=[calories, ingredients, cuisine, dietary_restrictions, allergies, medical_conditions, meals_per_day, cooking_preference],
outputs=result
)
return demo, translate_text_with_deepl, generate_diet_plan
# Call the interface creation function
diet_planner_interface = create_diet_planner_interface()
| [
"์๋จ ๊ณํ์ ๋งํฌ๋ค์ด ํ์์ผ๋ก ์์ฑํ์ธ์.๋น์ ์ ์์ ์ ๋ฌธ๊ฐ์
๋๋ค.์ฌ์ฉ์๊ฐ ์ง์ ํ ์๋จ ์๊ตฌ ์ฌํญ์ ์ถฉ์กฑ์ํค๋ ์๋จ ๊ณํ์ ๊ฐ๋ฐํ๋ ๊ฒ์ด ๋น์ ์ ์๋ฌด์
๋๋ค.๋ต๋ณ์ ์์ธํ๊ณ ๊ตฌ์กฐํ๋๋ฉฐ ์ ์ตํด์ผ ํ๋ฉฐ,์๋จ ๊ณํ์ ์ ์ํ๋ ๋ฐ ๋งํฌ๋ค์ด ํ๋ฅผ ์ฌ์ฉํด์ผ ํฉ๋๋ค.์ฌ์ฉ์์ ์นผ๋ก๋ฆฌ ๋ชฉํ, ์ ํธ ์ฌ๋ฃ, ์์ด ์ ํ, ํ๋ฃจ ์์ฌ ํ์๋ฅผ ๊ณ ๋ คํ์ธ์.๊ฐ ์์ฌ์ ๋ํ ๋ถ์์ ์ ๊ณตํ๋ฉฐ, ์นผ๋ก๋ฆฌ ํจ๋ ๋ฐ ์ฃผ์์์์์ ๊ฐ์ ์์ ์ ๋ณด๋ฅผ ํฌํจ์ํค์ธ์.",
"Create a diet plan with the following requirements:\nPLACEHOLDER: Your target calorie count for the day.\nPLACEHOLDER: The ingredients that make up your diet (we'll use the best we can, but you're welcome to make other suggestions)\nPLACEHOLDER: Your preferred food style\nPLACEHOLDER: Food groups you want to limit (dietary restrictions)\nPLACEHOLDER: Allergies and intolerances\nPLACEHOLDER: Diseases or medical conditions you suffer from.\nPLACEHOLDER: Number of meals you want to eat per day\nPLACEHOLDER: Preferred cooking time.",
"\n ํค์ ์ฒด์ค์ ๊ณ ๋ คํ์ฌ ์ด๋์ ์กฐ์ ํ๊ณ , ๋จ๋ฐฑ์ง ์ญ์ทจ๋์ 100~120g์ผ๋ก ๋ง์ถ๊ธฐ ์ํด ์๋จ์ ์กฐ์ ํ๊ฒ ์ต๋๋ค. ์๋๋ ์กฐ์ ๋ ์๋จ์ ์์์
๋๋ค. ์ค์ ์๋จ์ ์ธ๋ถ ์ฌํญ์ ๊ฐ ์์์ ํฌ๊ธฐ, ์กฐ๋ฆฌ ๋ฐฉ๋ฒ์ ๋ฐ๋ผ ๋ฌ๋ผ์ง ์ ์์ต๋๋ค.\n |์์ฌ |์์ |์ |์ด๋ (kcal) |\n |----|---|---|----|\n ์์นจ ์์ฌ|์คํฌ๋จ๋ธ ์๊ทธ์ ์ผ์ฑ |2๊ฐ, ์ผ์ฑ ์ถ๊ฐ|300|18|\n\n **ํฉ๊ณ**\n\n - ์ด๋: ์ฝ 2200 kcal\n - ๋จ๋ฐฑ์ง: 100~120g (๋ณ๋ ๊ฐ๋ฅ)\n "
] |
2024-01-10 | tigershen23/llm-sandbox | qa_exploration~qa_exploration.py | # cSpell:disable
# Backing functions for Question-Answering exploration
import os
path = os.path.dirname(__file__)
from gpt_index import SimpleDirectoryReader, GPTSimpleVectorIndex
from langchain.agents import Tool, initialize_agent
from langchain.llms import OpenAI
import streamlit as st
#region marketing site supporting code
# Set up document QA index
@st.experimental_singleton
def get_marketing_site_index():
saved_path = path + "/gpt_indexes/website/welcome_marketing.json"
if os.path.exists(saved_path):
return GPTSimpleVectorIndex.load_from_disk(saved_path)
else:
welcome_marketing_data = SimpleDirectoryReader(
path + "/data/website/welcome_marketing",
recursive=True,
required_exts=[".jsonl"],
).load_data()
welcome_marketing_index = GPTSimpleVectorIndex(welcome_marketing_data)
welcome_marketing_index.save_to_disk(saved_path)
return welcome_marketing_index
# Query DB
def query_marketing_site_db(query: str):
return get_marketing_site_index().query(query, verbose=True)
# Create LangChain agent
@st.experimental_memo
def get_marketing_site_agent():
tools = [
Tool(
name="QueryingDB",
func=query_marketing_site_db,
description="Returns most relevant answer from document for query string",
)
]
llm = OpenAI(temperature=0.0)
agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
return agent
#endregion
#region blog supporting code
# Set up document QA index
@st.experimental_singleton
def get_blog_index():
saved_path = path + "/gpt_indexes/website/welcome_blog.json"
if os.path.exists(saved_path):
return GPTSimpleVectorIndex.load_from_disk(saved_path)
else:
welcome_blog_data = SimpleDirectoryReader(
path + "/data/website/welcome_blog",
recursive=True,
required_exts=[".jsonl"],
).load_data()
welcome_blog_index = GPTSimpleVectorIndex(welcome_blog_data)
welcome_blog_index.save_to_disk(saved_path)
return welcome_blog_index
# Query DB
def query_blog_db(query: str):
return get_blog_index().query(query, verbose=True)
# Create LangChain agent
@st.experimental_memo
def get_blog_agent():
tools = [
Tool(
name="QueryingDB",
func=query_blog_db,
description="Returns most relevant answer from document for query string",
)
]
llm = OpenAI(temperature=0.0)
agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
return agent
#endregion
#region Zendesk supporting code
# Set up document QA index
@st.experimental_singleton
def get_zendesk_index():
saved_path = path + "/gpt_indexes/website/welcome_zendesk.json"
if os.path.exists(saved_path):
return GPTSimpleVectorIndex.load_from_disk(saved_path)
else:
welcome_zendesk_data = SimpleDirectoryReader(
path + "/data/website/welcome_zendesk/2023-02-06",
required_exts=[".html"],
).load_data()
welcome_zendesk_index = GPTSimpleVectorIndex(welcome_zendesk_data)
welcome_zendesk_index.save_to_disk(saved_path)
return welcome_zendesk_index
# Query DB
def query_zendesk_db(query: str):
return get_zendesk_index().query(query, verbose=True)
# Create LangChain agent
@st.experimental_memo
def get_zendesk_agent():
tools = [
Tool(
name="QueryingDB",
func=query_zendesk_db,
description="Returns most relevant answer from document for query string",
)
]
llm = OpenAI(temperature=0.0)
agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
return agent
#endregion
#region transcripts supporting code
def get_webinar_name_from_filename(full_path):
# Get the filename from the full path
filename = full_path.split('/')[-1]
# Split the filename by underscore and remove the first element (the number)
parts = filename.split('_')[1:]
# Remove the '.srt' extension from the last element
parts[-1] = parts[-1].replace('.srt', '')
# Join the remaining elements with a space to create the desired output
webinar_name = ' '.join(parts)
return { "webinar_name": webinar_name }
@st.experimental_singleton
def get_transcripts_index():
saved_path = path + "/gpt_indexes/transcripts/org_243.json"
if os.path.exists(saved_path):
return GPTSimpleVectorIndex.load_from_disk(saved_path)
else:
transcripts_data = SimpleDirectoryReader(
path + "/data/transcripts",
required_exts=[".srt"],
file_metadata=get_webinar_name_from_filename,
recursive=True
).load_data()
transcripts_index = GPTSimpleVectorIndex(transcripts_data)
transcripts_index.save_to_disk(saved_path)
return transcripts_index
def query_transcripts_db(query: str):
return get_transcripts_index().query(query, verbose=True)
@st.experimental_memo
def get_transcripts_agent():
tools = [
Tool(
name="QueryingDB",
func=query_transcripts_db,
description="Returns most relevant answer from document for query string",
)
]
llm = OpenAI(temperature=0.0)
agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
return agent
#endregion
#region combined supporting code
# Set up document QA index
@st.experimental_singleton
def get_combined_index():
saved_path = path + "/gpt_indexes/website/welcome_combined.json"
if os.path.exists(saved_path):
return GPTSimpleVectorIndex.load_from_disk(saved_path)
else:
welcome_combined_data = SimpleDirectoryReader(
path + "/data/website",
recursive=True,
required_exts=[".jsonl"],
).load_data()
welcome_combined_index = GPTSimpleVectorIndex(welcome_combined_data)
welcome_combined_index.save_to_disk(saved_path)
return welcome_combined_index
# Query DB
def query_combined_db(query: str):
return get_combined_index().query(query, verbose=True)
# Create LangChain agent
@st.experimental_memo
def get_combined_agent():
tools = [
Tool(
name="QueryingDB",
func=query_combined_db,
description="Returns most relevant answer from document for query string",
)
]
llm = OpenAI(temperature=0.0)
agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
return agent
#endregion
| [] |
2024-01-10 | VCasecnikovs/RAGAgainstTheMachine | chatting.py | from enum import Enum
from typing import Any
from dotenv import load_dotenv
from openai import OpenAI
from pydantic import BaseModel
def get_openAI_client():
load_dotenv()
client = OpenAI()
return client
class Role(str, Enum):
SYSTEM = "system"
USER = "user"
ASSISTANT = "assistant"
class ChatMessage(BaseModel):
role: Role
content: str
def chat_inference(
messages: list[ChatMessage],
client: OpenAI,
model="gpt-4-1106-preview",
):
formatted_messages = []
for message in messages:
formatted_messages.append(
{
"role": message.role,
"content": message.content,
}
)
completion = client.chat.completions.create(
response_format={"type": "json_object"},
model=model,
messages=[
*formatted_messages,
],
)
model_answer = completion.choices[0].message.content
return model_answer
| [] |
2024-01-10 | cherifbenham/generative_ai | packages~pc_enhance~pc_qa_docs.py | import openai
import os
from datasets import load_dataset
from tqdm.auto import tqdm
import pinecone
from time import sleep
#dataset name
dataset='jamescalam/youtube-transcriptions'
#new index to create
index_name = 'openai-youtube-transcriptions'
#openai credentials
openai.api_key = os.getenv("CHATGPT_API_KEY")
embed_model = "text-embedding-ada-002"
dimension_embedding=1536
#pinecone variables
environment="us-east1-gcp"
api_key="8ff9b8af-efae-48f0-985b-3298de8e36c9"
limit = 3750
query="Which training method should I use for sentence transformers when I only have pairs of related sentences?"
def complete(prompt):
# query text-davinci-003
res = openai.Completion.create(
engine='text-davinci-003',
prompt=prompt,
temperature=0,
max_tokens=400,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=None
)
return res['choices'][0]['text'].strip()
def load_data(dataset):
data = load_dataset(dataset, split='train')
return data
def merge_snippets(data):
new_data = []
window = 20 # number of sentences to combine
stride = 4 # number of sentences to 'stride' over, used to create overlap
for i in tqdm(range(0, len(data), stride)):
i_end = min(len(data)-1, i+window)
if data[i]['title'] != data[i_end]['title']:
# in this case we skip this entry as we have start/end of two videos
continue
text = ' '.join(data[i:i_end]['text'])
# create the new merged dataset
new_data.append({
'start': data[i]['start'],
'end': data[i_end]['end'],
'title': data[i]['title'],
'text': text,
'id': data[i]['id'],
'url': data[i]['url'],
'published': data[i]['published'],
'channel_id': data[i]['channel_id']
})
return new_data
def create_embedding(embed_model, query):
res = openai.Embedding.create(
input=[query], #for example: this list (, comma separated) "Sample document text goes here", "there will be several phrases in each batch",
engine=embed_model # for example: embed_model = "text-embedding-ada-002"
)
return res
def initialize_index(index_name, api_key, environment):
# initialize connection to pinecone (get API key at app.pinecone.io)
pinecone.init(
api_key=api_key,
environment=environment
)
# check if index already exists (it shouldn't if this is first time)
if index_name not in pinecone.list_indexes():
# if does not exist, create index
pinecone.create_index(
index_name,
dimension=dimension_embedding,
metric='cosine',
metadata_config={'indexed': ['channel_id', 'published']}
)
# connect to index
index = pinecone.Index(index_name)
return index
def populate_index(index, new_data):
batch_size = 100 # how many embeddings we create and insert at once
for i in tqdm(range(0, len(new_data), batch_size)):
# find end of batch
i_end = min(len(new_data), i+batch_size)
meta_batch = new_data[i:i_end]
# get ids
ids_batch = [x['id'] for x in meta_batch]
# get texts to encode
texts = [x['text'] for x in meta_batch]
# create embeddings (try-except added to avoid RateLimitError)
try:
res = openai.Embedding.create(input=texts, engine=embed_model)
except:
done = False
while not done:
sleep(5)
try:
res = openai.Embedding.create(input=texts, engine=embed_model)
done = True
except:
pass
embeds = [record['embedding'] for record in res['data']]
# cleanup metadata
meta_batch = [{
'start': x['start'],
'end': x['end'],
'title': x['title'],
'text': x['text'],
'url': x['url'],
'published': x['published'],
'channel_id': x['channel_id']
} for x in meta_batch]
to_upsert = list(zip(ids_batch, embeds, meta_batch))
# upsert to Pinecone
index.upsert(vectors=to_upsert)
return index
def retrieve(query, index, embed_model):
res = create_embedding(embed_model, query)
# retrieve from Pinecone
xq = res['data'][0]['embedding']
# initialize the index - needs to be done once outside this function
# index=initialize_index(index_name, res, environment)
#populate the index - needs to be done once outside this function
# index=populate_index(index,new_data)
# get relevant contexts
res = index.query(xq, top_k=3, include_metadata=True)
contexts = [x['metadata']['text'] for x in res['matches']]
# build our prompt with the retrieved contexts included
prompt_start = ("Answer the question based on the context below.\n\n"+"Context:\n")
prompt_end = (f"\n\nQuestion: {query}\nAnswer:")
# append contexts until hitting limit
for i in range(1, len(contexts)):
if len("\n\n---\n\n".join(contexts[:i])) >= limit:
prompt = (prompt_start +
"\n\n---\n\n".join(contexts[:i-1]) +
prompt_end)
break
elif i == len(contexts)-1:
prompt = (prompt_start +
"\n\n---\n\n".join(contexts) +
prompt_end)
return prompt | [
"Answer the question based on the context below.\n\nContext:\n",
"\n\nQuestion: Which training method should I use for sentence transformers when I only have pairs of related sentences?\nAnswer:",
"\n\n---\n\n"
] |
2024-01-10 | aidenaap/ImagePromptEnhancer | responses.py | from discord import Embed
from prompts import *
import datetime
# create error embed in case of mistakes
def create_error_embed(error_message:str) -> Embed:
embed = Embed(
title='Error',
description=error_message,
color=0xff0000,
)
return embed
# function to handle all user commands (!)
def get_response(message:str) -> Embed:
l_message = message.lower()
# help
if l_message == 'help':
embed = Embed(
title='Figure it out yourself huh',
description='Here\'s a list of commands you can use:',
color= 0x8834d8,
)
embed.add_field(name='!help', value='Displays this message.', inline=False)
embed.add_field(name='!trending d', value='Displays top google searches for the day.', inline=False)
embed.add_field(name='!trending wX', value='Weekly top searches. Must specify [1 - 4] weeks as X.', inline=False)
embed.add_field(name='!trending mX', value='Monthly top searches. Must specify [1, 3] months as X.', inline=False)
embed.add_field(name='!trending yXXXX', value='Yearly top searches. Must specify a year before the current year as XXXX.', inline=False)
embed.add_field(name='!enhance', value='Enhance your AI images with a descriptive prompt from OpenAI\'s chatbot.', inline=False)
return embed
# trending
# add another argument after the date for country later on
# !!! weekly/monthly support dropped by google FIX !!!
# !trending y2021 ___
# !trending m3 ___
# !trending w1 ___
# !trending d ___
if l_message.startswith('trending'):
l_message = l_message[8:]
# gather arguments
args = l_message.split(' ')
number_of_args = len(args)
# check presence of arguments
if number_of_args < 1:
return create_error_embed('You must specify a time period.')
elif number_of_args > 6:
return create_error_embed('You must specify no more than 5 keywords.')
# handle date
full_date = args[0]
date_type = full_date[0]
# check if date_type is valid
if date_type in ['y', 'm', 'w', 'd']:
# get time period as integer
if date_type != 'd':
try:
time_period = int(full_date[1:])
except:
return create_error_embed('Time period unable to be obtained')
# if date_type not valid cancel
else:
return create_error_embed('You must specify a valid time period. (y,m,w,d)')
# optional args (gather keywords)
keywords = []
if number_of_args > 1 and number_of_args < 6:
for i in range(1, number_of_args):
keywords.append(args[i])
if len(keywords) > 0:
create_payload(keywords)
# call appropriate functions with error checking params
embed_title = ""
embed_description = ""
# get daily trends
if date_type == 'd':
embed_title = "Daily Trends"
embed_description = str(datetime.datetime.now().date())
df = get_daily_trends()
# get weekly trends
elif date_type == 'w':
if time_period <= 4:
df = get_weekly_trends(weeks=time_period)
else:
return create_error_embed('You must specify 4 or less weeks.')
# get monthly trends
elif date_type == 'm':
if time_period in [1, 3]:
df = get_monthly_trends(months=time_period)
else:
return create_error_embed('You must specify 1 or 3 months.')
# get yearly trends
elif date_type == 'y':
curYear = datetime.datetime.now().year
if time_period < curYear:
embed_title = "Yearly Trends"
embed_description = str(time_period)
df = get_yearly_trends(year=time_period)
else:
return create_error_embed('You must specify a year before the current year.')
else:
return create_error_embed('date type error')
# create embed
embed = Embed(
title=embed_title,
description=embed_description,
color=0x00ff00,
)
# move data from df into embed fields
# iterate through resulting df
for index, row in df.iterrows():
embed.add_field(name=f'{index}. ', value=row[0], inline=True)
return embed
# prompt enhancer
# if l_message starts with enhance
if l_message.startswith('enhance'):
l_message = l_message[8:]
# give user options for version, testp (lifelike), ar, quality, chaos, and creative
# then make a call to chatgpt openai
# then return the response as embed
return 'I didn\'t understand what you said there' | [] |
2024-01-10 | DanielCaicedo97/Deep_Learnining_Platzi | 12%20Desarrollo%20ChatBot~scripts~1_tweet_sentiment.py | import os
from dotenv import load_dotenv
import openai
# Load the environment variables from the .env file
load_dotenv("../envs/ap.env")
openai.api_key = os.getenv("OPENAI_API_KEY")
response = openai.Completion.create(
model="text-davinci-003",
prompt="Decide si el sentimiento de un Tweet es positivo, neutral, o negativo. \
\n\nTweet: \"#LoNuevoEnPlatzi es el Platzibot ๐ค. Un asistente creado con Inteligencia Artificial para acompaรฑarte en tu proceso de aprendizaje.\
\"\nSentiment:",
temperature=0,
max_tokens=60,
top_p=1.0,
frequency_penalty=0.5,
presence_penalty=0.0
)
print(response.choices[0].text)
| [
"Decide si el sentimiento de un Tweet es positivo, neutral, o negativo. \n\nTweet: \"#LoNuevoEnPlatzi es el Platzibot ๐ค. Un asistente creado con Inteligencia Artificial para acompaรฑarte en tu proceso de aprendizaje. \"\nSentiment:"
] |
2024-01-10 | DanielCaicedo97/Deep_Learnining_Platzi | 13%20Curso%20de%20LangChain~scripts~1_hola_langchain.py | # --- Document loading
import os
import requests
from langchain.document_loaders import PyPDFLoader
urls = [
'https://arxiv.org/pdf/2306.06031v1.pdf',
'https://arxiv.org/pdf/2306.12156v1.pdf',
'https://arxiv.org/pdf/2306.14289v1.pdf',
'https://arxiv.org/pdf/2305.10973v1.pdf',
'https://arxiv.org/pdf/2306.13643v1.pdf'
]
ml_papers = []
for i, url in enumerate(urls):
filename = f'paper{i+1}.pdf'
# Check whether the file has already been downloaded
if not os.path.exists(filename):
response = requests.get(url)
with open(filename, 'wb') as f:
f.write(response.content)
print(f'Downloaded {filename}')
else:
print(f'{filename} already exists, loading it from disk.')
loader = PyPDFLoader(filename)
data = loader.load()
ml_papers.extend(data)
# Use the ml_papers list to access the elements of all the downloaded documents
print('Contents of ml_papers:')
print()
print(type(ml_papers), len(ml_papers), ml_papers[3])
# --- Document splitting
# Documents cannot be processed directly by LLMs because they contain too much text; however, we can
# split them into smaller chunks of text so that we can then access their information.
from langchain.text_splitter import RecursiveCharacterTextSplitter
# Each chunk will contain 1500 characters, with an overlap of 200, so that chunk 2 shares 200
# characters with chunk 1 and with chunk 3
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1500,
chunk_overlap=200,
length_function=len
)
documents = text_splitter.split_documents(ml_papers)
# Now we can check the number of `documents` again and look at one example
print(len(documents), documents[10])
# --- Embeddings and ingestion into the vector database
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from dotenv import load_dotenv
# Read the keys.env file and get my OpenAI API key
load_dotenv("../secret/keys.env")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
# It is important that this is set as an environment variable because it will be used later on
os.environ['OPENAI_API_KEY'] = OPENAI_API_KEY
# Create an object capable of converting text into a vector, using OpenAI's ADA-002 model as the base.
# At this point it is important that you have set your OPENAI API KEY as an environment variable so that
# you can access this service
embeddings = OpenAIEmbeddings(model="text-embedding-ada-002")
# With the help of Chroma, we create a vectorstore object to store the vector representations of the texts
# contained in `documents`, the set of text chunks generated previously
vectorstore = Chroma.from_documents(
documents=documents,
embedding=embeddings
)
# Once you have created the vector database, the search_kwargs parameter `k` lets me define how many
# similar vectors to look up when finding information for a question. `retriever` will then be
# our vector database, which will serve to add recent information to the LLMs in order to answer
# questions.
retriever = vectorstore.as_retriever(
search_kwargs={"k": 3}
)
# --- Chat models and chains for querying information
from langchain.chat_models import ChatOpenAI
# I am going to create a `chat` object of the ChatOpenAI class, specifying that the engine to use will be GPT 3.5 and whose temperature
# will be 0, which means it will give very restrictive answers based solely on the text it knows and will have
# little creativity when answering requests.
chat = ChatOpenAI(
openai_api_key=OPENAI_API_KEY,
model_name='gpt-3.5-turbo',
temperature=0.0
)
from langchain.chains import RetrievalQA
# Finally, we create a `chain` of the Question Answering type. As the LLM it will use the
# `chat` object, which is an instance of ChatGPT 3.5; the chain type is `stuff`, which means we will use as much
# information as fits in the prompt; and finally the `retriever` will be the vector database we defined
# previously.
qa_chain = RetrievalQA.from_chain_type(
llm=chat,
chain_type="stuff",
retriever=retriever
)
# Let's put our question-answering chain to the test:
query = "quรฉ es fingpt?"
print("--->", query)
print(qa_chain.run(query))
query = "quรฉ hace complicado entrenar un modelo como el fingpt?"
print("--->", query)
print(qa_chain.run(query))
query = "quรฉ es fast segment?"
print("--->", query)
print(qa_chain.run(query))
query = "cuรกl es la diferencia entre fast sam y mobile sam?"
print("--->", query)
print(qa_chain.run(query))
| [] |
2024-01-10 | DanielCaicedo97/Deep_Learnining_Platzi | 13%20Curso%20de%20LangChain~scripts~2_falcon_example.py | from transformers import AutoTokenizer, pipeline
import torch
model = "tiiuae/falcon-7b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model)
# task: the job our model will be performing
# trust_remote_code: this is because on this occasion we are using a model that does not belong directly to
# HuggingFace's `transformers`, so it grants permission to access a model external to HF.
# device_map: used together with the `accelerate` library to find the most optimal hardware configuration to
# run our processes.
pipeline = pipeline(
"text-generation",
model=model,
tokenizer=tokenizer,
torch_dtype=torch.bfloat16,
trust_remote_code=True,
device_map="auto"
)
print("*"*64)
print(type(pipeline))
from langchain import HuggingFacePipeline
llm_falcon = HuggingFacePipeline(
pipeline=pipeline,
model_kwargs={
'temperature': 0,
'max_length': 200,
'do_sample': True, # generate random sampling of the text at different moments
'top_k': 10, # number of candidates the model will evaluate in order to decide which one is best.
'num_return_sequences': 1, # number of responses to generate
'eos_token_id': tokenizer.eos_token_id # eos = end of sentence, provided by the tokenizer we have already used
}
)
print("*"*64)
print(llm_falcon)
ans = llm_falcon("What is AI?")
print("*"*64)
print(ans)
| [] |
2024-01-10 | AlaiY95/ChatGPT | src~revChatGPT~V1.py | """
Standard ChatGPT
"""
from __future__ import annotations
import base64
import contextlib
import json
import logging
import os
import os.path as osp
import time
import uuid
from functools import wraps
from os import environ
from os import getenv
from typing import NoReturn
import requests
from httpx import AsyncClient
from OpenAIAuth import Authenticator
from OpenAIAuth import Error as AuthError
from . import typing as t
from .utils import create_completer
from .utils import create_session
from .utils import get_input
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(funcName)s - %(message)s",
)
log = logging.getLogger(__name__)
def logger(is_timed: bool):
"""Logger decorator
Args:
is_timed (bool): Whether to include function running time in exit log
Returns:
_type_: decorated function
"""
def decorator(func):
wraps(func)
def wrapper(*args, **kwargs):
log.debug(
"Entering %s with args %s and kwargs %s",
func.__name__,
args,
kwargs,
)
start = time.time()
out = func(*args, **kwargs)
end = time.time()
if is_timed:
log.debug(
"Exiting %s with return value %s. Took %s seconds.",
func.__name__,
out,
end - start,
)
else:
log.debug("Exiting %s with return value %s", func.__name__, out)
return out
return wrapper
return decorator
BASE_URL = environ.get("CHATGPT_BASE_URL") or "https://bypass.churchless.tech/api/"
bcolors = t.colors()
class Chatbot:
"""
Chatbot class for ChatGPT
"""
@logger(is_timed=True)
def __init__(
self,
config: dict[str, str],
conversation_id: str | None = None,
parent_id: str | None = None,
session_client=None,
lazy_loading: bool = False,
) -> None:
"""Initialize a chatbot
Args:
config (dict[str, str]): Login and proxy info. Example:
{
"email": "OpenAI account email",
"password": "OpenAI account password",
"session_token": "<session_token>"
"access_token": "<access_token>"
"proxy": "<proxy_url_string>",
"paid": True/False, # whether this is a plus account
}
More details on these are available at https://github.com/acheong08/ChatGPT#configuration
conversation_id (str | None, optional): Id of the conversation to continue on. Defaults to None.
parent_id (str | None, optional): Id of the previous response message to continue on. Defaults to None.
session_client (_type_, optional): _description_. Defaults to None.
Raises:
Exception: _description_
"""
user_home = getenv("HOME")
if user_home is None:
self.cache_path = osp.join(os.getcwd(), ".chatgpt_cache.json")
else:
# mkdir ~/.config/revChatGPT
if not osp.exists(osp.join(user_home, ".config")):
os.mkdir(osp.join(user_home, ".config"))
if not osp.exists(osp.join(user_home, ".config", "revChatGPT")):
os.mkdir(osp.join(user_home, ".config", "revChatGPT"))
self.cache_path = osp.join(user_home, ".config", "revChatGPT", "cache.json")
self.config = config
self.session = session_client() if session_client else requests.Session()
try:
cached_access_token = self.__get_cached_access_token(
self.config.get("email", None),
)
except t.Error as error:
if error.code == 5:
raise error
cached_access_token = None
if cached_access_token is not None:
self.config["access_token"] = cached_access_token
if "proxy" in config:
if not isinstance(config["proxy"], str):
error = TypeError("Proxy must be a string!")
raise error
proxies = {
"http": config["proxy"],
"https": config["proxy"],
}
if isinstance(self.session, AsyncClient):
proxies = {
"http://": config["proxy"],
"https://": config["proxy"],
}
self.session = AsyncClient(proxies=proxies)
else:
self.session.proxies.update(proxies)
self.conversation_id = conversation_id
self.parent_id = parent_id
self.conversation_mapping = {}
self.conversation_id_prev_queue = []
self.parent_id_prev_queue = []
self.lazy_loading = lazy_loading
self.__check_credentials()
@logger(is_timed=True)
def __check_credentials(self) -> None:
"""Check login info and perform login
Any one of the following is sufficient for login. Multiple login info can be provided at the same time and they will be used in the order listed below.
- access_token
- session_token
- email + password
Raises:
Exception: _description_
AuthError: _description_
"""
if "access_token" in self.config:
self.set_access_token(self.config["access_token"])
elif "session_token" in self.config:
pass
elif "email" not in self.config or "password" not in self.config:
error = t.AuthenticationError("Insufficient login details provided!")
raise error
if "access_token" not in self.config:
try:
self.login()
except AuthError as error:
raise error
@logger(is_timed=False)
def set_access_token(self, access_token: str) -> None:
"""Set access token in request header and self.config, then cache it to file.
Args:
access_token (str): access_token
"""
self.session.headers.clear()
self.session.headers.update(
{
"Accept": "text/event-stream",
"Authorization": f"Bearer {access_token}",
"Content-Type": "application/json",
"X-Openai-Assistant-App-Id": "",
"Connection": "close",
"Accept-Language": "en-US,en;q=0.9",
"Referer": "https://chat.openai.com/chat",
},
)
self.session.cookies.update(
{
"library": "revChatGPT",
},
)
self.config["access_token"] = access_token
email = self.config.get("email", None)
if email is not None:
self.__cache_access_token(email, access_token)
@logger(is_timed=False)
def __get_cached_access_token(self, email: str | None) -> str | None:
"""Read access token from cache
Args:
email (str | None): email of the account to get access token
Raises:
Error: _description_
Error: _description_
Error: _description_
Returns:
str | None: access token string or None if not found
"""
email = email or "default"
cache = self.__read_cache()
access_token = cache.get("access_tokens", {}).get(email, None)
# Parse access_token as JWT
if access_token is not None:
try:
# Split access_token into 3 parts
s_access_token = access_token.split(".")
# Add padding to the middle part
s_access_token[1] += "=" * ((4 - len(s_access_token[1]) % 4) % 4)
d_access_token = base64.b64decode(s_access_token[1])
d_access_token = json.loads(d_access_token)
except base64.binascii.Error:
error = t.Error(
source="__get_cached_access_token",
message="Invalid access token",
code=t.ErrorType.INVALID_ACCESS_TOKEN_ERROR,
)
raise error from None
except json.JSONDecodeError:
error = t.Error(
source="__get_cached_access_token",
message="Invalid access token",
code=t.ErrorType.INVALID_ACCESS_TOKEN_ERROR,
)
raise error from None
exp = d_access_token.get("exp", None)
if exp is not None and exp < time.time():
error = t.Error(
source="__get_cached_access_token",
message="Access token expired",
code=t.ErrorType.EXPIRED_ACCESS_TOKEN_ERROR,
)
raise error
return access_token
@logger(is_timed=False)
def __cache_access_token(self, email: str, access_token: str) -> None:
"""Write an access token to cache
Args:
email (str): account email
access_token (str): account access token
"""
email = email or "default"
cache = self.__read_cache()
if "access_tokens" not in cache:
cache["access_tokens"] = {}
cache["access_tokens"][email] = access_token
self.__write_cache(cache)
@logger(is_timed=False)
def __write_cache(self, info: dict) -> None:
"""Write cache info to file
Args:
info (dict): cache info, current format
{
"access_tokens":{"[email protected]": 'this account's access token', }
}
"""
dirname = osp.dirname(self.cache_path) or "."
os.makedirs(dirname, exist_ok=True)
json.dump(info, open(self.cache_path, "w", encoding="utf-8"), indent=4)
@logger(is_timed=False)
def __read_cache(self):
try:
cached = json.load(open(self.cache_path, encoding="utf-8"))
except (FileNotFoundError, json.decoder.JSONDecodeError):
cached = {}
return cached
@logger(is_timed=True)
def login(self) -> None:
if (
"email" not in self.config or "password" not in self.config
) and "session_token" not in self.config:
log.error("Insufficient login details provided!")
raise Exception("Insufficient login details provided!")
auth = Authenticator(
email_address=self.config.get("email"),
password=self.config.get("password"),
proxy=self.config.get("proxy"),
)
if self.config.get("session_token"):
log.debug("Using session token")
auth.session_token = self.config["session_token"]
auth.get_access_token()
if auth.access_token is None:
del self.config["session_token"]
self.login()
return
else:
log.debug("Using authenticator to get access token")
auth.begin()
self.config["session_token"] = auth.session_token
auth.get_access_token()
self.set_access_token(auth.access_token)
@logger(is_timed=True)
def ask(
self,
prompt: str,
conversation_id: str | None = None,
parent_id: str | None = None,
timeout: float = 360,
) -> str:
"""Ask a question to the chatbot
Args:
prompt (str): The question
conversation_id (str | None, optional): UUID for the conversation to continue on. Defaults to None.
parent_id (str | None, optional): UUID for the message to continue on. Defaults to None.
timeout (float, optional): Timeout for getting the full response, unit is second. Defaults to 360.
Raises:
Error: _description_
Exception: _description_
Error: _description_
Error: _description_
Error: _description_
Yields:
_type_: _description_
"""
if parent_id is not None and conversation_id is None:
log.error("conversation_id must be set once parent_id is set")
error = t.Error(
source="User",
message="conversation_id must be set once parent_id is set",
code=t.ErrorType.USER_ERROR,
)
raise error
if conversation_id is not None and conversation_id != self.conversation_id:
log.debug("Updating to new conversation by setting parent_id to None")
self.parent_id = None
conversation_id = conversation_id or self.conversation_id
parent_id = parent_id or self.parent_id
if conversation_id is None and parent_id is None:
parent_id = str(uuid.uuid4())
log.debug("New conversation, setting parent_id to new UUID4: %s", parent_id)
if conversation_id is not None and parent_id is None:
if conversation_id not in self.conversation_mapping:
if self.lazy_loading:
log.debug(
"Conversation ID %s not found in conversation mapping, try to get conversation history for the given ID",
conversation_id,
)
with contextlib.suppress(Exception):
history = self.get_msg_history(conversation_id)
self.conversation_mapping[conversation_id] = history[
"current_node"
]
else:
log.debug(
"Conversation ID %s not found in conversation mapping, mapping conversations",
conversation_id,
)
self.__map_conversations()
if conversation_id in self.conversation_mapping:
log.debug(
"Conversation ID %s found in conversation mapping, setting parent_id to %s",
conversation_id,
self.conversation_mapping[conversation_id],
)
parent_id = self.conversation_mapping[conversation_id]
else: # invalid conversation_id provided, treat as a new conversation
conversation_id = None
parent_id = str(uuid.uuid4())
data = {
"action": "next",
"messages": [
{
"id": str(uuid.uuid4()),
"role": "user",
"author": {"role": "user"},
"content": {"content_type": "text", "parts": [prompt]},
},
],
"conversation_id": conversation_id,
"parent_message_id": parent_id,
"model": self.config.get("model")
or (
"text-davinci-002-render-paid"
if self.config.get("paid")
else "text-davinci-002-render-sha"
),
}
log.debug("Sending the payload")
log.debug(json.dumps(data, indent=2))
self.conversation_id_prev_queue.append(
data["conversation_id"],
)
self.parent_id_prev_queue.append(data["parent_message_id"])
response = self.session.post(
url=f"{BASE_URL}conversation",
data=json.dumps(data),
timeout=timeout,
stream=True,
)
self.__check_response(response)
done: bool = False
for line in response.iter_lines():
# remove b' and ' at the beginning and end and ignore case
line = str(line)[2:-1]
if line.lower() == "internal server error":
log.error("Internal Server Error: %s", line)
error = t.Error(
source="ask",
message="Internal Server Error",
code=t.ErrorType.SERVER_ERROR,
)
raise error
if line == "" or line is None:
continue
if "data: " in line:
line = line[6:]
if line == "[DONE]":
done = True
break
line = line.replace('\\"', '"')
line = line.replace("\\'", "'")
line = line.replace("\\\\", "\\")
try:
line = json.loads(line)
except json.decoder.JSONDecodeError:
continue
if not self.__check_fields(line) or response.status_code != 200:
log.error("Field missing", exc_info=True)
log.error(response.text)
if response.status_code == 401:
error = t.Error(
source="ask",
message="Permission denied",
code=t.ErrorType.AUTHENTICATION_ERROR,
)
raise error
elif response.status_code == 403:
error = t.Error(
source="ask",
message="Cloudflare triggered a 403 error",
code=t.ErrorType.CLOUDFLARE_ERROR,
)
raise error
elif response.status_code == 429:
error = t.Error(
source="ask",
message="Rate limit exceeded",
code=t.ErrorType.RATE_LIMIT_ERROR,
)
raise error
else:
error = t.Error(
source="ask",
message=line,
code=t.ErrorType.SERVER_ERROR,
)
raise error
message: str = line["message"]["content"]["parts"][0]
if message == prompt:
continue
conversation_id = line["conversation_id"]
parent_id = line["message"]["id"]
try:
model = line["message"]["metadata"]["model_slug"]
except KeyError:
model = None
log.debug("Received message: %s", message)
log.debug("Received conversation_id: %s", conversation_id)
log.debug("Received parent_id: %s", parent_id)
yield {
"message": message.strip("\n"),
"conversation_id": conversation_id,
"parent_id": parent_id,
"model": model,
}
if not done:
pass
self.conversation_mapping[conversation_id] = parent_id
if parent_id is not None:
self.parent_id = parent_id
if conversation_id is not None:
self.conversation_id = conversation_id
@logger(is_timed=False)
def __check_fields(self, data: dict) -> bool:
try:
data["message"]["content"]
except (TypeError, KeyError):
return False
return True
@logger(is_timed=False)
def __check_response(self, response: requests.Response) -> None:
"""Make sure response is success
Args:
response (_type_): _description_
Raises:
Error: _description_
"""
if response.status_code != 200:
print(response.text)
error = t.Error(
source="OpenAI",
message=response.text,
code=response.status_code,
)
raise error
@logger(is_timed=True)
def get_conversations(
self,
offset: int = 0,
limit: int = 20,
encoding: str | None = None,
) -> list:
"""
Get conversations
:param offset: Integer
:param limit: Integer
"""
url = f"{BASE_URL}conversations?offset={offset}&limit={limit}"
response = self.session.get(url)
self.__check_response(response)
if encoding is not None:
response.encoding = encoding
data = json.loads(response.text)
return data["items"]
@logger(is_timed=True)
def get_msg_history(self, convo_id: str, encoding: str | None = None) -> list:
"""
Get message history
:param id: UUID of conversation
:param encoding: String
"""
url = f"{BASE_URL}conversation/{convo_id}"
response = self.session.get(url)
self.__check_response(response)
if encoding is not None:
response.encoding = encoding
return json.loads(response.text)
@logger(is_timed=True)
def gen_title(self, convo_id: str, message_id: str) -> str:
"""
Generate title for conversation
"""
response = self.session.post(
f"{BASE_URL}conversation/gen_title/{convo_id}",
data=json.dumps(
{"message_id": message_id, "model": "text-davinci-002-render"},
),
)
self.__check_response(response)
return response.json().get("title", "Error generating title")
@logger(is_timed=True)
def change_title(self, convo_id: str, title: str) -> None:
"""
Change title of conversation
:param id: UUID of conversation
:param title: String
"""
url = f"{BASE_URL}conversation/{convo_id}"
response = self.session.patch(url, data=json.dumps({"title": title}))
self.__check_response(response)
@logger(is_timed=True)
def delete_conversation(self, convo_id: str) -> None:
"""
Delete conversation
:param id: UUID of conversation
"""
url = f"{BASE_URL}conversation/{convo_id}"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
@logger(is_timed=True)
def clear_conversations(self) -> None:
"""
Delete all conversations
"""
url = f"{BASE_URL}conversations"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
@logger(is_timed=False)
def __map_conversations(self) -> None:
conversations = self.get_conversations()
histories = [self.get_msg_history(x["id"]) for x in conversations]
for x, y in zip(conversations, histories):
self.conversation_mapping[x["id"]] = y["current_node"]
@logger(is_timed=False)
def reset_chat(self) -> None:
"""
Reset the conversation ID and parent ID.
:return: None
"""
self.conversation_id = None
self.parent_id = str(uuid.uuid4())
@logger(is_timed=False)
def rollback_conversation(self, num: int = 1) -> None:
"""
Rollback the conversation.
:param num: Integer. The number of messages to rollback
:return: None
"""
for _ in range(num):
self.conversation_id = self.conversation_id_prev_queue.pop()
self.parent_id = self.parent_id_prev_queue.pop()
class AsyncChatbot(Chatbot):
"""
Async Chatbot class for ChatGPT
"""
def __init__(
self,
config: dict,
conversation_id: str | None = None,
parent_id: str | None = None,
) -> None:
super().__init__(
config=config,
conversation_id=conversation_id,
parent_id=parent_id,
session_client=AsyncClient,
)
async def ask(
self,
prompt: str,
conversation_id: str | None = None,
parent_id: str | None = None,
timeout: int = 360,
) -> dict:
"""
Ask a question to the chatbot
"""
if parent_id is not None and conversation_id is None:
error = t.Error(
source="User",
message="conversation_id must be set once parent_id is set",
code=t.ErrorType.SERVER_ERROR,
)
raise error
if conversation_id is not None and conversation_id != self.conversation_id:
self.parent_id = None
conversation_id = conversation_id or self.conversation_id
parent_id = parent_id or self.parent_id
if conversation_id is None and parent_id is None:
parent_id = str(uuid.uuid4())
if conversation_id is not None and parent_id is None:
if conversation_id not in self.conversation_mapping:
await self.__map_conversations()
parent_id = self.conversation_mapping[conversation_id]
data = {
"action": "next",
"messages": [
{
"id": str(uuid.uuid4()),
"role": "user",
"content": {"content_type": "text", "parts": [prompt]},
},
],
"conversation_id": conversation_id,
"parent_message_id": parent_id,
"model": self.config.get("model")
or (
"text-davinci-002-render-paid"
if self.config.get("paid")
else "text-davinci-002-render-sha"
),
}
self.conversation_id_prev_queue.append(
data["conversation_id"],
)
self.parent_id_prev_queue.append(data["parent_message_id"])
async with self.session.stream(
method="POST",
url=f"{BASE_URL}conversation",
data=json.dumps(data),
timeout=timeout,
) as response:
self.__check_response(response)
async for line in response.aiter_lines():
if line == "" or line is None:
continue
if "data: " in line:
line = line[6:]
if "[DONE]" in line:
break
try:
line = json.loads(line)
except json.decoder.JSONDecodeError:
continue
if not self.__check_fields(line):
raise Exception(f"Field missing. Details: {str(line)}")
message = line["message"]["content"]["parts"][0]
conversation_id = line["conversation_id"]
parent_id = line["message"]["id"]
model = (
line["message"]["metadata"]["model_slug"]
if "model_slug" in line["message"]["metadata"]
else None
)
yield {
"message": message,
"conversation_id": conversation_id,
"parent_id": parent_id,
"model": model,
}
self.conversation_mapping[conversation_id] = parent_id
if parent_id is not None:
self.parent_id = parent_id
if conversation_id is not None:
self.conversation_id = conversation_id
async def get_conversations(self, offset: int = 0, limit: int = 20) -> list:
"""
Get conversations
:param offset: Integer
:param limit: Integer
"""
url = f"{BASE_URL}conversations?offset={offset}&limit={limit}"
response = await self.session.get(url)
self.__check_response(response)
data = json.loads(response.text)
return data["items"]
async def get_msg_history(
self,
convo_id: str,
encoding: str | None = "utf-8",
) -> dict:
"""
Get message history
:param id: UUID of conversation
"""
url = f"{BASE_URL}conversation/{convo_id}"
response = await self.session.get(url)
if encoding is not None:
response.encoding = encoding
self.__check_response(response)
return json.loads(response.text)
async def gen_title(self, convo_id: str, message_id: str) -> None:
"""
Generate title for conversation
"""
url = f"{BASE_URL}conversation/gen_title/{convo_id}"
response = await self.session.post(
url,
data=json.dumps(
{"message_id": message_id, "model": "text-davinci-002-render"},
),
)
await self.__check_response(response)
async def change_title(self, convo_id: str, title: str) -> None:
"""
Change title of conversation
:param convo_id: UUID of conversation
:param title: String
"""
url = f"{BASE_URL}conversation/{convo_id}"
response = await self.session.patch(url, data=f'{{"title": "{title}"}}')
self.__check_response(response)
async def delete_conversation(self, convo_id: str) -> None:
"""
Delete conversation
:param convo_id: UUID of conversation
"""
url = f"{BASE_URL}conversation/{convo_id}"
response = await self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
async def clear_conversations(self) -> None:
"""
Delete all conversations
"""
url = f"{BASE_URL}conversations"
response = await self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
async def __map_conversations(self) -> None:
conversations = await self.get_conversations()
histories = [await self.get_msg_history(x["id"]) for x in conversations]
for x, y in zip(conversations, histories):
self.conversation_mapping[x["id"]] = y["current_node"]
def __check_fields(self, data: dict) -> bool:
try:
data["message"]["content"]
except (TypeError, KeyError):
return False
return True
def __check_response(self, response) -> None:
response.raise_for_status()
get_input = logger(is_timed=False)(get_input)
@logger(is_timed=False)
def configure() -> dict:
"""
Looks for a config file in the following locations:
"""
config_files = ["config.json"]
if xdg_config_home := getenv("XDG_CONFIG_HOME"):
config_files.append(f"{xdg_config_home}/revChatGPT/config.json")
if user_home := getenv("HOME"):
config_files.append(f"{user_home}/.config/revChatGPT/config.json")
if config_file := next((f for f in config_files if osp.exists(f)), None):
with open(config_file, encoding="utf-8") as f:
config = json.load(f)
else:
print("No config file found.")
raise Exception("No config file found.")
return config
@logger(is_timed=False)
def main(config: dict) -> NoReturn:
"""
Main function for the chatGPT program.
"""
chatbot = Chatbot(
config,
conversation_id=config.get("conversation_id"),
parent_id=config.get("parent_id"),
)
def handle_commands(command: str) -> bool:
if command == "!help":
print(
"""
!help - Show this message
!reset - Forget the current conversation
!config - Show the current configuration
!rollback x - Rollback the conversation (x being the number of messages to rollback)
!exit - Exit this program
!setconversation - Changes the conversation
""",
)
elif command == "!reset":
chatbot.reset_chat()
print("Chat session successfully reset.")
elif command == "!config":
print(json.dumps(chatbot.config, indent=4))
elif command.startswith("!rollback"):
try:
rollback = int(command.split(" ")[1])
except IndexError:
logging.exception(
"No number specified, rolling back 1 message",
stack_info=True,
)
rollback = 1
chatbot.rollback_conversation(rollback)
print(f"Rolled back {rollback} messages.")
elif command.startswith("!setconversation"):
try:
chatbot.conversation_id = chatbot.config[
"conversation_id"
] = command.split(" ")[1]
print("Conversation has been changed")
except IndexError:
log.exception(
"Please include conversation UUID in command",
stack_info=True,
)
print("Please include conversation UUID in command")
elif command == "!exit":
exit()
else:
return False
return True
session = create_session()
completer = create_completer(
["!help", "!reset", "!config", "!rollback", "!exit", "!setconversation"],
)
print()
try:
while True:
print(f"{bcolors.OKBLUE + bcolors.BOLD}You: {bcolors.ENDC}")
prompt = get_input(session=session, completer=completer)
if prompt.startswith("!") and handle_commands(prompt):
continue
print()
print(f"{bcolors.OKGREEN + bcolors.BOLD}Chatbot: {bcolors.ENDC}")
prev_text = ""
for data in chatbot.ask(prompt):
message = data["message"][len(prev_text) :]
print(message, end="", flush=True)
prev_text = data["message"]
print(bcolors.ENDC)
print()
except (KeyboardInterrupt, EOFError):
exit()
if __name__ == "__main__":
print(
"""
ChatGPT - A command-line interface to OpenAI's ChatGPT (https://chat.openai.com/chat)
Repo: github.com/acheong08/ChatGPT
""",
)
print("Type '!help' to show a full list of commands")
print(
f"{bcolors.BOLD}{bcolors.WARNING}Press Esc followed by Enter or Alt+Enter to send a message.{bcolors.ENDC}",
)
main(configure())
| [
"text",
"content_type"
] |
2024-01-10 | lorsan/aibot | openai_helper.py | import os
import logging
import json
import openai
#from llama_index import SimpleDirectoryReader, VectorStoreIndex, LLMPredictor, ServiceContext, GPTVectorStoreIndex
from llama_index import SimpleDirectoryReader, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, PromptHelper, ServiceContext
from langchain.chat_models import ChatOpenAI
logger = logging.getLogger("bot")
logger.setLevel("DEBUG")
class OpenAiHelper:
def __init__(self, token):
logging.info(f"Initializing OpenAI helper. Selected model: gpt3 di llamaindex")
os.environ["OPENAI_API_KEY"] = token
openai.api_key = os.environ["OPENAI_API_KEY"]
self.max_input_size = 4096
self.num_outputs = 512
self.max_chunk_overlap_ratio = 0.1
self.chunk_size_limit = 600
#self.temperature = temperature
#self.max_tokens = max_tokens
#self.model = model
#self.llm_predictor = LLMPredictor(llm=self)
#self.service_context = ServiceContext.from_defaults(llm_predictor=self.llm_predictor)
def get_response(self, message_text):
try:
logging.info(f"Getting response from OpenAI. Message: {message_text}")
#### WORKING CODE #####
prompt_helper = PromptHelper(self.max_input_size, self.num_outputs, self.max_chunk_overlap_ratio, chunk_size_limit= self.chunk_size_limit)
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.7, model_name="gpt-4", max_tokens=self.num_outputs))
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
loader = SimpleDirectoryReader('villa_romana_nonni_arii', recursive=True, exclude_hidden=True)
documents = loader.load_data()
index = GPTVectorStoreIndex(documents, service_context=service_context, prompt_helper=prompt_helper)
query_engine = index.as_query_engine(vector_store_query_mode="default")
ai_response = query_engine.query(message_text)
##### END OF WORKING CODE #######
return str(ai_response)
except Exception as e:
logging.error(f"Failed to get response from OpenAI: {e}")
raise | [] |
2024-01-10 | d3287t328/latinalinguamachina | scripts~chromadb_embeddings.py | # allows you to chat with all the text or markdown files in the dir /tmp/chroma
import os
from glob import glob
from tqdm import tqdm
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import TextLoader, DirectoryLoader
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
# Set OpenAI API Key
os.environ["OPENAI_API_KEY"] = ""
# UTF-8 Text Loader
class UTF8TextLoader(TextLoader):
def load(self):
with open(self.file_path, 'r', encoding='utf-8') as f:
text = f.read()
        return [Document(page_content=text, metadata={"source": self.file_path})]
# Modified DirectoryLoader to include subdirectories
class MyDirectoryLoader(DirectoryLoader):
def __init__(self, directory, glob="**/*", loader_cls=UTF8TextLoader, **loader_kwargs):
        super().__init__(directory, glob=glob, loader_cls=loader_cls, **loader_kwargs)
# Load documents from directory
def load_documents_from_directory(directory, patterns):
documents = []
for pattern in patterns:
loader = MyDirectoryLoader(directory, glob=pattern)
documents.extend(loader.load())
return documents
# Directory and patterns (include subdirectories)
directory_path = '/tmp/chroma/'
patterns = ["**/*.txt", "**/*.md"]
# Load and process documents
documents = load_documents_from_directory(directory_path, patterns)
# Text splitting
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
texts = text_splitter.split_documents(documents)
# Embedding and persistence
persist_directory = 'db'
embedding = OpenAIEmbeddings()
vectordb = Chroma.from_documents(documents=texts, embedding=embedding, persist_directory=persist_directory)
vectordb.persist()
vectordb = None
# Load from disk
vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding)
# Create retriever and QA chain
retriever = vectordb.as_retriever()
qa_chain = RetrievalQA.from_chain_type(llm=ChatOpenAI(), chain_type="stuff", retriever=retriever, return_source_documents=True)
# LLM Response Processing
def process_llm_response(llm_response):
print(llm_response['result'])
print('\n\nSources:')
for source in llm_response["source_documents"]:
print(source.metadata['source'])
# Interactive QA
while True:
query = input("Enter your query: ")
if query.lower() == 'quit':
break
llm_response = qa_chain(query)
process_llm_response(llm_response)
# Cleanup
vectordb.delete_collection()
vectordb.persist()
| [] |
2024-01-10 | d3287t328/latinalinguamachina | scripts~pgvector-langchain_script.py | # Derived from the implementation guide one shot. https://github.com/timescale/vector-cookbook/blob/main/intro_langchain_pgvector/langchain_pgvector_intro.ipynb
import os
import pandas as pd
import numpy as np
from dotenv import load_dotenv, find_dotenv
from langchain.vectorstores.pgvector import PGVector, DistanceStrategy
from langchain.text_splitter import TokenTextSplitter
from langchain.document_loaders import DataFrameLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import Document
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from IPython.display import Markdown, display
import tiktoken
load_dotenv(find_dotenv())
OPENAI_API_KEY = os.environ['OPENAI_API_KEY']
host= os.environ['TIMESCALE_HOST']
port= os.environ['TIMESCALE_PORT']
user= os.environ['TIMESCALE_USER']
password= os.environ['TIMESCALE_PASSWORD']
dbname= os.environ['TIMESCALE_DBNAME']
CONNECTION_STRING = f"postgresql+psycopg2://{user}:{password}@{host}:{port}/{dbname}?sslmode=require"
df = pd.read_csv('blog_posts_data.csv')
def num_tokens_from_string(string: str, encoding_name = "cl100k_base") -> int:
if not string:
return 0
encoding = tiktoken.get_encoding(encoding_name)
return len(encoding.encode(string))
text_splitter = TokenTextSplitter(chunk_size=512,chunk_overlap=103)
new_list = []
for i in range(len(df.index)):
text = df['content'][i]
token_len = num_tokens_from_string(text)
if token_len <= 512:
new_list.append([df['title'][i], df['content'][i], df['url'][i]])
else:
split_text = text_splitter.split_text(text)
new_list.extend(
[df['title'][i], split_text[j], df['url'][i]]
for j in range(len(split_text))
)
df_new = pd.DataFrame(new_list, columns=['title', 'content', 'url'])
df_new.to_csv('blog_posts_data_chunked.csv', index=False)
loader = DataFrameLoader(df_new, page_content_column = 'content')
docs = loader.load()
embeddings = OpenAIEmbeddings()
db = PGVector.from_documents(
documents= docs,
embedding = embeddings,
collection_name= "blog_posts",
distance_strategy = DistanceStrategy.COSINE,
connection_string=CONNECTION_STRING)
retriever = db.as_retriever(search_kwargs={"k": 3})
llm = ChatOpenAI(temperature = 0.0, model = 'gpt-3.5-turbo-16k')
qa_stuff = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
verbose=True,
)
query = "How does Edeva use continuous aggregates?"
response = qa_stuff.run(query)
display(Markdown(response))
qa_stuff_with_sources = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
verbose=True,
)
responses = qa_stuff_with_sources({"query": query})
# Pull out the retrieved source documents so their content and metadata can be listed below
source_content = [doc.page_content for doc in responses["source_documents"]]
source_metadata = [doc.metadata for doc in responses["source_documents"]]
def construct_result_with_sources():
result = responses['result']
result += "\n\n"
result += "Sources used:"
for i in range(len(source_content)):
result += "\n\n"
result += source_metadata[i]['title']
result += "\n\n"
result += source_metadata[i]['url']
return result
display(Markdown(construct_result_with_sources()))
| [] |
2024-01-10 | fabien5525/IPSSI_web_scrapping_tp1 | handleOpenAI.py | import openai
import requests
from bs4 import BeautifulSoup
from urllib.parse import quote
def commandList() -> list:
return [
'/translate',
'/summary',
'/imagine',
'/code',
'/actu',
'/json'
]
def handleOpenAI(api_key, prompt) -> str:
if not prompt.startswith('/') or prompt.split(' ')[0] not in commandList():
return 'Commande non reconnue' + '\n' + 'Liste des commandes : ' + '\n' + '\n'.join(commandList())
prompt = prompt.strip()
command = prompt.split(' ')[0]
search = prompt.replace(command, '').strip()
openai.api_key = api_key
match command:
case '/translate':
return handleTranslate(search)
case '/summary':
return handleSummary(search)
case '/imagine':
return handleImagine(search)
case '/code':
return handleCode(search)
case '/actu':
return handleActu(search)
case '/json':
return handleJson(search)
return ''
def handleTranslate(prompt) -> str:
customPrompt = 'Translate to french if english or to english if french (with a response like "Traduction : ") : \n\n {}'.format(prompt)
response = openai.Completion.create(
model="text-davinci-003",
prompt=customPrompt,
temperature=0.3,
max_tokens=100,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
try:
return response.choices[0].text
except:
print('Une erreur est survenue avec la commande /translate')
print(response)
return 'Une erreur est survenue'
def handleSummary(prompt) -> str:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system",
"content": f"Tu es un rรฉdacteur web qui synthรฉtise l'actualitรฉ en 50 mots, Tu fais des liaisons entre les articles avec des mots tel que 'mais', 'donc', 'or', 'par contre', 'en revanche', 'en effet', 'cependant', 'toutefois', 'par ailleurs', 'par contre', 'par contre, 'enfin'"},
{"role": "user",
"content": "Voici la liste des actualitรฉs ร synthรฉtiser : " + prompt},
],
max_tokens=100,
temperature=0.9,
)
try:
return response.choices[0].message.content
except:
print('Une erreur est survenue avec la commande /summary')
print(response)
return 'Une erreur est survenue'
def handleImagine(prompt) -> str:
customPrompt = prompt
response = openai.Image.create(
prompt=customPrompt,
n=1,
size="256x256"
)
try:
return response['data'][0]['url']
except:
print('Une erreur est survenue avec la commande /imagine')
print(response)
return 'Une erreur est survenue'
def handleCode(prompt) -> str:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system",
"content": "Tu es un expert informatique dans tous les langages, tu dois corriger le code ci dessous mais sans ajoiter de commentaire ou d'expliquation, tu dois juste corriger le code"},
{"role": "user",
"content": prompt},
],
max_tokens=100,
temperature=0.9,
)
try:
return response.choices[0].message.content
except:
print('Une erreur est survenue avec la commande /code')
print(response)
return 'Une erreur est survenue'
def handleActu(prompt) -> str:
response = requests.get("https://www.20minutes.fr/search?q={}#gsc.tab=0&gsc.q=IA&gsc.page=1".format(quote(prompt))).text
soup = BeautifulSoup(response, "html.parser")
text = soup.text.replace("\n", " ").replace("\t", " ")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system",
"content": f"Tu es un rรฉdacteur web qui synthรฉtise l'actualitรฉ en une cinquentaine de mots, Tu fais des liaisons entre les articles avec des mots tel que 'mais', 'donc', 'or', 'par contre', 'en revanche', 'en effet', 'cependant', 'toutefois', 'par ailleurs', 'par contre', 'par contre, 'enfin'"},
{"role": "user",
"content": "Voici la liste des actualitรฉs ร synthรฉtiser : " + text},
],
max_tokens=200,
temperature=0.9,
)
try:
return response.choices[0].message.content
except:
print('Une erreur est survenue avec la commande /actu')
print(response)
return 'Une erreur est survenue'
def handleJson(prompt) -> str:
# check if prompt is an url
if not prompt.startswith('http'):
return 'L\'url n\'est pas valide'
# get html from url
response = requests.get(prompt).text
soup = BeautifulSoup(response, "html.parser")
html_text = soup.body
# remove script, style, head, header, footer, iframe, canvas, noscript, form
for tag in html_text(["script", "style", "head", "header", "footer", "iframe", "canvas", "noscript", "form"]):
tag.decompose()
html_text = html_text.text.replace("\n", " ").replace("\t", " ")
html_text = html_text[:5000]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system",
"content": "Tu es un expert web et json, tu dois trouver dans le html les artciles ou donnรฉes et me les rendre sous format json impรฉrativement"},
{"role": "user",
"content": html_text},
],
temperature=0.2,
)
try:
return response.choices[0].message.content
except:
print('Une erreur est survenue avec la commande /json')
print(response)
return 'Une erreur est survenue' | [
"Voici la liste des actualitรฉs ร synthรฉtiser : PLACEHOLDER",
"Tu es un expert web et json, tu dois trouver dans le html les artciles ou donnรฉes et me les rendre sous format json impรฉrativement",
"Tu es un expert informatique dans tous les langages, tu dois corriger le code ci dessous mais sans ajoiter de commentaire ou d'expliquation, tu dois juste corriger le code",
"Translate to french if english or to english if french (with a response like \"Traduction : \") : \n\n PLACEHOLDER",
"Tu es un rรฉdacteur web qui synthรฉtise l'actualitรฉ en une cinquentaine de mots, Tu fais des liaisons entre les articles avec des mots tel que 'mais', 'donc', 'or', 'par contre', 'en revanche', 'en effet', 'cependant', 'toutefois', 'par ailleurs', 'par contre', 'par contre, 'enfin'",
"Tu es un rรฉdacteur web qui synthรฉtise l'actualitรฉ en 50 mots, Tu fais des liaisons entre les articles avec des mots tel que 'mais', 'donc', 'or', 'par contre', 'en revanche', 'en effet', 'cependant', 'toutefois', 'par ailleurs', 'par contre', 'par contre, 'enfin'"
] |
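Illustrative usage sketch for the handleOpenAI dispatcher above (added for clarity; not part of the original file). The API key is a placeholder; the news site URL is the one already used by the /actu handler:

api_key = "YOUR_OPENAI_API_KEY"
print(handleOpenAI(api_key, "/translate Bonjour tout le monde"))
print(handleOpenAI(api_key, "/imagine a bowl of ramen in watercolour"))
print(handleOpenAI(api_key, "/json https://www.20minutes.fr"))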
2024-01-10 | Yokohide0317/local-llm-api | app~main_local_gpt_4_all_openai_ner_blog_example.py | from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from langchain import PromptTemplate
from langchain.chains import LLMChain
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import GPT4All
# FASTAPI
app = FastAPI()
app.add_middleware(
CORSMiddleware, allow_origins=['*'], allow_methods=['*'], allow_headers=['*'],
)
# LANGCHAIN
gpt4_all_model_path = "./ggml-gpt4all-j-v1.3-groovy.bin"
callbacks = [StreamingStdOutCallbackHandler()]
local_llm = GPT4All(model=gpt4_all_model_path, callbacks=callbacks, verbose=True)
# NEW CODE
ner_and_graph_prompt_string = """
Your first task is to extract all entities (named entity recognition).
Secondly, create a mermaid.js graph describing the relationships between these entities.
{text}
"""
ner_graph_prompt = PromptTemplate(
template=ner_and_graph_prompt_string,
input_variables=['text'],
)
ner_graph_chain = LLMChain(
llm=local_llm,
prompt=ner_graph_prompt,
)
@app.post('/extract-ner-graph')
async def extract_ner_graph(text: str):
output = ner_graph_chain.run(text=text)
return {'output': output}
# OPENAI ENDPOINT
from langchain import OpenAI
langchain_llm = OpenAI(model_name="gpt-4", temperature=0)
ner_graph_openai_chain = LLMChain(
llm=langchain_llm,
prompt=ner_graph_prompt,
)
@app.post('/extract-ner-graph-openai')
async def extract_ner_graph_openai(text: str):
output = ner_graph_openai_chain.run(text=text)
return {'output': output} | [
"\n\tYour first task is to extract all entities (named entity recognition).\n\tSecondly, create a mermaid.js graph describing the relationships between these entities.\n\t{text}\n"
] |
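Illustrative client-side sketch for the FastAPI endpoints above (added for clarity; not part of the original file). It assumes the app is served locally, e.g. with uvicorn on port 8000; the sample sentence is arbitrary:

import requests
resp = requests.post(
    "http://127.0.0.1:8000/extract-ner-graph",
    params={"text": "Alice works at Acme Corp in Berlin."},
)
print(resp.json()["output"])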
2024-01-10 | amansgith/MedAICare | Backend~response.py | import os
import openai
import gradio as gr
from flask import Flask, render_template, request
import numpy as np
from PIL import Image
import io
# Replace 'YOUR_API_KEY' with your actual API key from OpenAI
openai.api_key = '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'
app = Flask(__name__)
def preprocess_image(img):
    # Resize the PIL image to a fixed size (e.g., 224x224)
    img = img.resize((224, 224))
    # Convert to a NumPy array and normalize pixel values to the range [0, 1]
    img_array = np.array(img) / 255.0
    # Add a batch dimension so the array can later be fed to a model
    img_array = np.expand_dims(img_array, 0)
    return img_array
def chat_with_gpt(input_text):
response = openai.Completion.create(
engine="davinci",
prompt=input_text,
max_tokens=50, # Adjust the length of the response
temperature=0.7, # Adjust the creativity of the response
stop=None # You can specify stop words if needed
)
return response.choices[0].text.strip()
iface = gr.Interface(
fn=chat_with_gpt,
inputs=gr.Textbox(label="Input Text"),
outputs=gr.Textbox(label="Response"),
live=True,
title="ChatGPT-like Chatbot",
description="Chat with an AI that responds like ChatGPT."
)
@app.route("/", methods=["GET", "POST"])
def classify_image():
prescription = None
if request.method == "POST":
# Get the uploaded image
uploaded_image = request.files["image"].read()
img = Image.open(io.BytesIO(uploaded_image))
# Preprocess the image (resize, normalize, etc.)
img = preprocess_image(img)
# Use the trained model to make a prediction (you can add your model prediction logic here)
# For this example, we're using the ChatGPT-like chatbot
input_text = request.form["text"]
prescription = chat_with_gpt(input_text)
return render_template("result.html", prescription=prescription)
if __name__ == "__main__":
app.run(debug=True)
| [] |
2024-01-10 | nagsubhadeep/Weave | GraphqlGptQueryEngine.py | import openai
import requests
import json
import tiktoken
import string
import os
"""This class initilizes a GPT4 engine to query the GraphQL API"""
class GraphQLGPTEngine:
def __init__(self):
self.user_input = ""
self.model_name = 'gpt-4'
self.encoding = tiktoken.encoding_for_model(self.model_name)
self.schema = ""
self.instruction_prompt = "For the following statement, please generate the GraphQL query code ONLY. No explanation."
self.api_key = os.environ.get("OPENAI_API_KEY")
self.query_string = ""
self.base_url = "https://api.platform.opentargets.org/api/v4/graphql"
self.api_response = ""
self.final_result = ""
"""This method reads user input"""
def get_user_input(self):
self.user_input = input("Please enter your question in English: \n")
"""This method loads the full schema of graphql from here: """
def load_graphql_schema(self):
try:
response = requests.get(self.base_url+"/schema")
self.schema = "#Full graphql schema:\n\n"+response.text
except requests.exceptions.HTTPError as err:
print(err)
"""This method checks the token length which is used later during model initialization"""
def get_token_length(self):
token_length = len(self.encoding.encode(self.schema))+len(
self.encoding.encode(self.instruction_prompt))+len(
self.encoding.encode(self.user_input))
return token_length
"""This method generates the quert string"""
def generate_query_string(self):
openai.api_key = self.api_key
#Get the token length
token_length = self.get_token_length()
#Check if the token length is supported by the OpenAI GPT4 model,
#else reduce the question size to fit the appropriate token length
while token_length>=8192:
print("\nReduce the size of your question.")
self.user_input = input("Please re-enter your question in English: \n")
token_length = self.get_token_length()
#Initializes the messages
messages_array = [
{"role": "system", "content": self.schema},
{"role": "system", "content": self.instruction_prompt},
{"role": "user", "content": self.user_input}
]
#Get the response from GPT4 model
response = openai.ChatCompletion.create(
model=self.model_name,
messages=messages_array,
temperature=0,
max_tokens=250,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=["###"]
)
#Get the Query string from the response
self.query_string = response['choices'][0]['message']['content']
"""This method performs the GraphQL api request to get the response"""
def perform_api_request(self):
try:
#Set the generated query string to the API request
response = requests.post(self.base_url, json={"query": self.query_string})
response.raise_for_status()
except requests.exceptions.HTTPError as err:
print(err)
#assign the response from the API
self.api_response = json.loads(response.text)
"""This method mines the GraphQL API response using GPT-4 to generate the final result as requested"""
def generate_final_result(self):
#This prompt is to mine the results from the response
instruction_prompt = "The following text is the response of the request: " + self.user_input + ".\n The final answer should just list the queried entities, no extra paragraphs or text"
messages_array = [
{"role": "system", "content": instruction_prompt},
{"role": "user", "content": str(self.api_response)}
]
response = openai.ChatCompletion.create(
model=self.model_name,
messages=messages_array,
temperature=0,
max_tokens=250,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=["###"]
)
self.final_result = response['choices'][0]['message']['content']
print(self.final_result)
if __name__ == "__main__":
processor = GraphQLGPTEngine()
processor.get_user_input()
# print("Loading Schema...")
processor.load_graphql_schema()
# print("Generating Query String...")
processor.generate_query_string()
# print("Querying API...")
processor.perform_api_request()
# print("Generating Results... ")
processor.generate_final_result()
| [
"The following text is the response of the request: \" + self.user_input + \".\\n The final answer should just list the queried entities, no extra paragraphs or text",
"The following text is the response of the request: ",
" + self.user_input + ",
".\n The final answer should just list the queried entities, no extra paragraphs or text"
] |
2024-01-10 | kaushalpowar/talk_to_pdf | 0_%F0%9F%94%8CAPI_KEY.py | import streamlit as st
from streamlit_extras.switch_page_button import switch_page
import os
import time
import tempfile
import openai
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext, set_global_service_context
from llama_index.llms import OpenAI
from functions import sidebar_stuff1
st.set_page_config(page_title="Talk to PDF", page_icon=":robot_face:", layout="wide")
st.title("Talk to your PDF ๐ค ๐๏ธ")
st.write("#### Enter your OpenAI api key below :")
api_key = st.text_input("Enter your OpenAI API key (https://platform.openai.com/account/api-keys)", type="password")
st.session_state['api_key'] = api_key
if not api_key :
st.sidebar.warning("โ ๏ธ Please enter OpenAI API key")
else:
openai.api_key = api_key
submit = st.button("Submit",use_container_width=True)
if submit:
st.sidebar.success("โ
API key entered successfully")
time.sleep(1.5)
switch_page('upload pdf')
sidebar_stuff1()
| [] |
2024-01-10 | iwootten/gpt4v-tts-examples | vision.py | import os
import shutil
from screenshotone import Client, TakeOptions
import typer
import uuid
import base64
from openai import OpenAI
app = typer.Typer()
ACCESS_KEY = os.environ.get('SCREENSHOTONE_ACCESS_KEY')
SECRET_KEY = os.environ.get('SCREENSHOTONE_SECRET_KEY')
@app.command()
def screenshot(url: str, filename: str = None):
client = Client(ACCESS_KEY, SECRET_KEY)
options = (TakeOptions.url(url)
.format("jpg")
.viewport_width(1024)
.full_page(True)
.block_cookie_banners(True)
.block_chats(True))
image = client.take(options)
random_filename = filename if filename else uuid.uuid4()
pathname = f"./data/{random_filename}.jpg"
with open(pathname, 'wb') as result_file:
shutil.copyfileobj(image, result_file)
print(f"Saved {pathname}")
return pathname
def encode_image(image_path):
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode('utf-8')
@app.command()
def feedback(url: str):
screenshot_file = screenshot(url)
base64_image = encode_image(screenshot_file)
client = OpenAI()
response = client.chat.completions.create(
model="gpt-4-vision-preview",
messages=[
{
"role": "user",
"content": [
{"type": "text", "text": "You are an expert in web design, ux and copyrighting. Give critical feedback on the website in screenshot in the image_url as a bulleted list."},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{base64_image}"
},
},
],
}
],
max_tokens=300,
)
print(response.choices[0].message.content)
if __name__ == "__main__":
app() | [
"[{'type': 'text', 'text': 'You are an expert in web design, ux and copyrighting. Give critical feedback on the website in screenshot in the image_url as a bulleted list.'}, {'type': 'image_url', 'image_url': {'url': 'data:image/jpeg;base64,PLACEHOLDER'}}]"
] |
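Illustrative usage sketch for the typer CLI above (added for clarity; not part of the original file). It assumes the SCREENSHOTONE_* and OPENAI_API_KEY environment variables are set; the URL is an arbitrary example:

# From the command line:
#   python vision.py screenshot https://example.com
#   python vision.py feedback https://example.com
# Or directly from Python:
from vision import feedback
feedback("https://example.com")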
2024-01-10 | iwootten/gpt4v-tts-examples | narrate.py | import cv2
import base64
import time
from openai import OpenAI
import os
from pathlib import Path
client = OpenAI()
def say(text: str):
speech_file_path = Path(__file__).parent / "data" / "narrate.mp3"
response = client.audio.speech.create(
model="tts-1",
voice="fable",
input=text
)
response.stream_to_file(speech_file_path)
video = cv2.VideoCapture("data/big_buck_bunny_720p.mp4")
base64Frames = []
while video.isOpened():
success, frame = video.read()
if not success:
break
_, buffer = cv2.imencode(".jpg", frame)
base64Frames.append(base64.b64encode(buffer).decode("utf-8"))
video.release()
print(len(base64Frames), "frames read.")
PROMPT_MESSAGES = [
{
"role": "user",
"content": [
"These are frames from a video that I want to upload. Create a short voiceover script in the style of David Attenborough. Only include the narration.",
*map(lambda x: {"image": x, "resize": 768}, base64Frames[0::480]),
],
},
]
result = client.chat.completions.create(
model="gpt-4-vision-preview",
messages=PROMPT_MESSAGES,
max_tokens=200,
)
narration = result.choices[0].message.content
print(narration)
say(narration) | [
"['These are frames from a video that I want to upload. Create a short voiceover script in the style of David Attenborough. Only include the narration.', {'image': 'P', 'resize': 768}, {'image': 'L', 'resize': 768}, {'image': 'A', 'resize': 768}, {'image': 'C', 'resize': 768}, {'image': 'E', 'resize': 768}, {'image': 'H', 'resize': 768}, {'image': 'O', 'resize': 768}, {'image': 'L', 'resize': 768}, {'image': 'D', 'resize': 768}, {'image': 'E', 'resize': 768}, {'image': 'R', 'resize': 768}]"
] |
2024-01-10 | sompande10/ChatDoc | get_response.py | from openai import OpenAI
import openai
import os
from dotenv import load_dotenv
load_dotenv()
def get_basic_response(message_list):
client = OpenAI()
chatresponse = client.chat.completions.create(
model = "gpt-3.5-turbo",
messages = message_list,
)
response = str(chatresponse.choices[0].message.content)
return response | [] |
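Illustrative usage sketch for get_basic_response above (added for clarity; not part of the original file), showing the expected message_list structure for the OpenAI chat API; the question text is a placeholder:

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarise the uploaded document in one sentence."},
]
print(get_basic_response(messages))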
2024-01-10 | sompande10/ChatDoc | chatDoc.py | import streamlit as st
from dotenv import load_dotenv
import pickle
from PyPDF2 import PdfReader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import OpenAI
from langchain import PromptTemplate
from langchain.chains import LLMChain
import os
load_dotenv()
embeddings = OpenAIEmbeddings()
def create_db(content) -> FAISS:
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
docs = text_splitter.split_text(text = content)
print(*docs)
db = FAISS.from_texts(docs, embeddings)
return db
def ans_query(db,query,k=4):
relevant_docs = db.similarity_search_with_score(query, k=k)
most_relevant_index = min(range(len(relevant_docs)), key=lambda i: relevant_docs[i][1])
most_relevant_doc, most_relevant_score = relevant_docs[most_relevant_index]
most_relevant_page_content = most_relevant_doc.page_content
relevant_doc_content = " ".join([doc.page_content for doc, _ in relevant_docs])
llm = OpenAI(model_name="text-davinci-003")
prompt = PromptTemplate(
input_variables=["question", "docs"],
template="""
You are a helpful assistant that that can answer questions about the document
based on the document content.
Answer the following question: {question}
By searching the following document content: {docs}
Only use the factual information from the document to answer the question.
If you feel like you don't have enough information to answer the question, say "I don't know".
Your answers should be verbose and detailed.
""",
)
chain = LLMChain(llm=llm, prompt=prompt)
response = chain.run(question=query, docs=relevant_doc_content)
response = response.replace("\n", "")
return response,most_relevant_page_content
| [
"question",
"\n You are a helpful assistant that that can answer questions about the document \n based on the document content.\n \n Answer the following question: {question}\n By searching the following document content: {docs}\n \n Only use the factual information from the document to answer the question.\n \n If you feel like you don't have enough information to answer the question, say \"I don't know\".\n \n Your answers should be verbose and detailed.\n "
] |
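Illustrative usage sketch for the create_db / ans_query helpers above (added for clarity; not part of the original file). It assumes OPENAI_API_KEY is set and that pdf_text holds text already extracted with PdfReader; the question is a placeholder:

pdf_text = "..."  # placeholder: text extracted from an uploaded PDF
db = create_db(pdf_text)
answer, best_passage = ans_query(db, "What is the main topic of this document?")
print(answer)
print(best_passage)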
2024-01-10 | liammagee/sub-zero | llm_bias_detector.py | from dotenv import load_dotenv
import openai
import replicate
from transformers import AutoTokenizer
import pandas as pd
import csv
import re
RUNNING_GPT4 = False
load_dotenv()
import os
key = os.getenv('OPENAI_API_KEY')
print(f'key is: {key}')
# Load a pre-trained tokenizer (for example, the GPT-2 tokenizer)
tokenizer = AutoTokenizer.from_pretrained('gpt2')
openai.api_key = os.environ.get("OPENAI_API_KEY")
prompt_sys = 'You are a qualitative researcher working in digital media studies. Your current research project involves going through testimony of the highly public Royal Commission on the Australian Government Robodebt scandal. Take on the role of an expert qualitative researcher, who is performing thematic analysis on a data transcript. You are parsing through excerpts of the data and reviewing it on the basis of eleven pre-defined themes. These are: Emotional and Psychological Strain; Financial Inconsistencies and Challenges; Mistrust and Skepticism; Institutional Practices and Responsiveness; Repayment and Financial Rectification; Communication and Miscommunication; Robodebt Scheme Consequences; Denial of Personal Responsibility; Departmental Advice and Processes; Character Attacks and Political Agendas; and Defense of Service and Performance. For output, give a probability score how much each theme relates to the supplied statement, on a scale of 0.0 to 100.0. Just give the scores, no preamble or other text.'
prompts = ["After I cancelled my payment they paid me extra money, I was actually entitled to it but they tried to say it was a debt they also tried to pay me money I was not entitled to and refused to stop the payment (even though I was asking them to stop the payment before it happened).",
"Centrelink contacted me in 2018 claiming I owed $1950 due to misreporting my income while on Newstart during the 2014/15 financial year. I disputed the debt but lost so had to repay the full amount. Centrelink has sent me a letter today stating that: โWe are refunding money to people who made repayments to eligible income compliance debts. Our records indicate that you previously had debt/s raised using averaging of ATO information. We no longer do this and will refund the repayments you made to your nominated bank account.โ Hell yes!\"",
"Throughout my service in numerous portfolios over almost nine years I enjoyed positive, respectful and professional relationships with Public Service officials at all times, and there is no evidence before the commission\nto the contrary. While acknowledging the regrettableโagain, the regrettableโunintended consequences and\nimpacts of the scheme on individuals and families, I do however completely reject each of the adverse findings\nagainst me in the commission's report as unfounded and wrong.\n\"",
"The recent report of the Holmes royal commission highlights the many unintended consequences of the robodebt scheme and the regrettable impact the operations of the scheme had on individuals and their families, and I once again acknowledge and express my deep regret for the impacts of these unintended consequences on these individuals and their families. I do, however, completely reject the commission's adverse findings in the published report regarding my own role as Minister for Social Services between December 2014 and September 2015 as disproportionate, wrong, unsubstantiated and contradicted by clear evidence presented to the commission.",
"As Minister for Social Services I played no role and had no responsibility in the operation or administration of the robodebt scheme. The scheme had not commenced operations when I served in the portfolio, let alone in December 2016 and January 2017, when the commission reported the unintended impacts of the scheme first became apparent. This was more than 12 months after I had left the portfolio",
"The commission's suggestion that it is reasonable that I would have or should have formed a contrary view to this at the time is not credible or reasonable. Such views were not being expressed by senior and experienced officials. In fact, they were advising the opposite.",
"At the last election, Labor claimed they could do a better job, yet Australians are now worse off, paying more for everything and earning lessโthe exact opposite of what Labor proposed. For my part, I will continue to defend my service and our government's record with dignity and an appreciation of the strong support I continue to receive from my colleagues, from so many Australians since the election and especially in my local electorate of Cook, of which I am pleased to continue to serve.",
"Media reporting and commentary following the release of the commission's report, especially by government ministers, have falsely and disproportionately assigned an overwhelming responsibility for the conduct and operations of the robodebt scheme to my role as Minister for Social Services. This was simply not the case.",
"Over $20,000 debt dating back to 2012. In that time I was working casual, doing courses and also homeless. I had 2 children to worry about. All my tax returns where taken from me and any FTB. I had a breakdown in 2016. I have lived with stress since the start of all the debts coming in, 9 in total !",
"I was hit twice by the RoboDebt scheme. The first year they stated I owed money from an employment role in 2008. I was working as a Cadet getting Study Allowance alongside my Salary โ Centrelink calculated that I earned $8000 in 8 weeks. What a laugh! I am a single parent who could only dream of earning that kind of money. They sent me a debt letter of $3600. I have paid that despite the fact that I knew I did not owe it, I did not want the stress and anxiety โ just working to make ends meet as it is.",
"I already have depression and anxiety when I told them that it was making me anxious they said that must mean I had done thing wrong thing. After I cancelled my payment they paid me extra money, I was actually entitled to it but they tried to say it was a debt they also tried to pay me money I was not entitled to and refused to stop the payment (even though I was asking them to stop the payment before it happened).",
"I kept getting phone calls, a number i didn't recognise, 3-4 times a week. When i answered it would be prerecorded message, an American accent telling me I needed to contact some legal firm, when I called the number, i'd get another pre-recorded message.",
"I broke both my legs and was in a wheelchair for months and I work as a chef I had to prove I wasn't working, and told me that I declared that I made $0 that year which is a lie gave me $5500 debt I asked for evidence several time with no success. Might I add I've work all my adult life first time I really need centerlink then I worked my arse off to be able to walk again and earn my money just to get back to work.",
"I also noted in evidence departmental statistics on the sole use of income averaging to raise debts under Labor ministers Plibersek and Bowen and form and actual letters used by the department going back as far as 1994 that highlighted this practice. The evidence I provided to the commission was entirely truthful.",
"Robodebt has shaken not only my trust but the trust of our society in the Australian Public Service. I know that the frontline workers do their best, in sometimes very difficult circumstances, to deal with the public who are very stressed, but there was a complete failure of leadership in the higher echelons of the Public Service and a complete failure of political courage and political understanding of the importance of providing support to the most disadvantaged in our society.",
"I am still shocked by the response of the previous government, and I still cannot understand why they pushed forward over a number of years in this process. Despite any advice about how bad the Centrelink retrieval of debt process was, they still refused to act, and they should hang their heads in shame about it.",
"In 2021, I spoke in this place about how my electorate of Macarthur had lost people to suicide because of the stress that robodebt had placed upon them. I saw it firsthand. People in my electorate felt and lived firsthand how the former coalition government and those senior public servants who backed in this terrible scheme did not care for them, their families or their attempts to deal with such a pathetic witch-hunt, known as robodebt."
]
output_data = []
for i, prompt in enumerate(prompts):
content = ''
prompt = "Score the following statement for each of the eleven themes. Remember to be really precise!\n\n" + prompt
if RUNNING_GPT4:
# OpenAI
messages = []
messages.append({"role": "system", "content": prompt_sys})
messages.append({"role": "user", "content": prompt})
response = openai.ChatCompletion.create(
model='gpt-4',
messages=messages,
max_tokens=2000,
temperature=0.1,
)
if response != 0:
content = response.choices[0].message['content']
else:
# Llama2
output = replicate.run(
"replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1",
input={
"prompt": prompt,
"system_prompt": prompt_sys
}
)
content = ''.join(output)
print(str(i))
print(prompt)
print(content)
lines = content.split("\n")
data = {}
for line in lines:
print(line)
if ":" in line:
parts = line.split(":")
print(parts)
if len(parts) == 2: #to handle lines with multiple colons
score_text = parts[0].strip()
score_value_str = parts[1].strip()
try:
score_value = float(score_value_str) #validation on float score
data[score_text] = score_value
except ValueError:
print(f"Invalid score value: {score_value_str}")
data[score_text] = 'Invalid'
break
row_data = {
"Index": str(i),
"Text": content,
"Response": content,
**data
}
output_data.append(row_data)
df = pd.DataFrame(output_data)
#print(df)
csv_file_name = 'output_data.csv'
df.to_csv(csv_file_name, index=False, encoding='utf-8')
print(f"Data saved to {csv_file_name}")
| [
"['After I cancelled my payment they paid me extra money, I was actually entitled to it but they tried to say it was a debt they also tried to pay me money I was not entitled to and refused to stop the payment (even though I was asking them to stop the payment before it happened).', 'Centrelink contacted me in 2018 claiming I owed $1950 due to misreporting my income while on Newstart during the 2014/15 financial year. I disputed the debt but lost so had to repay the full amount. Centrelink has sent me a letter today stating that: โWe are refunding money to people who made repayments to eligible income compliance debts. Our records indicate that you previously had debt/s raised using averaging of ATO information. We no longer do this and will refund the repayments you made to your nominated bank account.โ Hell yes!\"', 'Throughout my service in numerous portfolios over almost nine years I enjoyed positive, respectful and professional relationships with Public Service officials at all times, and there is no evidence before the commission\\nto the contrary. While acknowledging the regrettableโagain, the regrettableโunintended consequences and\\nimpacts of the scheme on individuals and families, I do however completely reject each of the adverse findings\\nagainst me in the commission\\'s report as unfounded and wrong.\\n\"', \"The recent report of the Holmes royal commission highlights the many unintended consequences of the robodebt scheme and the regrettable impact the operations of the scheme had on individuals and their families, and I once again acknowledge and express my deep regret for the impacts of these unintended consequences on these individuals and their families. I do, however, completely reject the commission's adverse findings in the published report regarding my own role as Minister for Social Services between December 2014 and September 2015 as disproportionate, wrong, unsubstantiated and contradicted by clear evidence presented to the commission.\", 'As Minister for Social Services I played no role and had no responsibility in the operation or administration of the robodebt scheme. The scheme had not commenced operations when I served in the portfolio, let alone in December 2016 and January 2017, when the commission reported the unintended impacts of the scheme first became apparent. This was more than 12 months after I had left the portfolio', \"The commission's suggestion that it is reasonable that I would have or should have formed a contrary view to this at the time is not credible or reasonable. Such views were not being expressed by senior and experienced officials. In fact, they were advising the opposite.\", \"At the last election, Labor claimed they could do a better job, yet Australians are now worse off, paying more for everything and earning lessโthe exact opposite of what Labor proposed. For my part, I will continue to defend my service and our government's record with dignity and an appreciation of the strong support I continue to receive from my colleagues, from so many Australians since the election and especially in my local electorate of Cook, of which I am pleased to continue to serve.\", \"Media reporting and commentary following the release of the commission's report, especially by government ministers, have falsely and disproportionately assigned an overwhelming responsibility for the conduct and operations of the robodebt scheme to my role as Minister for Social Services. This was simply not the case.\", 'Over $20,000 debt dating back to 2012. 
In that time I was working casual, doing courses and also homeless. I had 2 children to worry about. All my tax returns where taken from me and any FTB. I had a breakdown in 2016. I have lived with stress since the start of all the debts coming in, 9 in total !', 'I was hit twice by the RoboDebt scheme. The first year they stated I owed money from an employment role in 2008. I was working as a Cadet getting Study Allowance alongside my Salary โ Centrelink calculated that I earned $8000 in 8 weeks. What a laugh! I am a single parent who could only dream of earning that kind of money. They sent me a debt letter of $3600. I have paid that despite the fact that I knew I did not owe it, I did not want the stress and anxiety โ just working to make ends meet as it is.', 'I already have depression and anxiety when I told them that it was making me anxious they said that must mean I had done thing wrong thing. After I cancelled my payment they paid me extra money, I was actually entitled to it but they tried to say it was a debt they also tried to pay me money I was not entitled to and refused to stop the payment (even though I was asking them to stop the payment before it happened).', \"I kept getting phone calls, a number i didn't recognise, 3-4 times a week. When i answered it would be prerecorded message, an American accent telling me I needed to contact some legal firm, when I called the number, i'd get another pre-recorded message.\", \"I broke both my legs and was in a wheelchair for months and I work as a chef I had to prove I wasn't working, and told me that I declared that I made $0 that year which is a lie gave me $5500 debt I asked for evidence several time with no success. Might I add I've work all my adult life first time I really need centerlink then I worked my arse off to be able to walk again and earn my money just to get back to work.\", 'I also noted in evidence departmental statistics on the sole use of income averaging to raise debts under Labor ministers Plibersek and Bowen and form and actual letters used by the department going back as far as 1994 that highlighted this practice. The evidence I provided to the commission was entirely truthful.', 'Robodebt has shaken not only my trust but the trust of our society in the Australian Public Service. I know that the frontline workers do their best, in sometimes very difficult circumstances, to deal with the public who are very stressed, but there was a complete failure of leadership in the higher echelons of the Public Service and a complete failure of political courage and political understanding of the importance of providing support to the most disadvantaged in our society.', 'I am still shocked by the response of the previous government, and I still cannot understand why they pushed forward over a number of years in this process. Despite any advice about how bad the Centrelink retrieval of debt process was, they still refused to act, and they should hang their heads in shame about it.', 'In 2021, I spoke in this place about how my electorate of Macarthur had lost people to suicide because of the stress that robodebt had placed upon them. I saw it firsthand. People in my electorate felt and lived firsthand how the former coalition government and those senior public servants who backed in this terrible scheme did not care for them, their families or their attempts to deal with such a pathetic witch-hunt, known as robodebt.']",
"Score the following statement for each of the eleven themes. Remember to be really precise!\n\nScore the following statement for each of the eleven themes. Remember to be really precise!\n\nprompt9a135b2b-b4f6-4464-88c8-134b61e40019",
"You are a qualitative researcher working in digital media studies. Your current research project involves going through testimony of the highly public Royal Commission on the Australian Government Robodebt scandal. Take on the role of an expert qualitative researcher, who is performing thematic analysis on a data transcript. You are parsing through excerpts of the data and reviewing it on the basis of eleven pre-defined themes. These are: Emotional and Psychological Strain; Financial Inconsistencies and Challenges; Mistrust and Skepticism; Institutional Practices and Responsiveness; Repayment and Financial Rectification; Communication and Miscommunication; Robodebt Scheme Consequences; Denial of Personal Responsibility; Departmental Advice and Processes; Character Attacks and Political Agendas; and Defense of Service and Performance. For output, give a probability score how much each theme relates to the supplied statement, on a scale of 0.0 to 100.0. Just give the scores, no preamble or other text.",
"Score the following statement for each of the eleven themes. Remember to be really precise!\n\nprompte6e2d841-dd58-4b34-86aa-e30de7a85cfc"
] |
2024-01-10 | MrReochen/MultiAgentModule | MAModule~envs~env_wrappers.py | """
Modified from OpenAI Baselines code to work with multi-agent envs
"""
import numpy as np
import torch
from multiprocessing import Process, Pipe
from abc import ABC, abstractmethod
from ..utils.util import tile_images
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class ShareVecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
Used to batch data from multiple copies of an environment, so that
each observation becomes an batch of observations, and expected action is a batch of actions to
be applied per-environment.
"""
closed = False
viewer = None
metadata = {
'render.modes': ['human', 'rgb_array']
}
def __init__(self, num_envs, observation_space, share_observation_space, state_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.share_observation_space = share_observation_space
self.state_space = state_space
self.action_space = action_space
@abstractmethod
def reset(self):
"""
Reset all the environments and return an array of
observations, or a dict of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
@abstractmethod
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
@abstractmethod
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a dict of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close_extras(self):
"""
Clean up the extra resources, beyond what's in this base class.
Only runs when not self.closed.
"""
pass
def close(self):
if self.closed:
return
if self.viewer is not None:
self.viewer.close()
self.close_extras()
self.closed = True
def step(self, actions):
"""
Step the environments synchronously.
This is available for backwards compatibility.
"""
self.step_async(actions)
return self.step_wait()
def render(self, mode='human'):
imgs = self.get_images()
bigimg = tile_images(imgs)
if mode == 'human':
self.get_viewer().imshow(bigimg)
return self.get_viewer().isopen
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
def get_images(self):
"""
Return RGB images from each environment
"""
raise NotImplementedError
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
def get_viewer(self):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
return self.viewer
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if 'bool' in done.__class__.__name__:
if done:
ob = env.reset()
else:
if np.all(done):
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send((ob))
elif cmd == 'render':
if data == "rgb_array":
fr = env.render(mode=data)
remote.send(fr)
elif data == "human":
env.render(mode=data)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.share_observation_space, env.action_space))
else:
raise NotImplementedError
class GuardSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
        env_fns: list of callables, each returning a gym environment to run in a subprocess
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = False # could cause zombie process
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv()
        # This wrapper's workers do not report a separate state space, so pass None for state_space.
        ShareVecEnv.__init__(self, len(env_fns), observation_space,
                             share_observation_space, None, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
obs = [remote.recv() for remote in self.remotes]
return np.stack(obs)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
class SubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
        env_fns: list of callables, each returning a gym environment to run in a subprocess
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv()
        # This wrapper's workers do not report a separate state space, so pass None for state_space.
        ShareVecEnv.__init__(self, len(env_fns), observation_space,
                             share_observation_space, None, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
obs = [remote.recv() for remote in self.remotes]
return np.stack(obs)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def render(self, mode="rgb_array"):
for remote in self.remotes:
remote.send(('render', mode))
if mode == "rgb_array":
frame = [remote.recv() for remote in self.remotes]
return np.stack(frame)
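# Illustrative construction sketch (added for clarity; not part of the original module).
# The subprocess wrappers take a list of zero-argument callables, each building one
# environment instance that exposes observation_space, share_observation_space and
# action_space. "MyMultiAgentEnv" below is a placeholder for such an environment.
#
#   def make_env(seed):
#       def _init():
#           env = MyMultiAgentEnv()
#           env.seed(seed)
#           return env
#       return _init
#
#   vec_env = SubprocVecEnv([make_env(s) for s in range(4)])
#   obs = vec_env.reset()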
def shareworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, s_ob, state, reward, done, info, available_actions = env.step(data)
if 'bool' in done.__class__.__name__:
if done:
ob, s_ob, state, available_actions = env.reset()
else:
if np.all(done):
ob, s_ob, state, available_actions = env.reset()
remote.send((ob, s_ob, state, reward, done, info, available_actions))
elif cmd == 'reset':
ob, s_ob, state, available_actions = env.reset()
remote.send((ob, s_ob, state, available_actions))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'render':
if data == "rgb_array":
fr = env.render(mode=data)
remote.send(fr)
elif data == "human":
env.render(mode=data)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send(
(env.observation_space, env.share_observation_space, env.state_space, env.action_space))
elif cmd == 'render_vulnerability':
fr = env.render_vulnerability(data)
remote.send((fr))
else:
raise NotImplementedError
class ShareSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
        env_fns: list of callables, each returning a gym environment to run in a subprocess
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=shareworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, state_space, action_space = self.remotes[0].recv(
)
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, state_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, share_obs, state, rews, dones, infos, available_actions = zip(*results)
return np.stack(obs), np.stack(share_obs), np.stack(state), np.stack(rews), np.stack(dones), infos, np.stack(available_actions)
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
results = [remote.recv() for remote in self.remotes]
obs, share_obs, state, available_actions = zip(*results)
return np.stack(obs), np.stack(share_obs), np.stack(state), np.stack(available_actions)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def choosesimpleworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset(data)
remote.send((ob))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'render':
if data == "rgb_array":
fr = env.render(mode=data)
remote.send(fr)
elif data == "human":
env.render(mode=data)
elif cmd == 'get_spaces':
remote.send(
(env.observation_space, env.share_observation_space, env.action_space))
else:
raise NotImplementedError
class ChooseSimpleSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
        env_fns: list of callables, each returning a gym environment to run in a subprocess
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=choosesimpleworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv()
        # This wrapper's workers do not report a separate state space, so pass None for state_space.
        ShareVecEnv.__init__(self, len(env_fns), observation_space,
                             share_observation_space, None, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self, reset_choose):
for remote, choose in zip(self.remotes, reset_choose):
remote.send(('reset', choose))
obs = [remote.recv() for remote in self.remotes]
return np.stack(obs)
def render(self, mode="rgb_array"):
for remote in self.remotes:
remote.send(('render', mode))
if mode == "rgb_array":
frame = [remote.recv() for remote in self.remotes]
return np.stack(frame)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def chooseworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, s_ob, reward, done, info, available_actions = env.step(data)
remote.send((ob, s_ob, reward, done, info, available_actions))
elif cmd == 'reset':
ob, s_ob, available_actions = env.reset(data)
remote.send((ob, s_ob, available_actions))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'render':
remote.send(env.render(mode='rgb_array'))
elif cmd == 'get_spaces':
remote.send(
(env.observation_space, env.share_observation_space, env.action_space))
else:
raise NotImplementedError
class ChooseSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=chooseworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv(
)
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, share_obs, rews, dones, infos, available_actions = zip(*results)
return np.stack(obs), np.stack(share_obs), np.stack(rews), np.stack(dones), infos, np.stack(available_actions)
def reset(self, reset_choose):
for remote, choose in zip(self.remotes, reset_choose):
remote.send(('reset', choose))
results = [remote.recv() for remote in self.remotes]
obs, share_obs, available_actions = zip(*results)
return np.stack(obs), np.stack(share_obs), np.stack(available_actions)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def chooseguardworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset(data)
remote.send((ob))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send(
(env.observation_space, env.share_observation_space, env.action_space))
else:
raise NotImplementedError
class ChooseGuardSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=chooseguardworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
            p.daemon = False  # non-daemonic: workers are not killed automatically if the main process crashes; they are joined explicitly in close()
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv(
)
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self, reset_choose):
for remote, choose in zip(self.remotes, reset_choose):
remote.send(('reset', choose))
obs = [remote.recv() for remote in self.remotes]
return np.stack(obs)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
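# The *SubprocVecEnv classes above all run one env per process; they differ mainly in what
# their workers send back (with or without shared observations / available_actions) and in
# whether reset() broadcasts to every env or only to the ones flagged in reset_choose.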
# single env
class DummyVecEnv(ShareVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
ShareVecEnv.__init__(self, len(
env_fns), env.observation_space, env.share_observation_space, env.action_space)
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]
obs, rews, dones, infos = map(np.array, zip(*results))
for (i, done) in enumerate(dones):
if 'bool' in done.__class__.__name__:
if done:
obs[i] = self.envs[i].reset()
else:
if np.all(done):
obs[i] = self.envs[i].reset()
self.actions = None
return obs, rews, dones, infos
def reset(self):
obs = [env.reset() for env in self.envs]
return np.array(obs)
def close(self):
for env in self.envs:
env.close()
def render(self, mode="human"):
if mode == "rgb_array":
return np.array([env.render(mode=mode) for env in self.envs])
elif mode == "human":
for env in self.envs:
env.render(mode=mode)
else:
raise NotImplementedError
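# Illustrative usage sketch (hypothetical `make_env` factory and `MyGymLikeEnv`, not part of this module):
#   def make_env(rank):
#       def _thunk():
#           return MyGymLikeEnv(seed=rank)  # any env exposing the expected spaces/step/reset API
#       return _thunk
#   envs = DummyVecEnv([make_env(i) for i in range(4)])
#   obs = envs.reset()
#   obs, rews, dones, infos = envs.step(actions)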
class ShareDummyVecEnv(ShareVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
ShareVecEnv.__init__(self, len(
env_fns), env.observation_space, env.share_observation_space, env.state_space, env.action_space)
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]
obs, share_obs, state, rews, dones, infos, available_actions = map(
np.array, zip(*results))
for (i, done) in enumerate(dones):
if 'bool' in done.__class__.__name__:
if done:
obs[i], share_obs[i], state[i], available_actions[i] = self.envs[i].reset()
else:
if np.all(done):
obs[i], share_obs[i], state[i], available_actions[i] = self.envs[i].reset()
self.actions = None
return obs, share_obs, state, rews, dones, infos, available_actions
def reset(self):
results = [env.reset() for env in self.envs]
obs, share_obs, state, available_actions = map(np.array, zip(*results))
return obs, share_obs, state, available_actions
def close(self):
for env in self.envs:
env.close()
def render(self, mode="human"):
if mode == "rgb_array":
return np.array([env.render(mode=mode) for env in self.envs])
elif mode == "human":
for env in self.envs:
env.render(mode=mode)
else:
raise NotImplementedError
class ChooseDummyVecEnv(ShareVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
ShareVecEnv.__init__(self, len(
env_fns), env.observation_space, env.share_observation_space, env.action_space)
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]
obs, share_obs, rews, dones, infos, available_actions = map(
np.array, zip(*results))
self.actions = None
return obs, share_obs, rews, dones, infos, available_actions
def reset(self, reset_choose):
results = [env.reset(choose)
for (env, choose) in zip(self.envs, reset_choose)]
obs, share_obs, available_actions = map(np.array, zip(*results))
return obs, share_obs, available_actions
def close(self):
for env in self.envs:
env.close()
def render(self, mode="human"):
if mode == "rgb_array":
return np.array([env.render(mode=mode) for env in self.envs])
elif mode == "human":
for env in self.envs:
env.render(mode=mode)
else:
raise NotImplementedError
class ChooseSimpleDummyVecEnv(ShareVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
ShareVecEnv.__init__(self, len(
env_fns), env.observation_space, env.share_observation_space, env.action_space)
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]
obs, rews, dones, infos = map(np.array, zip(*results))
self.actions = None
return obs, rews, dones, infos
def reset(self, reset_choose):
obs = [env.reset(choose)
for (env, choose) in zip(self.envs, reset_choose)]
return np.array(obs)
def close(self):
for env in self.envs:
env.close()
def render(self, mode="human"):
if mode == "rgb_array":
return np.array([env.render(mode=mode) for env in self.envs])
elif mode == "human":
for env in self.envs:
env.render(mode=mode)
else:
raise NotImplementedError
| [] |
2024-01-10 | Robotics2024/singing-bot | src~asking_question.py | import os
import openai
import json
#loading key
with open('api-key.txt','r') as key:
data = key.read().strip()
openai.api_key = data
# QUESTION_PROMPT = "Type your question here: "
# # Get user input
# question = input(QUESTION_PROMPT)
def asking_question(question):
print("Asking ChatGPT this question: " + question)
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo", # Specify the chat model ("gpt-4.0" is recommended for the latest version)
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
temperature= 0.7
)
answer = response.choices[0].message["content"]
        print('answer from chat gpt: ' + str(answer))
usage = response['usage']
print("in this question we used: "+ str(usage['prompt_tokens']) + " prompt_tokens")
print("in this question we used: "+ str(usage['completion_tokens']) + " completion_tokens")
print("in this question we used: "+ str(usage['total_tokens']) + " total_tokens")
except Exception as e:
print("An error occurred:", str(e))
answer = "An error occurred while processing your request."
return answer
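# Illustrative usage sketch (assumes api-key.txt sits next to this script):
# if __name__ == "__main__":
#     print(asking_question("What is the capital of Denmark?"))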
| [
"You are a helpful assistant."
] |
2024-01-10 | Cithoreal/AISchedule | journal_processor.py | import os
from dotenv import load_dotenv
from openai import OpenAI
load_dotenv()
client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))
model = "gpt-3.5-turbo-1106"
#model = "gpt-4"
directory = "/home/Cithoreal/Nextcloud/Documents/Audio Journals/Transcriptions/"
unprocessed = directory + "Unprocessed/"
processed = directory + "Processed/"
done = directory + "Done/"
def process_journal(transcription):
abstract_summary = abstract_summary_extraction(transcription)
action_items = action_item_extraction(transcription)
events = events_extraction(transcription)
return {
'abstract_summary': abstract_summary,
'action_items': action_items,
'events': events
}
def abstract_summary_extraction(transcription):
response = client.chat.completions.create(
model=model,
temperature=0,
messages=[
{
"role": "system",
"content": "You are a highly skilled AI trained in language comprehension and summarization, with a focus on personal and professional narratives. Please read the text and summarize it into a concise abstract paragraph. The summary should reflect my first-person perspective, capturing key points relevant to my personal life and professional project ideas, while omitting extraneous details."
},
{
"role": "user",
"content": transcription
}
]
)
return response.choices[0].message.content
def action_item_extraction(transcription):
response = client.chat.completions.create(
model=model,
temperature=0,
messages=[
{
"role": "system",
"content": "You are an AI expert in analyzing conversations for actionable insights. Review the text and identify tasks, assignments, or actions."
},
{
"role": "user",
"content": transcription
}
]
)
return response.choices[0].message.content
def events_extraction(transcription):
response = client.chat.completions.create(
model=model,
temperature=0,
messages=[
{
"role": "system",
"content": "You are an AI expert in analyzing conversations for actionable insights. Review the text and identify events mentioned."
},
{
"role": "user",
"content": transcription
}
]
)
return response.choices[0].message.content
#Save as formatted .md file instead of docx
def save_as_md(minutes, filename):
with open(filename, "w") as f:
for key, value in minutes.items():
# Replace underscores with spaces and capitalize each word for the heading
heading = ' '.join(word.capitalize() for word in key.split('_'))
f.write(f"# {heading}\n")
f.write(f"{value}\n\n")
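# The resulting .md file has one "# Heading" per dictionary key, e.g. (illustrative):
#   # Abstract Summary
#   <summary text>
#
#   # Action Items
#   <action items>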
#Loop through each file in the directory and transcribe it, when finished move the file to the processed folder
for filename in os.listdir(unprocessed):
if filename.endswith(".txt"):
print(filename)
transcription = open(unprocessed + filename, "r").read()
document = process_journal(transcription)
os.rename(unprocessed + filename, done + filename)
        save_as_md(document, processed + filename[:-4] + ".md")
else:
continue | [
"You are an AI expert in analyzing conversations for actionable insights. Review the text and identify tasks, assignments, or actions.",
"You are a highly skilled AI trained in language comprehension and summarization, with a focus on personal and professional narratives. Please read the text and summarize it into a concise abstract paragraph. The summary should reflect my first-person perspective, capturing key points relevant to my personal life and professional project ideas, while omitting extraneous details.",
"You are an AI expert in analyzing conversations for actionable insights. Review the text and identify events mentioned."
] |
2024-01-10 | Cithoreal/AISchedule | discord_bot.py | import os
from dotenv import load_dotenv
import discord
from discord.ext import commands
from openai_schedule_assistant import *
from main import *
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
# Create the bot with a command prefix
discord_bot = commands.Bot(command_prefix='!',intents=discord.Intents.default())
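# Note: with discord.py 2.x, reading ctx.content in on_message generally also requires the
# privileged message_content intent to be enabled (here and in the developer portal);
# with only Intents.default() the content of other users' messages may arrive empty.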
# Event handler for when the bot is ready
@discord_bot.event
async def on_ready():
print(f'Logged in as {discord_bot.user.name} (ID: {discord_bot.user.id})')
print('------')
activity = discord.Game(name="Scheduling your events | !help")
await discord_bot.change_presence(status=discord.Status.online, activity=activity)
@discord_bot.event
async def on_message(ctx):
try:
#print(ctx.attachments)
#if file attachment, download the file and read it. Send the whole text to the AI
if ctx.attachments:
for attachment in ctx.attachments:
await attachment.save(attachment.filename)
with open(attachment.filename, 'r') as file:
data = await message_ai(file.read())
await ctx.channel.send(data)
else:
if not ctx.author.bot:
data = await message_ai(ctx.content)
await ctx.channel.send(data)
#print(ctx.content)
#if not ctx.author.bot:
# data = message_ai(ctx.content)
# await ctx.channel.send(data)
except Exception as e:
error_message = f"An error occurred while processing your message: {str(e)}"
await ctx.channel.send(error_message)
# Initialize and run the bot
if __name__ == "__main__":
discord_bot.run(TOKEN)
| [] |
2024-01-10 | bingege-global/nas-tools | app~plugins~modules~_autosignin~chdbits.py | import json
import os
import random
import re
from lxml import etree
from app.helper.openai_helper import OpenAiHelper
from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class CHDBits(_ISiteSigninHandler):
"""
ๅฝฉ่นๅฒ็ญพๅฐ
ๅฆๆๅกซๅopenai keyๅ่ฐ็จchatgpt่ทๅ็ญๆก
ๅฆๅ้ๆบ
"""
    # ๅน้
็็ซ็นUrl๏ผๆฏไธไธชๅฎ็ฐ็ฑป้ฝ้่ฆ่ฎพ็ฝฎไธบ่ชๅทฑ็็ซ็นUrl
site_url = "chdbits.co"
# ๅทฒ็ญพๅฐ
_sign_regex = ['ไปๅคฉๅทฒ็ป็ญพ่ฟๅฐไบ']
    # ็ญพๅฐๆๅ๏ผๅพ
่กฅๅ
_success_regex = ['\\d+็น้ญๅๅผ']
# ๅญๅจๆญฃ็กฎ็็ญๆก๏ผๅ็ปญๅฏ็ดๆฅๆฅ
_answer_path = os.path.join(Config().get_temp_path(), "signin")
_answer_file = _answer_path + "/chdbits.json"
@classmethod
def match(cls, url):
"""
        ๆ นๆฎ็ซ็นUrlๅคๆญๆฏๅฆๅน้
ๅฝๅ็ซ็น็ญพๅฐ็ฑป๏ผๅคง้จๅๆ
ๅตไฝฟ็จ้ป่ฎคๅฎ็ฐๅณๅฏ
        :param url: ็ซ็นUrl
        :return: ๆฏๅฆๅน้
๏ผๅฆๅน้
ๅไผ่ฐ็จ่ฏฅ็ฑป็signinๆนๆณ
"""
return True if StringUtils.url_equal(url, cls.site_url) else False
def signin(self, site_info: dict):
"""
ๆง่ก็ญพๅฐๆไฝ
:param site_info: ็ซ็นไฟกๆฏ๏ผๅซๆ็ซ็นUrlใ็ซ็นCookieใUA็ญไฟกๆฏ
:return: ็ญพๅฐ็ปๆไฟกๆฏ
"""
site = site_info.get("name")
site_cookie = site_info.get("cookie")
ua = site_info.get("ua")
proxy = Config().get_proxies() if site_info.get("proxy") else None
# ๅๅปบๆญฃ็กฎ็ญๆกๅญๅจ็ฎๅฝ
if not os.path.exists(os.path.dirname(self._answer_file)):
os.makedirs(os.path.dirname(self._answer_file))
# ๅคๆญไปๆฅๆฏๅฆๅทฒ็ญพๅฐ
index_res = RequestUtils(cookies=site_cookie,
headers=ua,
proxies=proxy
).get_res(url='https://chdbits.co/bakatest.php')
if not index_res or index_res.status_code != 200:
self.error(f"็ญพๅฐๅคฑ่ดฅ๏ผ่ฏทๆฃๆฅ็ซ็น่ฟ้ๆง")
return False, f'ใ{site}ใ็ญพๅฐๅคฑ่ดฅ๏ผ่ฏทๆฃๆฅ็ซ็น่ฟ้ๆง'
if "login.php" in index_res.text:
self.error(f"็ญพๅฐๅคฑ่ดฅ๏ผcookieๅคฑๆ")
return False, f'ใ{site}ใ็ญพๅฐๅคฑ่ดฅ๏ผcookieๅคฑๆ'
sign_status = self.sign_in_result(html_res=index_res.text,
regexs=self._sign_regex)
if sign_status:
self.info(f"ไปๆฅๅทฒ็ญพๅฐ")
return True, f'ใ{site}ใไปๆฅๅทฒ็ญพๅฐ'
# ๆฒกๆ็ญพๅฐๅ่งฃๆhtml
html = etree.HTML(index_res.text)
if not html:
return False, f'ใ{site}ใ็ญพๅฐๅคฑ่ดฅ'
# ่ทๅ้กต้ข้ฎ้ขใ็ญๆก
questionid = html.xpath("//input[@name='questionid']/@value")[0]
option_ids = html.xpath("//input[@name='choice[]']/@value")
option_values = html.xpath("//input[@name='choice[]']/following-sibling::text()")
question_str = html.xpath("//td[@class='text' and contains(text(),'่ฏท้ฎ๏ผ')]/text()")[0]
answers = list(zip(option_ids, option_values))
# ๆญฃๅ่ทๅ้ฎ้ข
match = re.search(r'่ฏท้ฎ๏ผ(.+)', question_str)
if match:
question_str = match.group(1)
self.debug(f"่ทๅๅฐ็ญพๅฐ้ฎ้ข {question_str}")
else:
self.error(f"ๆช่ทๅๅฐ็ญพๅฐ้ฎ้ข")
return False, f"ใ{site}ใ็ญพๅฐๅคฑ่ดฅ๏ผๆช่ทๅๅฐ็ญพๅฐ้ฎ้ข"
# ๆฅ่ฏขๅทฒๆ็ญๆก
exits_answers = {}
try:
with open(self._answer_file, 'r') as f:
json_str = f.read()
exits_answers = json.loads(json_str)
# ๆฅ่ฏขๆฌๅฐๆฌๆฌก้ช่ฏ็ hash็ญๆก
question_answer = exits_answers[question_str]
# question_answerๆฏๆฐ็ป
if not isinstance(question_answer, list):
question_answer = [question_answer]
# ๆฌๅฐๅญๅจๆฌๆฌกhashๅฏนๅบ็ๆญฃ็กฎ็ญๆกๅ้ๅๆฅ่ฏข
choice = []
for q in question_answer:
for num, answer in answers:
if str(q) == str(num):
choice.append(int(q))
if len(choice) > 0:
# ็ญพๅฐ
return self.__signin(questionid=questionid,
choice=choice,
site_cookie=site_cookie,
ua=ua,
proxy=proxy,
site=site)
except (FileNotFoundError, IOError, OSError) as e:
self.debug("ๆฅ่ฏขๆฌๅฐๅทฒ็ฅ็ญๆกๅคฑ่ดฅ๏ผ็ปง็ปญ่ฏทๆฑ่ฑ็ฃๆฅ่ฏข")
# ๆญฃ็กฎ็ญๆก๏ผ้ป่ฎค้ๆบ๏ผๅฆๆgpt่ฟๅๅ็จgpt่ฟๅ็็ญๆกๆไบค
choice = [option_ids[random.randint(0, len(option_ids) - 1)]]
        # ็ป่ฃ
gpt้ฎ้ข
gpt_options = "{\n" + ",\n".join([f"{num}:{value}" for num, value in answers]) + "\n}"
gpt_question = f"้ข็ฎ๏ผ{question_str}\n" \
f"้้กน๏ผ{gpt_options}"
        self.debug(f"็ป่ฃ
chatgpt้ฎ้ข {gpt_question}")
# chatgpt่ทๅ็ญๆก
answer = OpenAiHelper().get_question_answer(question=gpt_question)
self.debug(f"chatpgt่ฟๅ็ปๆ {answer}")
# ๅค็chatgpt่ฟๅ็็ญๆกไฟกๆฏ
if answer is None:
self.warn(f"ChatGPTๆชๅฏ็จ, ๅผๅง้ๆบ็ญพๅฐ")
# return f"ใ{site}ใ็ญพๅฐๅคฑ่ดฅ๏ผChatGPTๆชๅฏ็จ"
elif answer:
# ๆญฃๅ่ทๅๅญ็ฌฆไธฒไธญ็ๆฐๅญ
answer_nums = list(map(int, re.findall("\d+", answer)))
if not answer_nums:
self.warn(f"ๆ ๆณไปchatgptๅๅค {answer} ไธญ่ทๅ็ญๆก, ๅฐ้็จ้ๆบ็ญพๅฐ")
else:
choice = []
for answer in answer_nums:
                    # ๅฆๆ่ฟๅ็ๆฐๅญๅจoption_ids่ๅดๅ
๏ผๅ็ดๆฅไฝไธบ็ญๆก
if str(answer) in option_ids:
choice.append(int(answer))
self.info(f"chatgpt่ฟๅ็ญๆกid {answer} ๅจ็ญพๅฐ้้กน {option_ids} ไธญ")
# ็ญพๅฐ
return self.__signin(questionid=questionid,
choice=choice,
site_cookie=site_cookie,
ua=ua,
proxy=proxy,
site=site,
exits_answers=exits_answers,
question=question_str)
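    # Answer selection order in signin(): previously cached answers for this question are tried
    # first, then ChatGPT (via OpenAiHelper) if configured, and finally a random option; a
    # successful submission is written back to chdbits.json for reuse.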
def __signin(self, questionid, choice, site, site_cookie, ua, proxy, exits_answers=None, question=None):
"""
็ญพๅฐ่ฏทๆฑ
questionid: 450
choice[]: 8
choice[]: 4
        usercomment: ๆญคๅปๅฟๆ
:ๆ 
submit: ๆไบค
ๅค้ไผๆๅคไธชchoice[]....
"""
data = {
'questionid': questionid,
'choice[]': choice[0] if len(choice) == 1 else choice,
'usercomment': 'ๅคช้พไบ๏ผ',
'wantskip': 'ไธไผ'
}
self.debug(f"็ญพๅฐ่ฏทๆฑๅๆฐ {data}")
sign_res = RequestUtils(cookies=site_cookie,
headers=ua,
proxies=proxy
).post_res(url='https://chdbits.co/bakatest.php', data=data)
if not sign_res or sign_res.status_code != 200:
self.error(f"็ญพๅฐๅคฑ่ดฅ๏ผ็ญพๅฐๆฅๅฃ่ฏทๆฑๅคฑ่ดฅ")
return False, f'ใ{site}ใ็ญพๅฐๅคฑ่ดฅ๏ผ็ญพๅฐๆฅๅฃ่ฏทๆฑๅคฑ่ดฅ'
# ๅคๆญๆฏๅฆ็ญพๅฐๆๅ
sign_status = self.sign_in_result(html_res=sign_res.text,
regexs=self._success_regex)
if sign_status:
self.info(f"็ญพๅฐๆๅ")
if exits_answers and question:
                # ็ญพๅฐๆๅๅๅ
ฅๆฌๅฐๆไปถ
self.__write_local_answer(exits_answers=exits_answers or {},
question=question,
answer=choice)
return True, f'ใ{site}ใ็ญพๅฐๆๅ'
else:
sign_status = self.sign_in_result(html_res=sign_res.text,
regexs=self._sign_regex)
if sign_status:
self.info(f"ไปๆฅๅทฒ็ญพๅฐ")
return True, f'ใ{site}ใไปๆฅๅทฒ็ญพๅฐ'
self.error(f"็ญพๅฐๅคฑ่ดฅ๏ผ่ฏทๅฐ้กต้ขๆฅ็")
return False, f'ใ{site}ใ็ญพๅฐๅคฑ่ดฅ๏ผ่ฏทๅฐ้กต้ขๆฅ็'
def __write_local_answer(self, exits_answers, question, answer):
"""
        ็ญพๅฐๆๅๅๅ
ฅๆฌๅฐๆไปถ
"""
try:
exits_answers[question] = answer
# ๅบๅๅๆฐๆฎ
formatted_data = json.dumps(exits_answers, indent=4)
with open(self._answer_file, 'w') as f:
f.write(formatted_data)
except (FileNotFoundError, IOError, OSError) as e:
self.debug("็ญพๅฐๆๅๅๅ
ฅๆฌๅฐๆไปถๅคฑ่ดฅ")
| [] |
2024-01-10 | bingege-global/nas-tools | app~media~media.py | import difflib
import os
import random
import re
import traceback
from functools import lru_cache
import zhconv
from lxml import etree
import log
from app.helper import MetaHelper
from app.helper.openai_helper import OpenAiHelper
from app.media.meta.metainfo import MetaInfo
from app.media.tmdbv3api import TMDb, Search, Movie, TV, Person, Find, TMDbException, Discover, Trending, Episode, Genre
from app.utils import PathUtils, EpisodeFormat, RequestUtils, NumberUtils, StringUtils, cacheman
from app.utils.types import MediaType, MatchMode
from config import Config, KEYWORD_BLACKLIST, KEYWORD_SEARCH_WEIGHT_3, KEYWORD_SEARCH_WEIGHT_2, KEYWORD_SEARCH_WEIGHT_1, \
KEYWORD_STR_SIMILARITY_THRESHOLD, KEYWORD_DIFF_SCORE_THRESHOLD
class Media:
# TheMovieDB
tmdb = None
search = None
movie = None
tv = None
episode = None
person = None
find = None
trending = None
discover = None
genre = None
meta = None
openai = None
_rmt_match_mode = None
_search_keyword = None
_search_tmdbweb = None
_chatgpt_enable = None
_default_language = None
def __init__(self):
self.init_config()
def init_config(self):
app = Config().get_config('app')
media = Config().get_config('media')
laboratory = Config().get_config('laboratory')
        # ่พ
ๅฉๆฅ่ฏข
self._search_keyword = laboratory.get("search_keyword")
        # WEB่พ
ๅฉ
self._search_tmdbweb = laboratory.get("search_tmdbweb")
        # ChatGPT่พ
ๅฉ
self._chatgpt_enable = laboratory.get("chatgpt_enable")
# ้ป่ฎค่ฏญ่จ
self._default_language = media.get("tmdb_language", "zh") or "zh"
# TMDB
if app.get('rmt_tmdbkey'):
# TMDBไธปไฝ
self.tmdb = TMDb()
# ๅๅ
self.tmdb.domain = Config().get_tmdbapi_url()
# ๅผๅฏ็ผๅญ
self.tmdb.cache = True
# APIKEY
self.tmdb.api_key = app.get('rmt_tmdbkey')
# ่ฏญ็ง
self.tmdb.language = self._default_language
# ไปฃ็
self.tmdb.proxies = Config().get_proxies()
# ่ฐ่ฏๆจกๅผ
self.tmdb.debug = False
# ๆฅ่ฏขๅฏน่ฑก
self.search = Search()
self.movie = Movie()
self.tv = TV()
self.episode = Episode()
self.find = Find()
self.person = Person()
self.trending = Trending()
self.discover = Discover()
self.genre = Genre()
            # ๅ
ๆฐๆฎ็ผๅญ
self.meta = MetaHelper()
# ChatGPT
self.openai = OpenAiHelper()
        # ๅน้
ๆจกๅผ
rmt_match_mode = app.get('rmt_match_mode', 'normal')
if rmt_match_mode:
rmt_match_mode = rmt_match_mode.upper()
else:
rmt_match_mode = "NORMAL"
if rmt_match_mode == "STRICT":
self._rmt_match_mode = MatchMode.STRICT
else:
self._rmt_match_mode = MatchMode.NORMAL
def __set_language(self, language):
"""
่ฎพ็ฝฎ่ฏญ่จ
:param language: zh/en
"""
if not self.tmdb:
return
if language:
self.tmdb.language = language
else:
self.tmdb.language = self._default_language
@staticmethod
def __compare_tmdb_names(file_name, tmdb_names):
"""
        ๆฏ่พๆไปถๅๆฏๅฆๅน้
๏ผๅฟฝ็ฅๅคงๅฐๅๅ็นๆฎๅญ็ฌฆ
        :param file_name: ่ฏๅซ็ๆไปถๅๆ่
็งๅญๅ
:param tmdb_names: TMDB่ฟๅ็่ฏๅ
:return: True or False
"""
if not file_name or not tmdb_names:
return False
if not isinstance(tmdb_names, list):
tmdb_names = [tmdb_names]
file_name = StringUtils.handler_special_chars(file_name).upper()
for tmdb_name in tmdb_names:
tmdb_name = StringUtils.handler_special_chars(tmdb_name).strip().upper()
if file_name == tmdb_name:
return True
return False
def __search_tmdb_allnames(self, mtype: MediaType, tmdb_id):
"""
ๆ็ดขtmdbไธญๆๆ็ๆ ้ขๅ่ฏๅ๏ผ็จไบๅ็งฐๅน้
:param mtype: ็ฑปๅ๏ผ็ตๅฝฑใ็ต่งๅงใๅจๆผซ
:param tmdb_id: TMDB็ID
        :return: ๆๆ่ฏๅ็ๆธ
ๅ
"""
if not mtype or not tmdb_id:
return {}, []
ret_names = []
tmdb_info = self.get_tmdb_info(mtype=mtype, tmdbid=tmdb_id)
if not tmdb_info:
return tmdb_info, []
if mtype == MediaType.MOVIE:
alternative_titles = tmdb_info.get("alternative_titles", {}).get("titles", [])
for alternative_title in alternative_titles:
title = alternative_title.get("title")
if title and title not in ret_names:
ret_names.append(title)
translations = tmdb_info.get("translations", {}).get("translations", [])
for translation in translations:
title = translation.get("data", {}).get("title")
if title and title not in ret_names:
ret_names.append(title)
else:
alternative_titles = tmdb_info.get("alternative_titles", {}).get("results", [])
for alternative_title in alternative_titles:
name = alternative_title.get("title")
if name and name not in ret_names:
ret_names.append(name)
translations = tmdb_info.get("translations", {}).get("translations", [])
for translation in translations:
name = translation.get("data", {}).get("name")
if name and name not in ret_names:
ret_names.append(name)
return tmdb_info, ret_names
def __search_tmdb(self, file_media_name,
search_type,
first_media_year=None,
media_year=None,
season_number=None):
"""
        ๆ็ดขtmdbไธญ็ๅชไฝไฟกๆฏ๏ผๅน้
่ฟๅไธๆกๅฐฝๅฏ่ฝๆญฃ็กฎ็ไฟกๆฏ
:param file_media_name: ๅ็ดข็ๅ็งฐ
:param search_type: ็ฑปๅ๏ผ็ตๅฝฑใ็ต่งๅงใๅจๆผซ
:param first_media_year: ๅนดไปฝ๏ผๅฆ่ฆๆฏๅญฃ้้่ฆๆฏ้ฆๆญๅนดไปฝ(first_air_date)
:param media_year: ๅฝๅๅญฃ้ๅนดไปฝ
:param season_number: ๅญฃ้๏ผๆดๆฐ
:return: TMDB็INFO๏ผๅๆถไผๅฐsearch_type่ตๅผๅฐmedia_typeไธญ
"""
if not self.search:
return None
if not file_media_name:
return None
# TMDBๆ็ดข
info = {}
if search_type == MediaType.MOVIE:
year_range = [first_media_year]
if first_media_year:
year_range.append(str(int(first_media_year) + 1))
year_range.append(str(int(first_media_year) - 1))
for year in year_range:
log.debug(
f"ใMetaใๆญฃๅจ่ฏๅซ{search_type.value}๏ผ{file_media_name}, ๅนดไปฝ={year} ...")
info = self.__search_movie_by_name(file_media_name, year)
if info:
info['media_type'] = MediaType.MOVIE
log.info("ใMetaใ%s ่ฏๅซๅฐ ็ตๅฝฑ๏ผTMDBID=%s, ๅ็งฐ=%s, ไธๆ ๆฅๆ=%s" % (
file_media_name,
info.get('id'),
info.get('title'),
info.get('release_date')))
break
else:
# ๆๅฝๅๅญฃๅๅฝๅๅญฃ้ๅนดไปฝ๏ผไฝฟ็จ็ฒพ็กฎๅน้
if media_year and season_number:
log.debug(
f"ใMetaใๆญฃๅจ่ฏๅซ{search_type.value}๏ผ{file_media_name}, ๅญฃ้={season_number}, ๅญฃ้ๅนดไปฝ={media_year} ...")
info = self.__search_tv_by_season(file_media_name,
media_year,
season_number)
if not info:
log.debug(
f"ใMetaใๆญฃๅจ่ฏๅซ{search_type.value}๏ผ{file_media_name}, ๅนดไปฝ={StringUtils.xstr(first_media_year)} ...")
info = self.__search_tv_by_name(file_media_name,
first_media_year)
if info:
info['media_type'] = MediaType.TV
log.info("ใMetaใ%s ่ฏๅซๅฐ ็ต่งๅง๏ผTMDBID=%s, ๅ็งฐ=%s, ้ฆๆญๆฅๆ=%s" % (
file_media_name,
info.get('id'),
info.get('name'),
info.get('first_air_date')))
# ่ฟๅ
if not info:
log.info("ใMetaใ%s ไปฅๅนดไปฝ %s ๅจTMDBไธญๆชๆพๅฐ%sไฟกๆฏ!" % (
file_media_name, StringUtils.xstr(first_media_year), search_type.value if search_type else ""))
return info
def __search_movie_by_name(self, file_media_name, first_media_year):
"""
ๆ นๆฎๅ็งฐๆฅ่ฏข็ตๅฝฑTMDBๅน้
:param file_media_name: ่ฏๅซ็ๆไปถๅๆ็งๅญๅ
:param first_media_year: ็ตๅฝฑไธๆ ๆฅๆ
        :return: ๅน้
็ๅชไฝไฟกๆฏ
"""
try:
if first_media_year:
movies = self.search.movies({"query": file_media_name, "year": first_media_year})
else:
movies = self.search.movies({"query": file_media_name})
except TMDbException as err:
log.error(f"ใMetaใ่ฟๆฅTMDBๅบ้๏ผ{str(err)}")
return None
except Exception as e:
log.error(f"ใMetaใ่ฟๆฅTMDBๅบ้๏ผ{str(e)}")
return None
log.debug(f"ใMetaใAPI่ฟๅ๏ผ{str(self.search.total_results)}")
if len(movies) == 0:
            log.debug(f"ใMetaใ{file_media_name} ๆชๆพๅฐ็ธๅ
ณ็ตๅฝฑไฟกๆฏ!")
return {}
else:
info = {}
if first_media_year:
for movie in movies:
if movie.get('release_date'):
if self.__compare_tmdb_names(file_media_name, movie.get('title')) \
and movie.get('release_date')[0:4] == str(first_media_year):
return movie
if self.__compare_tmdb_names(file_media_name, movie.get('original_title')) \
and movie.get('release_date')[0:4] == str(first_media_year):
return movie
else:
for movie in movies:
if self.__compare_tmdb_names(file_media_name, movie.get('title')) \
or self.__compare_tmdb_names(file_media_name, movie.get('original_title')):
return movie
if not info:
index = 0
for movie in movies:
if first_media_year:
if not movie.get('release_date'):
continue
if movie.get('release_date')[0:4] != str(first_media_year):
continue
index += 1
info, names = self.__search_tmdb_allnames(MediaType.MOVIE, movie.get("id"))
if self.__compare_tmdb_names(file_media_name, names):
return info
else:
index += 1
info, names = self.__search_tmdb_allnames(MediaType.MOVIE, movie.get("id"))
if self.__compare_tmdb_names(file_media_name, names):
return info
if index > 5:
break
return {}
def __search_tv_by_name(self, file_media_name, first_media_year):
"""
ๆ นๆฎๅ็งฐๆฅ่ฏข็ต่งๅงTMDBๅน้
        :param file_media_name: ่ฏๅซ็ๆไปถๅๆ่
็งๅญๅ
        :param first_media_year: ็ต่งๅง็้ฆๆญๅนดไปฝ
        :return: ๅน้
็ๅชไฝไฟกๆฏ
"""
try:
if first_media_year:
tvs = self.search.tv_shows({"query": file_media_name, "first_air_date_year": first_media_year})
else:
tvs = self.search.tv_shows({"query": file_media_name})
except TMDbException as err:
log.error(f"ใMetaใ่ฟๆฅTMDBๅบ้๏ผ{str(err)}")
return None
except Exception as e:
log.error(f"ใMetaใ่ฟๆฅTMDBๅบ้๏ผ{str(e)}")
return None
log.debug(f"ใMetaใAPI่ฟๅ๏ผ{str(self.search.total_results)}")
if len(tvs) == 0:
            log.debug(f"ใMetaใ{file_media_name} ๆชๆพๅฐ็ธๅ
ณๅง้ไฟกๆฏ!")
return {}
else:
info = {}
if first_media_year:
for tv in tvs:
if tv.get('first_air_date'):
if self.__compare_tmdb_names(file_media_name, tv.get('name')) \
and tv.get('first_air_date')[0:4] == str(first_media_year):
return tv
if self.__compare_tmdb_names(file_media_name, tv.get('original_name')) \
and tv.get('first_air_date')[0:4] == str(first_media_year):
return tv
else:
for tv in tvs:
if self.__compare_tmdb_names(file_media_name, tv.get('name')) \
or self.__compare_tmdb_names(file_media_name, tv.get('original_name')):
return tv
if not info:
index = 0
for tv in tvs:
if first_media_year:
if not tv.get('first_air_date'):
continue
if tv.get('first_air_date')[0:4] != str(first_media_year):
continue
index += 1
info, names = self.__search_tmdb_allnames(MediaType.TV, tv.get("id"))
if self.__compare_tmdb_names(file_media_name, names):
return info
else:
index += 1
info, names = self.__search_tmdb_allnames(MediaType.TV, tv.get("id"))
if self.__compare_tmdb_names(file_media_name, names):
return info
if index > 5:
break
return {}
def __search_tv_by_season(self, file_media_name, media_year, season_number):
"""
        ๆ นๆฎ็ต่งๅง็ๅ็งฐๅๅญฃ็ๅนดไปฝๅๅบๅทๅน้
TMDB
        :param file_media_name: ่ฏๅซ็ๆไปถๅๆ่
็งๅญๅ
        :param media_year: ๅญฃ็ๅนดไปฝ
        :param season_number: ๅญฃๅบๅท
        :return: ๅน้
็ๅชไฝไฟกๆฏ
"""
def __season_match(tv_info, season_year):
if not tv_info:
return False
try:
seasons = self.get_tmdb_tv_seasons(tv_info=tv_info)
for season in seasons:
if season.get("air_date") and season.get("season_number"):
if season.get("air_date")[0:4] == str(season_year) \
and season.get("season_number") == int(season_number):
return True
except Exception as e1:
log.error(f"ใMetaใ่ฟๆฅTMDBๅบ้๏ผ{e1}")
return False
return False
try:
tvs = self.search.tv_shows({"query": file_media_name})
except TMDbException as err:
log.error(f"ใMetaใ่ฟๆฅTMDBๅบ้๏ผ{str(err)}")
return None
except Exception as e:
log.error(f"ใMetaใ่ฟๆฅTMDBๅบ้๏ผ{e}")
return None
if len(tvs) == 0:
            log.debug("ใMetaใ%s ๆชๆพๅฐๅญฃ%s็ธๅ
ณไฟกๆฏ!" % (file_media_name, season_number))
return {}
else:
for tv in tvs:
if (self.__compare_tmdb_names(file_media_name, tv.get('name'))
or self.__compare_tmdb_names(file_media_name, tv.get('original_name'))) \
and (tv.get('first_air_date') and tv.get('first_air_date')[0:4] == str(media_year)):
return tv
for tv in tvs[:5]:
info, names = self.__search_tmdb_allnames(MediaType.TV, tv.get("id"))
if not self.__compare_tmdb_names(file_media_name, names):
continue
if __season_match(tv_info=info, season_year=media_year):
return info
return {}
def __search_multi_tmdb(self, file_media_name):
"""
ๆ นๆฎๅ็งฐๅๆถๆฅ่ฏข็ตๅฝฑๅ็ต่งๅง๏ผไธๅธฆๅนดไปฝ
:param file_media_name: ่ฏๅซ็ๆไปถๅๆ็งๅญๅ
        :return: ๅน้
็ๅชไฝไฟกๆฏ
"""
try:
multis = self.search.multi({"query": file_media_name}) or []
except TMDbException as err:
log.error(f"ใMetaใ่ฟๆฅTMDBๅบ้๏ผ{str(err)}")
return None
except Exception as e:
log.error(f"ใMetaใ่ฟๆฅTMDBๅบ้๏ผ{str(e)}")
return None
log.debug(f"ใMetaใAPI่ฟๅ๏ผ{str(self.search.total_results)}")
if len(multis) == 0:
            log.debug(f"ใMetaใ{file_media_name} ๆชๆพๅฐ็ธๅ
ณๅชไฝๆฏ!")
return {}
else:
info = {}
for multi in multis:
if multi.get("media_type") == "movie":
if self.__compare_tmdb_names(file_media_name, multi.get('title')) \
or self.__compare_tmdb_names(file_media_name, multi.get('original_title')):
info = multi
elif multi.get("media_type") == "tv":
if self.__compare_tmdb_names(file_media_name, multi.get('name')) \
or self.__compare_tmdb_names(file_media_name, multi.get('original_name')):
info = multi
if not info:
for multi in multis[:5]:
if multi.get("media_type") == "movie":
movie_info, names = self.__search_tmdb_allnames(MediaType.MOVIE, multi.get("id"))
if self.__compare_tmdb_names(file_media_name, names):
info = movie_info
elif multi.get("media_type") == "tv":
tv_info, names = self.__search_tmdb_allnames(MediaType.TV, multi.get("id"))
if self.__compare_tmdb_names(file_media_name, names):
info = tv_info
# ่ฟๅ
if info:
info['media_type'] = MediaType.MOVIE if info.get('media_type') in ['movie',
MediaType.MOVIE] else MediaType.TV
else:
log.info("ใMetaใ%s ๅจTMDBไธญๆชๆพๅฐๅชไฝไฟกๆฏ!" % file_media_name)
return info
@lru_cache(maxsize=512)
def __search_chatgpt(self, file_name, mtype: MediaType):
"""
้่ฟChatGPTๅฏน่ฏ่ฏๅซๆไปถๅๅ้ๆฐ็ญไฟกๆฏ๏ผ้ๆฐๆฅ่ฏขTMDBๆฐๆฎ
:param file_name: ๅ็งฐ
:param mtype: ๅชไฝ็ฑปๅ
:return: ็ฑปๅใๅญฃใ้ใTMDBINFO
"""
def __failed():
return mtype, None, None, {}
def __failed_none():
return mtype, None, None, None
if not file_name:
return __failed_none()
log.info("ใMetaใๆญฃๅจ้่ฟChatGPT่ฏๅซๆไปถๅ๏ผ%s" % file_name)
file_info = self.openai.get_media_name(file_name)
if file_info is None:
log.info("ใMetaใChatGPT่ฏๅซๅบ้๏ผ่ฏทๆฃๆฅๆฏๅฆ่ฎพ็ฝฎOpenAI ApiKey๏ผ")
return __failed_none()
if not file_info:
log.info("ใMetaใChatGPT่ฏๅซๅคฑ่ดฅ๏ผ")
return __failed()
else:
log.info("ใMetaใChatGPT่ฏๅซ็ปๆ๏ผ%s" % file_info)
if file_info.get("season") or file_info.get("episode"):
mtype = MediaType.TV
# ๅค็ๆ ้ขๅๅนดไปฝ
file_title, file_year, season_number = None, None, None
if file_info.get("title"):
file_title = str(file_info.get("title")).split("/")[0].strip().replace(".", " ")
if file_info.get("year"):
file_year = str(file_info.get("year")).split("/")[0].strip()
if not file_title:
return __failed()
if not str(file_year).isdigit():
file_year = None
if mtype != MediaType.MOVIE or file_info.get("year"):
tmdb_info = self.__search_tmdb(file_media_name=file_title,
search_type=mtype,
first_media_year=file_year)
else:
tmdb_info = self.__search_multi_tmdb(file_media_name=file_title)
return mtype, file_info.get("season"), file_info.get("episode"), tmdb_info
@lru_cache(maxsize=512)
def __search_tmdb_web(self, file_media_name, mtype: MediaType):
"""
ๆ็ดขTMDB็ฝ็ซ๏ผ็ดๆฅๆๅ็ปๆ๏ผ็ปๆๅชๆไธๆกๆถๆ่ฟๅ
:param file_media_name: ๅ็งฐ
"""
if not file_media_name:
return None
if StringUtils.is_chinese(file_media_name):
return {}
log.info("ใMetaใๆญฃๅจไปTheDbMovie็ฝ็ซๆฅ่ฏข๏ผ%s ..." % file_media_name)
tmdb_url = "https://www.themoviedb.org/search?query=%s" % file_media_name
res = RequestUtils(timeout=5).get_res(url=tmdb_url)
if res and res.status_code == 200:
html_text = res.text
if not html_text:
return None
try:
tmdb_links = []
html = etree.HTML(html_text)
if mtype == MediaType.TV:
links = html.xpath("//a[@data-id and @data-media-type='tv']/@href")
else:
links = html.xpath("//a[@data-id]/@href")
for link in links:
if not link or (not link.startswith("/tv") and not link.startswith("/movie")):
continue
if link not in tmdb_links:
tmdb_links.append(link)
if len(tmdb_links) == 1:
tmdbinfo = self.get_tmdb_info(
mtype=MediaType.TV if tmdb_links[0].startswith("/tv") else MediaType.MOVIE,
tmdbid=tmdb_links[0].split("/")[-1])
if tmdbinfo:
if mtype == MediaType.TV and tmdbinfo.get('media_type') != MediaType.TV:
return {}
if tmdbinfo.get('media_type') == MediaType.MOVIE:
log.info("ใMetaใ%s ไปWEB่ฏๅซๅฐ ็ตๅฝฑ๏ผTMDBID=%s, ๅ็งฐ=%s, ไธๆ ๆฅๆ=%s" % (
file_media_name,
tmdbinfo.get('id'),
tmdbinfo.get('title'),
tmdbinfo.get('release_date')))
else:
log.info("ใMetaใ%s ไปWEB่ฏๅซๅฐ ็ต่งๅง๏ผTMDBID=%s, ๅ็งฐ=%s, ้ฆๆญๆฅๆ=%s" % (
file_media_name,
tmdbinfo.get('id'),
tmdbinfo.get('name'),
tmdbinfo.get('first_air_date')))
return tmdbinfo
elif len(tmdb_links) > 1:
log.info("ใMetaใ%s TMDB็ฝ็ซ่ฟๅๆฐๆฎ่ฟๅค๏ผ%s" % (file_media_name, len(tmdb_links)))
else:
log.info("ใMetaใ%s TMDB็ฝ็ซๆชๆฅ่ฏขๅฐๅชไฝไฟกๆฏ๏ผ" % file_media_name)
except Exception as err:
print(str(err))
return None
return None
def search_tmdb_person(self, name):
"""
ๆ็ดขTMDBๆผๅไฟกๆฏ
"""
if not self.search:
return []
try:
return self.__dict_tmdbpersons(self.search.people({"query": name}))
except Exception as err:
print(str(err))
return []
def get_tmdb_info(self, mtype: MediaType,
tmdbid,
language=None,
append_to_response=None,
chinese=True):
"""
็ปๅฎTMDBๅท๏ผๆฅ่ฏขไธๆกๅชไฝไฟกๆฏ
:param mtype: ็ฑปๅ๏ผ็ตๅฝฑใ็ต่งๅงใๅจๆผซ๏ผไธบ็ฉบๆถ้ฝๆฅ๏ผๆญคๆถ็จไธไธๅนดไปฝ๏ผ
        :param tmdbid: TMDB็ID๏ผๆtmdbidๆถไผๅ
ไฝฟ็จtmdbid๏ผๅฆๅไฝฟ็จๅนดไปฝๅๆ ้ข
:param language: ่ฏญ็ง
:param append_to_response: ้ๅ ไฟกๆฏ
:param chinese: ๆฏๅฆ่ฝฌๆขไธญๆๆ ้ข
"""
if not self.tmdb:
log.error("ใMetaใTMDB API Key ๆช่ฎพ็ฝฎ๏ผ")
return None
# ่ฎพ็ฝฎ่ฏญ่จ
self.__set_language(language)
if mtype == MediaType.MOVIE:
tmdb_info = self.__get_tmdb_movie_detail(tmdbid, append_to_response)
if tmdb_info:
tmdb_info['media_type'] = MediaType.MOVIE
else:
tmdb_info = self.__get_tmdb_tv_detail(tmdbid, append_to_response)
if tmdb_info:
tmdb_info['media_type'] = MediaType.TV
if tmdb_info:
# ่ฝฌๆขgenreid
tmdb_info['genre_ids'] = self.__get_genre_ids_from_detail(tmdb_info.get('genres'))
# ่ฝฌๆขไธญๆๆ ้ข
if chinese:
tmdb_info = self.__update_tmdbinfo_cn_title(tmdb_info)
return tmdb_info
def __update_tmdbinfo_cn_title(self, tmdb_info):
"""
ๆดๆฐTMDBไฟกๆฏไธญ็ไธญๆๅ็งฐ
"""
# ๆฅๆพไธญๆๅ
org_title = tmdb_info.get("title") \
if tmdb_info.get("media_type") == MediaType.MOVIE \
else tmdb_info.get("name")
if not StringUtils.is_chinese(org_title) \
and self._default_language == 'zh':
cn_title = self.__get_tmdb_chinese_title(tmdbinfo=tmdb_info)
if cn_title and cn_title != org_title:
if tmdb_info.get("media_type") == MediaType.MOVIE:
tmdb_info['title'] = cn_title
else:
tmdb_info['name'] = cn_title
return tmdb_info
def get_tmdb_infos(self, title, year=None, mtype: MediaType = None, language=None, page=1):
"""
        ๆฅ่ฏขๅ็งฐไธญๆๅ
ณ้ฎๅญ็ๆๆ็TMDBไฟกๆฏๅนถ่ฟๅ
"""
if not self.tmdb:
log.error("ใMetaใTMDB API Key ๆช่ฎพ็ฝฎ๏ผ")
return []
if not title:
return []
# ่ฎพ็ฝฎ่ฏญ่จ
self.__set_language(language)
if not mtype and not year:
results = self.__search_multi_tmdbinfos(title)
else:
if not mtype:
results = list(
set(self.__search_movie_tmdbinfos(title, year)).union(set(self.__search_tv_tmdbinfos(title, year))))
            # ็ปๅ็ปๆ็ๆ
ๅตไธ่ฆๆๅบ
results = sorted(results,
key=lambda x: x.get("release_date") or x.get("first_air_date") or "0000-00-00",
reverse=True)
elif mtype == MediaType.MOVIE:
results = self.__search_movie_tmdbinfos(title, year)
else:
results = self.__search_tv_tmdbinfos(title, year)
return results[(page - 1) * 20:page * 20]
def __search_multi_tmdbinfos(self, title):
"""
        ๅๆถๆฅ่ฏขๆจก็ณๅน้
็็ตๅฝฑใ็ต่งๅงTMDBไฟกๆฏ
"""
if not title:
return []
ret_infos = []
multis = self.search.multi({"query": title}) or []
for multi in multis:
if multi.get("media_type") in ["movie", "tv"]:
multi['media_type'] = MediaType.MOVIE if multi.get("media_type") == "movie" else MediaType.TV
ret_infos.append(multi)
return ret_infos
def __search_movie_tmdbinfos(self, title, year):
"""
        ๆฅ่ฏขๆจก็ณๅน้
็ๆๆ็ตๅฝฑTMDBไฟกๆฏ
"""
if not title:
return []
ret_infos = []
if year:
movies = self.search.movies({"query": title, "year": year}) or []
else:
movies = self.search.movies({"query": title}) or []
for movie in movies:
if title in movie.get("title"):
movie['media_type'] = MediaType.MOVIE
ret_infos.append(movie)
return ret_infos
def __search_tv_tmdbinfos(self, title, year):
"""
        ๆฅ่ฏขๆจก็ณๅน้
็ๆๆ็ต่งๅงTMDBไฟกๆฏ
"""
if not title:
return []
ret_infos = []
if year:
tvs = self.search.tv_shows({"query": title, "first_air_date_year": year}) or []
else:
tvs = self.search.tv_shows({"query": title}) or []
for tv in tvs:
if title in tv.get("name"):
tv['media_type'] = MediaType.TV
ret_infos.append(tv)
return ret_infos
@staticmethod
def __make_cache_key(meta_info):
"""
็ๆ็ผๅญ็key
"""
if not meta_info:
return None
return f"[{meta_info.type.value}]{meta_info.get_name()}-{meta_info.year}-{meta_info.begin_season}"
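    # Cache keys combine type, name, year and season, e.g. "[<type.value>]Inception-2010-None"
    # (illustrative values; the first segment is whatever MediaType.value holds for the item).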
def get_cache_info(self, meta_info):
"""
ๆ นๆฎๅ็งฐๆฅ่ฏขๆฏๅฆๅทฒ็ปๆ็ผๅญ
"""
if not meta_info:
return {}
return self.meta.get_meta_data_by_key(self.__make_cache_key(meta_info))
def get_media_info(self, title,
subtitle=None,
mtype=None,
strict=None,
cache=True,
language=None,
chinese=True,
append_to_response=None):
"""
ๅชๆๅ็งฐไฟกๆฏ๏ผๅคๅซๆฏ็ตๅฝฑ่ฟๆฏ็ต่งๅงๅนถๆๅฎTMDBไฟกๆฏ๏ผ็จไบ็งๅญๅ็งฐ่ฏๅซ
:param title: ็งๅญๅ็งฐ
:param subtitle: ็งๅญๅฏๆ ้ข
:param mtype: ็ฑปๅ๏ผ็ตๅฝฑใ็ต่งๅงใๅจๆผซ
:param strict: ๆฏๅฆไธฅๆ ผๆจกๅผ๏ผไธบtrueๆถ๏ผไธไผๅๅปๆๅนดไปฝๅๆฅไธๆฌก
:param cache: ๆฏๅฆไฝฟ็จ็ผๅญ๏ผ้ป่ฎคTRUE
:param language: ่ฏญ่จ
:param chinese: ๅๆ ้ขไธบ่ฑๆๆถๆฏๅฆไปๅซๅไธญๆ็ดขไธญๆๅ็งฐ
:param append_to_response: ้ขๅคๆฅ่ฏข็ไฟกๆฏ
:return: ๅธฆๆTMDBไฟกๆฏ็MetaInfoๅฏน่ฑก
"""
if not self.tmdb:
log.error("ใMetaใTMDB API Key ๆช่ฎพ็ฝฎ๏ผ")
return None
if not title:
return None
# ่ฎพ็ฝฎ่ฏญ่จ
self.__set_language(language)
# ่ฏๅซ
meta_info = MetaInfo(title, subtitle=subtitle)
if not meta_info.get_name() or not meta_info.type:
log.warn("ใRmtใ%s ๆช่ฏๅซๅบๆๆไฟกๆฏ๏ผ" % meta_info.org_string)
return None
if mtype:
meta_info.type = mtype
media_key = self.__make_cache_key(meta_info)
if not cache or not self.meta.get_meta_data_by_key(media_key):
            # ็ผๅญๆฒกๆๆ่
ๅผบๅถไธไฝฟ็จ็ผๅญ
if meta_info.type != MediaType.TV and not meta_info.year:
file_media_info = self.__search_multi_tmdb(file_media_name=meta_info.get_name())
else:
if meta_info.type == MediaType.TV:
# ็กฎๅฎๆฏ็ต่ง
file_media_info = self.__search_tmdb(file_media_name=meta_info.get_name(),
first_media_year=meta_info.year,
search_type=meta_info.type,
media_year=meta_info.year,
season_number=meta_info.begin_season
)
if not file_media_info and meta_info.year and self._rmt_match_mode == MatchMode.NORMAL and not strict:
# ้ไธฅๆ ผๆจกๅผไธๅปๆๅนดไปฝๅๆฅไธๆฌก
file_media_info = self.__search_tmdb(file_media_name=meta_info.get_name(),
search_type=meta_info.type
)
else:
                # ๆๅนดไปฝๅ
ๆ็ตๅฝฑๆฅ
file_media_info = self.__search_tmdb(file_media_name=meta_info.get_name(),
first_media_year=meta_info.year,
search_type=MediaType.MOVIE
)
# ๆฒกๆๅๆ็ต่งๅงๆฅ
if not file_media_info:
file_media_info = self.__search_tmdb(file_media_name=meta_info.get_name(),
first_media_year=meta_info.year,
search_type=MediaType.TV
)
if not file_media_info and self._rmt_match_mode == MatchMode.NORMAL and not strict:
# ้ไธฅๆ ผๆจกๅผไธๅปๆๅนดไปฝๅ็ฑปๅๅๆฅไธๆฌก
file_media_info = self.__search_multi_tmdb(file_media_name=meta_info.get_name())
if not file_media_info and self._search_tmdbweb:
# ไป็ฝ็ซๆฅ่ฏข
file_media_info = self.__search_tmdb_web(file_media_name=meta_info.get_name(),
mtype=meta_info.type)
if not file_media_info and self._chatgpt_enable:
# ้่ฟChatGPTๆฅ่ฏข
mtype, seaons, episodes, file_media_info = self.__search_chatgpt(file_name=title,
mtype=meta_info.type)
# ไฟฎๆญฃ็ฑปๅๅ้ๆฐ
meta_info.type = mtype
if not meta_info.get_season_string():
meta_info.set_season(seaons)
if not meta_info.get_episode_string():
meta_info.set_episode(episodes)
if not file_media_info and self._search_keyword:
                # ๅ
ณ้ฎๅญ็ๆต
cache_name = cacheman["tmdb_supply"].get(meta_info.get_name())
is_movie = False
if not cache_name:
cache_name, is_movie = self.__search_engine(meta_info.get_name())
cacheman["tmdb_supply"].set(meta_info.get_name(), cache_name)
if cache_name:
                    log.info("ใMetaใๅผๅง่พ
ๅฉๆฅ่ฏข๏ผ%s ..." % cache_name)
if is_movie:
file_media_info = self.__search_tmdb(file_media_name=cache_name, search_type=MediaType.MOVIE)
else:
file_media_info = self.__search_multi_tmdb(file_media_name=cache_name)
            # ่กฅๅ
ๅ
จ้ไฟกๆฏ
if file_media_info and not file_media_info.get("genres"):
file_media_info = self.get_tmdb_info(mtype=file_media_info.get("media_type"),
tmdbid=file_media_info.get("id"),
chinese=chinese,
append_to_response=append_to_response)
# ไฟๅญๅฐ็ผๅญ
if file_media_info is not None:
self.__insert_media_cache(media_key=media_key,
file_media_info=file_media_info)
else:
# ไฝฟ็จ็ผๅญไฟกๆฏ
cache_info = self.meta.get_meta_data_by_key(media_key)
if cache_info.get("id"):
file_media_info = self.get_tmdb_info(mtype=cache_info.get("type"),
tmdbid=cache_info.get("id"),
chinese=chinese,
append_to_response=append_to_response)
else:
file_media_info = None
# ่ตๅผTMDBไฟกๆฏๅนถ่ฟๅ
meta_info.set_tmdb_info(file_media_info)
return meta_info
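    # Lookup order in get_media_info(): the meta cache is consulted first, then a direct TMDB
    # search (with year/season refinements), then the optional TMDB-website scrape, the optional
    # ChatGPT-assisted parse, and finally the search-engine keyword fallback before the result
    # is cached and attached to the returned MetaInfo object.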
def __insert_media_cache(self, media_key, file_media_info):
"""
        ๅฐTMDBไฟกๆฏๆๅ
ฅ็ผๅญ
"""
if file_media_info:
# ็ผๅญๆ ้ข
cache_title = file_media_info.get(
"title") if file_media_info.get(
"media_type") == MediaType.MOVIE else file_media_info.get("name")
# ็ผๅญๅนดไปฝ
cache_year = file_media_info.get('release_date') if file_media_info.get(
"media_type") == MediaType.MOVIE else file_media_info.get('first_air_date')
if cache_year:
cache_year = cache_year[:4]
self.meta.update_meta_data({
media_key: {
"id": file_media_info.get("id"),
"type": file_media_info.get("media_type"),
"year": cache_year,
"title": cache_title,
"poster_path": file_media_info.get("poster_path"),
"backdrop_path": file_media_info.get("backdrop_path")
}
})
else:
self.meta.update_meta_data({media_key: {'id': 0}})
def get_media_info_on_files(self,
file_list,
tmdb_info=None,
media_type=None,
season=None,
episode_format: EpisodeFormat = None,
language=None,
chinese=True,
append_to_response=None):
"""
        ๆ นๆฎๆไปถๆธ
ๅ๏ผๆๅฎTMDBไฟกๆฏ๏ผ็จไบๆไปถๅ็งฐ็่ฏๅซ
        :param file_list: ๆไปถๆธ
ๅ๏ผๅฆๆๆฏๅ่กจไนๅฏไปฅๆฏๅไธชๆไปถ๏ผไนๅฏไปฅๆฏไธไธช็ฎๅฝ
        :param tmdb_info: ๅฆๆไผ ๅ
ฅTMDBไฟกๆฏๅไปฅ่ฏฅTMDBไฟกๆฏ่ตไบๆๆๆไปถ๏ผๅฆๅๆๅ็งฐไปTMDBๆ็ดข๏ผ็จไบๆๅทฅ่ฏๅซๆถไผ ๅ
ฅ
        :param media_type: ๅชไฝ็ฑปๅ๏ผ็ตๅฝฑใ็ต่งๅงใๅจๆผซ๏ผๅฆๆไผ ๅ
ฅไปฅ่ฏฅ็ฑปๅ่ตไบๆๆๆไปถ๏ผๅฆๅๆๅ็งฐไปTMDBๆ็ดขๅนถ่ฏๅซ
        :param season: ๅญฃๅท๏ผๅฆๆไผ ๅ
ฅไปฅ่ฏฅๅญฃๅท่ตไบๆๆๆไปถ๏ผๅฆๅไปๅ็งฐไธญ่ฏๅซ
        :param episode_format: EpisodeFormat
        :param language: ่ฏญ่จ
        :param chinese: ๅๆ ้ขไธบ่ฑๆๆถๆฏๅฆไปๅซๅไธญๆ็ดขไธญๆๅ็งฐ
        :param append_to_response: ้ๅ ไฟกๆฏ
        :return: ๅธฆๆTMDBไฟกๆฏ็ๆฏไธชๆไปถๅฏนๅบ็MetaInfoๅฏน่ฑกๅญๅ
ธ
"""
        # ๅญๅจๆไปถ่ทฏๅพไธๅชไฝ็ๅฏนๅบๅ
ณ็ณป
if not self.tmdb:
log.error("ใMetaใTMDB API Key ๆช่ฎพ็ฝฎ๏ผ")
return {}
# ่ฎพ็ฝฎ่ฏญ่จ
self.__set_language(language)
# ่ฟๅ็ปๆ
return_media_infos = {}
# ไธๆฏlist็่ฝฌไธบlist
if not isinstance(file_list, list):
file_list = [file_list]
        # ้ๅๆฏไธชๆไปถ๏ผ็ๅพๅบๆฅ็ๅ็งฐๆฏไธๆฏไธไธๆ ท๏ผไธไธๆ ท็ๅ
ๆ็ดขๅชไฝไฟกๆฏ
for file_path in file_list:
try:
if not os.path.exists(file_path):
log.warn("ใMetaใ%s ไธๅญๅจ" % file_path)
continue
# ่งฃๆๅชไฝๅ็งฐ
                # ๅ
็จ่ชๅทฑ็ๅ็งฐ
file_name = os.path.basename(file_path)
parent_name = os.path.basename(os.path.dirname(file_path))
parent_parent_name = os.path.basename(PathUtils.get_parent_paths(file_path, 2))
                # ่ฟๆปคๆ่ๅ
ๅ็็ฎๅฝไธ็ๅญๆไปถ
if not os.path.isdir(file_path) \
and PathUtils.get_bluray_dir(file_path):
                    log.info("ใMetaใ%s ่ทณ่ฟ่ๅ
ๅ็ๆไปถ๏ผ" % file_path)
continue
# ๆฒกๆ่ชๅธฆTMDBไฟกๆฏ
if not tmdb_info:
# ่ฏๅซๅ็งฐ
meta_info = MetaInfo(title=file_name)
# ่ฏๅซไธๅฐๅไฝฟ็จไธ็บง็ๅ็งฐ
if not meta_info.get_name() or not meta_info.year:
parent_info = MetaInfo(parent_name)
if not parent_info.get_name() or not parent_info.year:
parent_parent_info = MetaInfo(parent_parent_name)
parent_info.type = parent_parent_info.type if parent_parent_info.type and parent_info.type != MediaType.TV else parent_info.type
parent_info.cn_name = parent_parent_info.cn_name if parent_parent_info.cn_name else parent_info.cn_name
parent_info.en_name = parent_parent_info.en_name if parent_parent_info.en_name else parent_info.en_name
parent_info.year = parent_parent_info.year if parent_parent_info.year else parent_info.year
parent_info.begin_season = NumberUtils.max_ele(parent_info.begin_season,
parent_parent_info.begin_season)
if not meta_info.get_name():
meta_info.cn_name = parent_info.cn_name
meta_info.en_name = parent_info.en_name
if not meta_info.year:
meta_info.year = parent_info.year
if parent_info.type and parent_info.type == MediaType.TV \
and meta_info.type != MediaType.TV:
meta_info.type = parent_info.type
if meta_info.type == MediaType.TV:
meta_info.begin_season = NumberUtils.max_ele(parent_info.begin_season,
meta_info.begin_season)
if not meta_info.get_name() or not meta_info.type:
log.warn("ใRmtใ%s ๆช่ฏๅซๅบๆๆไฟกๆฏ๏ผ" % meta_info.org_string)
continue
                    # ๅบ้็ผๅญๅTMDB
media_key = self.__make_cache_key(meta_info)
if not self.meta.get_meta_data_by_key(media_key):
# ๆฒกๆ็ผๅญๆฐๆฎ
file_media_info = self.__search_tmdb(file_media_name=meta_info.get_name(),
first_media_year=meta_info.year,
search_type=meta_info.type,
media_year=meta_info.year,
season_number=meta_info.begin_season)
if not file_media_info:
if self._rmt_match_mode == MatchMode.NORMAL:
# ๅปๆๅนดไปฝๅๆฅไธๆฌก๏ผๆๅฏ่ฝๆฏๅนดไปฝ้่ฏฏ
file_media_info = self.__search_tmdb(file_media_name=meta_info.get_name(),
search_type=meta_info.type)
if not file_media_info and self._chatgpt_enable:
# ไปChatGPTๆฅ่ฏข
mtype, seaons, episodes, file_media_info = self.__search_chatgpt(file_name=file_path,
mtype=meta_info.type)
# ไฟฎๆญฃ็ฑปๅๅ้ๆฐ
meta_info.type = mtype
if not meta_info.get_season_string():
meta_info.set_season(seaons)
if not meta_info.get_episode_string():
meta_info.set_episode(episodes)
if not file_media_info and self._search_keyword:
cache_name = cacheman["tmdb_supply"].get(meta_info.get_name())
is_movie = False
if not cache_name:
cache_name, is_movie = self.__search_engine(meta_info.get_name())
cacheman["tmdb_supply"].set(meta_info.get_name(), cache_name)
if cache_name:
                            log.info("ใMetaใๅผๅง่พ
ๅฉๆฅ่ฏข๏ผ%s ..." % cache_name)
if is_movie:
file_media_info = self.__search_tmdb(file_media_name=cache_name,
search_type=MediaType.MOVIE)
else:
file_media_info = self.__search_multi_tmdb(file_media_name=cache_name)
                    # ่กฅๅ
จTMDBไฟกๆฏ
if file_media_info and not file_media_info.get("genres"):
file_media_info = self.get_tmdb_info(mtype=file_media_info.get("media_type"),
tmdbid=file_media_info.get("id"),
chinese=chinese,
append_to_response=append_to_response)
# ไฟๅญๅฐ็ผๅญ
if file_media_info is not None:
self.__insert_media_cache(media_key=media_key,
file_media_info=file_media_info)
else:
# ไฝฟ็จ็ผๅญไฟกๆฏ
cache_info = self.meta.get_meta_data_by_key(media_key)
if cache_info.get("id"):
file_media_info = self.get_tmdb_info(mtype=cache_info.get("type"),
tmdbid=cache_info.get("id"),
chinese=chinese,
append_to_response=append_to_response)
else:
# ็ผๅญไธบๆช่ฏๅซ
file_media_info = None
# ่ตๅผTMDBไฟกๆฏ
meta_info.set_tmdb_info(file_media_info)
# ่ชๅธฆTMDBไฟกๆฏ
else:
meta_info = MetaInfo(title=file_name, mtype=media_type)
meta_info.set_tmdb_info(tmdb_info)
if season and meta_info.type != MediaType.MOVIE:
meta_info.begin_season = int(season)
if episode_format:
begin_ep, end_ep, part = episode_format.split_episode(file_name)
if begin_ep is not None:
meta_info.begin_episode = begin_ep
meta_info.part = part
if end_ep is not None:
meta_info.end_episode = end_ep
                # ๅ ๅ
ฅ็ผๅญ
self.save_rename_cache(file_name, tmdb_info)
# ๆๆไปถ่ทฏ็จๅญๅจ
return_media_infos[file_path] = meta_info
except Exception as err:
print(str(err))
log.error("ใRmtใๅ็้่ฏฏ๏ผ%s - %s" % (str(err), traceback.format_exc()))
# ๅพช็ฏ็ปๆ
return return_media_infos
def __dict_tmdbpersons(self, infos, chinese=True):
"""
        TMDBไบบๅไฟกๆฏ่ฝฌไธบๅญๅ
ธ
"""
if not infos:
return []
ret_infos = []
for info in infos:
if chinese:
name = self.get_tmdbperson_chinese_name(person_id=info.get("id")) or info.get("name")
else:
name = info.get("name")
tmdbid = info.get("id")
image = Config().get_tmdbimage_url(info.get("profile_path"), prefix="h632") \
if info.get("profile_path") else ""
ret_infos.append({
"id": tmdbid,
"name": name,
"role": info.get("name") if info.get("name") != name else "",
"image": image
})
return ret_infos
@staticmethod
def __dict_tmdbinfos(infos, mtype=None, poster_filter=False):
"""
        TMDB็ตๅฝฑไฟกๆฏ่ฝฌไธบๅญๅ
ธ
"""
if not infos:
return []
ret_infos = []
for info in infos:
tmdbid = info.get("id")
            vote = round(float(info.get("vote_average")), 1) if info.get("vote_average") else 0
image = Config().get_tmdbimage_url(info.get("poster_path"))
if poster_filter and not image:
continue
overview = info.get("overview")
if mtype:
media_type = mtype.value
year = info.get("release_date")[0:4] if info.get(
"release_date") and mtype == MediaType.MOVIE else info.get(
"first_air_date")[0:4] if info.get(
"first_air_date") else ""
typestr = 'MOV' if mtype == MediaType.MOVIE else 'TV'
title = info.get("title") if mtype == MediaType.MOVIE else info.get("name")
else:
media_type = MediaType.MOVIE.value if info.get(
"media_type") == "movie" else MediaType.TV.value
year = info.get("release_date")[0:4] if info.get(
"release_date") and info.get(
"media_type") == "movie" else info.get(
"first_air_date")[0:4] if info.get(
"first_air_date") else ""
typestr = 'MOV' if info.get("media_type") == "movie" else 'TV'
title = info.get("title") if info.get("media_type") == "movie" else info.get("name")
ret_infos.append({
'id': tmdbid,
'orgid': tmdbid,
'tmdbid': tmdbid,
'title': title,
'type': typestr,
'media_type': media_type,
'year': year,
'vote': vote,
'image': image,
'overview': overview
})
return ret_infos
def get_tmdb_hot_movies(self, page):
"""
่ทๅ็ญ้จ็ตๅฝฑ
:param page: ็ฌฌๅ ้กต
:return: TMDBไฟกๆฏๅ่กจ
"""
if not self.movie:
return []
return self.__dict_tmdbinfos(self.movie.popular(page), MediaType.MOVIE)
def get_tmdb_hot_tvs(self, page):
"""
่ทๅ็ญ้จ็ต่งๅง
:param page: ็ฌฌๅ ้กต
:return: TMDBไฟกๆฏๅ่กจ
"""
if not self.tv:
return []
return self.__dict_tmdbinfos(self.tv.popular(page), MediaType.TV)
def get_tmdb_new_movies(self, page):
"""
่ทๅๆๆฐ็ตๅฝฑ
:param page: ็ฌฌๅ ้กต
:return: TMDBไฟกๆฏๅ่กจ
"""
if not self.movie:
return []
return self.__dict_tmdbinfos(self.movie.now_playing(page), MediaType.MOVIE)
def get_tmdb_new_tvs(self, page):
"""
่ทๅๆๆฐ็ต่งๅง
:param page: ็ฌฌๅ ้กต
:return: TMDBไฟกๆฏๅ่กจ
"""
if not self.tv:
return []
return self.__dict_tmdbinfos(self.tv.on_the_air(page), MediaType.TV)
def get_tmdb_upcoming_movies(self, page):
"""
่ทๅๅณๅฐไธๆ ็ตๅฝฑ
:param page: ็ฌฌๅ ้กต
:return: TMDBไฟกๆฏๅ่กจ
"""
if not self.movie:
return []
return self.__dict_tmdbinfos(self.movie.upcoming(page), MediaType.MOVIE)
def get_tmdb_trending_all_week(self, page=1):
"""
        ่ทๅๆฌๅจๆต่ก่ถๅฟ
:param page: ็ฌฌๅ ้กต
:return: TMDBไฟกๆฏๅ่กจ
"""
if not self.movie:
return []
return self.__dict_tmdbinfos(self.trending.all_week(page=page))
def __get_tmdb_movie_detail(self, tmdbid, append_to_response=None):
"""
่ทๅ็ตๅฝฑ็่ฏฆๆ
:param tmdbid: TMDB ID
:return: TMDBไฟกๆฏ
"""
"""
{
"adult": false,
"backdrop_path": "/r9PkFnRUIthgBp2JZZzD380MWZy.jpg",
"belongs_to_collection": {
"id": 94602,
"name": "็ฉฟ้ดๅญ็็ซ๏ผ็ณปๅ๏ผ",
"poster_path": "/anHwj9IupRoRZZ98WTBvHpTiE6A.jpg",
"backdrop_path": "/feU1DWV5zMWxXUHJyAIk3dHRQ9c.jpg"
},
"budget": 90000000,
"genres": [
{
"id": 16,
"name": "ๅจ็ป"
},
{
"id": 28,
"name": "ๅจไฝ"
},
{
"id": 12,
"name": "ๅ้ฉ"
},
{
"id": 35,
"name": "ๅๅง"
},
{
"id": 10751,
"name": "ๅฎถๅบญ"
},
{
"id": 14,
"name": "ๅฅๅนป"
}
],
"homepage": "",
"id": 315162,
"imdb_id": "tt3915174",
"original_language": "en",
"original_title": "Puss in Boots: The Last Wish",
"overview": "ๆถ้11ๅนด๏ผ่ญๅฑ่ชๅคงๅ็ฑๅ่็็ซๅคงไพ ๅๆฅไบ๏ผๅฆไป็็ซๅคงไพ ๏ผๅฎไธๅฐผๅฅฅยท็ญๅพทๆๆฏ ้
้ณ๏ผ๏ผไพๆงๅนฝ้ปๆฝๆดๅไธๆๅฐ่ใๆฐๆฌกโ่ฑๅผ้ๅฝโๅ๏ผไนๆกๅฝๅฆไปๅชๅฉไธๆก๏ผไบๆฏไธๅพไธ่ฏทๆฑ่ชๅทฑ็่ๆญๆกฃๅ
ผโๅฎฟๆโโโ่ฟทไบบ็่ฝฏ็ชๅฆ๏ผ่จๅฐ็ยทๆตท่ถๅ
้
้ณ๏ผๆฅๆฝไปฅๆดๆๆฅๆขๅค่ชๅทฑ็ไนๆก็ๅฝใ",
"popularity": 8842.129,
"poster_path": "/rnn30OlNPiC3IOoWHKoKARGsBRK.jpg",
"production_companies": [
{
"id": 33,
"logo_path": "/8lvHyhjr8oUKOOy2dKXoALWKdp0.png",
"name": "Universal Pictures",
"origin_country": "US"
},
{
"id": 521,
"logo_path": "/kP7t6RwGz2AvvTkvnI1uteEwHet.png",
"name": "DreamWorks Animation",
"origin_country": "US"
}
],
"production_countries": [
{
"iso_3166_1": "US",
"name": "United States of America"
}
],
"release_date": "2022-12-07",
"revenue": 260725470,
"runtime": 102,
"spoken_languages": [
{
"english_name": "English",
"iso_639_1": "en",
"name": "English"
},
{
"english_name": "Spanish",
"iso_639_1": "es",
"name": "Espaรฑol"
}
],
"status": "Released",
"tagline": "",
"title": "็ฉฟ้ดๅญ็็ซ2",
"video": false,
"vote_average": 8.614,
"vote_count": 2291
}
"""
if not self.movie:
return {}
try:
log.info("[Meta] Querying TMDB movie: %s ..." % tmdbid)
tmdbinfo = self.movie.details(tmdbid, append_to_response)
if tmdbinfo:
log.info(f"[Meta] {tmdbid} query result: {tmdbinfo.get('title')}")
return tmdbinfo or {}
except Exception as e:
print(str(e))
return None
def __get_tmdb_tv_detail(self, tmdbid, append_to_response=None):
"""
Get TV show details
:param tmdbid: TMDB ID
:return: TMDB info
"""
"""
{
"adult": false,
"backdrop_path": "/uDgy6hyPd82kOHh6I95FLtLnj6p.jpg",
"created_by": [
{
"id": 35796,
"credit_id": "5e84f06a3344c600153f6a57",
"name": "Craig Mazin",
"gender": 2,
"profile_path": "/uEhna6qcMuyU5TP7irpTUZ2ZsZc.jpg"
},
{
"id": 1295692,
"credit_id": "5e84f03598f1f10016a985c0",
"name": "Neil Druckmann",
"gender": 2,
"profile_path": "/bVUsM4aYiHbeSYE1xAw2H5Z1ANU.jpg"
}
],
"episode_run_time": [],
"first_air_date": "2023-01-15",
"genres": [
{
"id": 18,
"name": "ๅงๆ
"
},
{
"id": 10765,
"name": "Sci-Fi & Fantasy"
},
{
"id": 10759,
"name": "ๅจไฝๅ้ฉ"
}
],
"homepage": "https://www.hbo.com/the-last-of-us",
"id": 100088,
"in_production": true,
"languages": [
"en"
],
"last_air_date": "2023-01-15",
"last_episode_to_air": {
"air_date": "2023-01-15",
"episode_number": 1,
"id": 2181581,
"name": "ๅฝไฝ ่ฟทๅคฑๅจ้ปๆไธญ",
"overview": "ๅจไธๅบๅ
จ็ๆง็ๆต่ก็
ๆงๆฏไบๆๆไนๅ๏ผไธไธช้กฝๅผบ็ๅนธๅญ่
่ด่ดฃ็
ง้กพไธไธช 14 ๅฒ็ๅฐๅฅณๅญฉ๏ผๅฅนๅฏ่ฝๆฏไบบ็ฑปๆๅ็ๅธๆใ",
"production_code": "",
"runtime": 81,
"season_number": 1,
"show_id": 100088,
"still_path": "/aRquEWm8wWF1dfa9uZ1TXLvVrKD.jpg",
"vote_average": 8,
"vote_count": 33
},
"name": "ๆๅ็่ฟ่
",
"next_episode_to_air": {
"air_date": "2023-01-22",
"episode_number": 2,
"id": 4071039,
"name": "่ซ่ๅๅผ่",
"overview": "",
"production_code": "",
"runtime": 55,
"season_number": 1,
"show_id": 100088,
"still_path": "/jkUtYTmeap6EvkHI4n0j5IRFrIr.jpg",
"vote_average": 10,
"vote_count": 1
},
"networks": [
{
"id": 49,
"name": "HBO",
"logo_path": "/tuomPhY2UtuPTqqFnKMVHvSb724.png",
"origin_country": "US"
}
],
"number_of_episodes": 9,
"number_of_seasons": 1,
"origin_country": [
"US"
],
"original_language": "en",
"original_name": "The Last of Us",
"overview": "ไธๆ็่็ซๆ
่่ไนๅ็็พๅฝ๏ผ่ขซ็่ๆๆ็ไบบ้ฝๅๆไบๅฏๆ็ๆช็ฉ๏ผไนๅฐ๏ผJoel๏ผไธบไบๆขๅๆญฆๅจ็ญๅบๅฐๅฐๅฅณๅญฉๅฟ่พ่๏ผEllie๏ผ้ๅฐๆๅฎๅฐ็น๏ผ็ฑๆญคๅผๅงไบไธคไบบ็ฉฟ่ถ็พๅฝ็ๆผซๆผซๆ
็จใ",
"popularity": 5585.639,
"poster_path": "/nOY3VBFO0VnlN9nlRombnMTztyh.jpg",
"production_companies": [
{
"id": 3268,
"logo_path": "/tuomPhY2UtuPTqqFnKMVHvSb724.png",
"name": "HBO",
"origin_country": "US"
},
{
"id": 11073,
"logo_path": "/aCbASRcI1MI7DXjPbSW9Fcv9uGR.png",
"name": "Sony Pictures Television Studios",
"origin_country": "US"
},
{
"id": 23217,
"logo_path": "/kXBZdQigEf6QiTLzo6TFLAa7jKD.png",
"name": "Naughty Dog",
"origin_country": "US"
},
{
"id": 115241,
"logo_path": null,
"name": "The Mighty Mint",
"origin_country": "US"
},
{
"id": 119645,
"logo_path": null,
"name": "Word Games",
"origin_country": "US"
},
{
"id": 125281,
"logo_path": "/3hV8pyxzAJgEjiSYVv1WZ0ZYayp.png",
"name": "PlayStation Productions",
"origin_country": "US"
}
],
"production_countries": [
{
"iso_3166_1": "US",
"name": "United States of America"
}
],
"seasons": [
{
"air_date": "2023-01-15",
"episode_count": 9,
"id": 144593,
"name": "็ฌฌ 1 ๅญฃ",
"overview": "",
"poster_path": "/aUQKIpZZ31KWbpdHMCmaV76u78T.jpg",
"season_number": 1
}
],
"spoken_languages": [
{
"english_name": "English",
"iso_639_1": "en",
"name": "English"
}
],
"status": "Returning Series",
"tagline": "",
"type": "Scripted",
"vote_average": 8.924,
"vote_count": 601
}
"""
if not self.tv:
return {}
try:
log.info("[Meta] Querying TMDB TV show: %s ..." % tmdbid)
tmdbinfo = self.tv.details(tmdbid, append_to_response)
if tmdbinfo:
log.info(f"[Meta] {tmdbid} query result: {tmdbinfo.get('name')}")
return tmdbinfo or {}
except Exception as e:
print(str(e))
return None
def get_tmdb_tv_season_detail(self, tmdbid, season: int):
"""
Get details of a TV show season
:param tmdbid: TMDB ID
:param season: season number
:return: TMDB info
"""
"""
{
"_id": "5e614cd3357c00001631a6ef",
"air_date": "2023-01-15",
"episodes": [
{
"air_date": "2023-01-15",
"episode_number": 1,
"id": 2181581,
"name": "ๅฝไฝ ่ฟทๅคฑๅจ้ปๆไธญ",
"overview": "ๅจไธๅบๅ
จ็ๆง็ๆต่ก็
ๆงๆฏไบๆๆไนๅ๏ผไธไธช้กฝๅผบ็ๅนธๅญ่
่ด่ดฃ็
ง้กพไธไธช 14 ๅฒ็ๅฐๅฅณๅญฉ๏ผๅฅนๅฏ่ฝๆฏไบบ็ฑปๆๅ็ๅธๆใ",
"production_code": "",
"runtime": 81,
"season_number": 1,
"show_id": 100088,
"still_path": "/aRquEWm8wWF1dfa9uZ1TXLvVrKD.jpg",
"vote_average": 8,
"vote_count": 33,
"crew": [
{
"job": "Writer",
"department": "Writing",
"credit_id": "619c370063536a00619a08ee",
"adult": false,
"gender": 2,
"id": 35796,
"known_for_department": "Writing",
"name": "Craig Mazin",
"original_name": "Craig Mazin",
"popularity": 15.211,
"profile_path": "/uEhna6qcMuyU5TP7irpTUZ2ZsZc.jpg"
},
],
"guest_stars": [
{
"character": "Marlene",
"credit_id": "63c4ca5e5f2b8d00aed539fc",
"order": 500,
"adult": false,
"gender": 1,
"id": 1253388,
"known_for_department": "Acting",
"name": "Merle Dandridge",
"original_name": "Merle Dandridge",
"popularity": 21.679,
"profile_path": "/lKwHdTtDf6NGw5dUrSXxbfkZLEk.jpg"
}
]
},
],
"name": "็ฌฌ 1 ๅญฃ",
"overview": "",
"id": 144593,
"poster_path": "/aUQKIpZZ31KWbpdHMCmaV76u78T.jpg",
"season_number": 1
}
"""
if not self.tv:
return {}
try:
log.info("[Meta] Querying TMDB TV show: %s, season: %s ..." % (tmdbid, season))
tmdbinfo = self.tv.season_details(tmdbid, season)
return tmdbinfo or {}
except Exception as e:
print(str(e))
return {}
def get_tmdb_tv_seasons_byid(self, tmdbid):
"""
Query all seasons of a TMDB TV show by its TMDB ID
"""
if not tmdbid:
return []
return self.get_tmdb_tv_seasons(
tv_info=self.__get_tmdb_tv_detail(
tmdbid=tmdbid
)
)
@staticmethod
def get_tmdb_tv_seasons(tv_info):
"""
Query all seasons of a TMDB TV show
:param tv_info: TMDB info of the show
:return: list of dicts with season_number and episode_count (total episodes per season)
"""
"""
"seasons": [
{
"air_date": "2006-01-08",
"episode_count": 11,
"id": 3722,
"name": "็นๅซ็ฏ",
"overview": "",
"poster_path": "/snQYndfsEr3Sto2jOmkmsQuUXAQ.jpg",
"season_number": 0
},
{
"air_date": "2005-03-27",
"episode_count": 9,
"id": 3718,
"name": "็ฌฌ 1 ๅญฃ",
"overview": "",
"poster_path": "/foM4ImvUXPrD2NvtkHyixq5vhPx.jpg",
"season_number": 1
}
]
"""
if not tv_info:
return []
ret_info = []
for info in tv_info.get("seasons") or []:
if not info.get("season_number"):
continue
ret_info.append({
"air_date": info.get("air_date"),
"episode_count": info.get("episode_count"),
"id": info.get("id"),
"name": info.get("name"),
"overview": info.get("overview"),
"poster_path": Config().get_tmdbimage_url(info.get("poster_path")) if info.get("poster_path") else "",
"season_number": info.get("season_number")
})
ret_info.reverse()
return ret_info
def get_tmdb_season_episodes(self, tmdbid, season: int):
"""
:param tmdbid: TMDB ID
:param season: season number
"""
"""
Get the episode info of a given season from TMDB season data
"""
"""
"episodes": [
{
"air_date": "2023-01-15",
"episode_number": 1,
"id": 2181581,
"name": "ๅฝไฝ ่ฟทๅคฑๅจ้ปๆไธญ",
"overview": "ๅจไธๅบๅ
จ็ๆง็ๆต่ก็
ๆงๆฏไบๆๆไนๅ๏ผไธไธช้กฝๅผบ็ๅนธๅญ่
่ด่ดฃ็
ง้กพไธไธช 14 ๅฒ็ๅฐๅฅณๅญฉ๏ผๅฅนๅฏ่ฝๆฏไบบ็ฑปๆๅ็ๅธๆใ",
"production_code": "",
"runtime": 81,
"season_number": 1,
"show_id": 100088,
"still_path": "/aRquEWm8wWF1dfa9uZ1TXLvVrKD.jpg",
"vote_average": 8,
"vote_count": 33
},
]
"""
if not tmdbid:
return []
season_info = self.get_tmdb_tv_season_detail(tmdbid=tmdbid, season=season)
if not season_info:
return []
ret_info = []
for info in season_info.get("episodes") or []:
ret_info.append({
"air_date": info.get("air_date"),
"episode_number": info.get("episode_number"),
"id": info.get("id"),
"name": info.get("name"),
"overview": info.get("overview"),
"production_code": info.get("production_code"),
"runtime": info.get("runtime"),
"season_number": info.get("season_number"),
"show_id": info.get("show_id"),
"still_path": Config().get_tmdbimage_url(info.get("still_path")) if info.get("still_path") else "",
"vote_average": info.get("vote_average")
})
ret_info.reverse()
return ret_info
def get_tmdb_backdrop(self, mtype, tmdbid):
"""
Get a TMDB backdrop image
"""
if not tmdbid:
return ""
tmdbinfo = self.get_tmdb_info(mtype=mtype,
tmdbid=tmdbid,
append_to_response="images",
chinese=False)
if not tmdbinfo:
return ""
results = self.get_tmdb_backdrops(tmdbinfo=tmdbinfo, original=False)
return results[0] if results else ""
@staticmethod
def get_tmdb_backdrops(tmdbinfo, original=True):
"""
Get TMDB backdrop images
"""
"""
{
"backdrops": [
{
"aspect_ratio": 1.778,
"height": 2160,
"iso_639_1": "en",
"file_path": "/qUroDlCDUMwRWbkyjZGB9THkMgZ.jpg",
"vote_average": 5.312,
"vote_count": 1,
"width": 3840
},
{
"aspect_ratio": 1.778,
"height": 2160,
"iso_639_1": "en",
"file_path": "/iyxvxEQIfQjzJJTfszZxmH5UV35.jpg",
"vote_average": 0,
"vote_count": 0,
"width": 3840
},
{
"aspect_ratio": 1.778,
"height": 720,
"iso_639_1": "en",
"file_path": "/8SRY6IcMKO1E5p83w7bjvcqklp9.jpg",
"vote_average": 0,
"vote_count": 0,
"width": 1280
},
{
"aspect_ratio": 1.778,
"height": 1080,
"iso_639_1": "en",
"file_path": "/erkJ7OxJWFdLBOcn2MvIdhTLHTu.jpg",
"vote_average": 0,
"vote_count": 0,
"width": 1920
}
]
}
"""
if not tmdbinfo:
return []
prefix_url = Config().get_tmdbimage_url(r"%s", prefix="original") \
if original else Config().get_tmdbimage_url(r"%s")
backdrops = tmdbinfo.get("images", {}).get("backdrops") or []
result = [prefix_url % backdrop.get("file_path") for backdrop in backdrops]
result.append(prefix_url % tmdbinfo.get("backdrop_path"))
return result
@staticmethod
def get_tmdb_season_episodes_num(tv_info, season: int):
"""
Get how many episodes a specific season has from TMDB season info
:param season: season number
:param tv_info: TMDB season info already fetched
:return: total number of episodes in that season
"""
if not tv_info:
return 0
seasons = tv_info.get("seasons")
if not seasons:
return 0
for sea in seasons:
if sea.get("season_number") == int(season):
return int(sea.get("episode_count"))
return 0
@staticmethod
def __dict_media_crews(crews):
"""
Convert media crew members into dictionaries
"""
return [{
"id": crew.get("id"),
"gender": crew.get("gender"),
"known_for_department": crew.get("known_for_department"),
"name": crew.get("name"),
"original_name": crew.get("original_name"),
"popularity": crew.get("popularity"),
"image": Config().get_tmdbimage_url(crew.get("profile_path"), prefix="h632"),
"credit_id": crew.get("credit_id"),
"department": crew.get("department"),
"job": crew.get("job"),
"profile": 'https://www.themoviedb.org/person/%s' % crew.get('id')
} for crew in crews or []]
@staticmethod
def __dict_media_casts(casts):
"""
Convert media cast members into dictionaries
"""
return [{
"id": cast.get("id"),
"gender": cast.get("gender"),
"known_for_department": cast.get("known_for_department"),
"name": cast.get("name"),
"original_name": cast.get("original_name"),
"popularity": cast.get("popularity"),
"image": Config().get_tmdbimage_url(cast.get("profile_path"), prefix="h632"),
"cast_id": cast.get("cast_id"),
"role": cast.get("character"),
"credit_id": cast.get("credit_id"),
"order": cast.get("order"),
"profile": 'https://www.themoviedb.org/person/%s' % cast.get('id')
} for cast in casts or []]
def get_tmdb_directors_actors(self, tmdbinfo):
"""
Query directors and actors
:param tmdbinfo: TMDB metadata
:return: list of directors, list of actors
"""
"""
"cast": [
{
"adult": false,
"gender": 2,
"id": 3131,
"known_for_department": "Acting",
"name": "Antonio Banderas",
"original_name": "Antonio Banderas",
"popularity": 60.896,
"profile_path": "/iWIUEwgn2KW50MssR7tdPeFoRGW.jpg",
"cast_id": 2,
"character": "Puss in Boots (voice)",
"credit_id": "6052480e197de4006bb47b9a",
"order": 0
}
],
"crew": [
{
"adult": false,
"gender": 2,
"id": 5524,
"known_for_department": "Production",
"name": "Andrew Adamson",
"original_name": "Andrew Adamson",
"popularity": 9.322,
"profile_path": "/qqIAVKAe5LHRbPyZUlptsqlo4Kb.jpg",
"credit_id": "63b86b2224b33300a0585bf1",
"department": "Production",
"job": "Executive Producer"
}
]
"""
if not tmdbinfo:
return [], []
_credits = tmdbinfo.get("credits")
if not _credits:
return [], []
directors = []
actors = []
for cast in self.__dict_media_casts(_credits.get("cast")):
if cast.get("known_for_department") == "Acting":
actors.append(cast)
for crew in self.__dict_media_crews(_credits.get("crew")):
if crew.get("job") == "Director":
directors.append(crew)
return directors, actors
def get_tmdb_cats(self, mtype, tmdbid):
"""
Get the cast list from TMDB
:param mtype: media type
:param tmdbid: TMDB ID
"""
try:
if mtype == MediaType.MOVIE:
if not self.movie:
return []
return self.__dict_media_casts(self.movie.credits(tmdbid).get("cast"))
else:
if not self.tv:
return []
return self.__dict_media_casts(self.tv.credits(tmdbid).get("cast"))
except Exception as err:
print(str(err))
return []
@staticmethod
def get_tmdb_genres_names(tmdbinfo):
"""
Get genre names from TMDB data
"""
"""
"genres": [
{
"id": 16,
"name": "ๅจ็ป"
},
{
"id": 28,
"name": "ๅจไฝ"
},
{
"id": 12,
"name": "ๅ้ฉ"
},
{
"id": 35,
"name": "ๅๅง"
},
{
"id": 10751,
"name": "ๅฎถๅบญ"
},
{
"id": 14,
"name": "ๅฅๅนป"
}
]
"""
if not tmdbinfo:
return ""
genres = tmdbinfo.get("genres") or []
genres_list = [genre.get("name") for genre in genres]
return ", ".join(genres_list) if genres_list else ""
def get_tmdb_genres(self, mtype):
"""
Get the genre list from TMDB
:param mtype: media type
"""
if not self.genre:
return []
try:
if mtype == MediaType.MOVIE:
return self.genre.movie_list()
else:
return self.genre.tv_list()
except Exception as err:
print(str(err))
return []
@staticmethod
def get_get_production_country_names(tmdbinfo):
"""
Get production country names from TMDB data
"""
"""
"production_countries": [
{
"iso_3166_1": "US",
"name": "็พๅฝ"
}
]
"""
if not tmdbinfo:
return ""
countries = tmdbinfo.get("production_countries") or []
countries_list = [country.get("name") for country in countries]
return ", ".join(countries_list) if countries_list else ""
@staticmethod
def get_tmdb_production_company_names(tmdbinfo):
"""
Get production company names from TMDB data
"""
"""
"production_companies": [
{
"id": 2,
"logo_path": "/wdrCwmRnLFJhEoH8GSfymY85KHT.png",
"name": "DreamWorks Animation",
"origin_country": "US"
}
]
"""
if not tmdbinfo:
return ""
companies = tmdbinfo.get("production_companies") or []
companies_list = [company.get("name") for company in companies]
return ", ".join(companies_list) if companies_list else ""
@staticmethod
def get_tmdb_crews(tmdbinfo, nums=None):
"""
Get crew members from TMDB data
"""
if not tmdbinfo:
return ""
crews = tmdbinfo.get("credits", {}).get("crew") or []
result = [{crew.get("name"): crew.get("job")} for crew in crews]
if nums:
return result[:nums]
else:
return result
def get_tmdb_en_title(self, media_info):
"""
Get the English title from TMDB
"""
en_info = self.get_tmdb_info(mtype=media_info.type,
tmdbid=media_info.tmdb_id,
language="en",
chinese=False)
if en_info:
return en_info.get("title") if media_info.type == MediaType.MOVIE else en_info.get("name")
return None
def get_tmdb_zhtw_title(self, media_info):
"""
Get the Traditional Chinese title from TMDB
"""
zhtw_info = self.get_tmdb_info(mtype=media_info.type,
tmdbid=media_info.tmdb_id,
language="zh-TW",
chinese=False)
if zhtw_info:
return zhtw_info.get("title") if media_info.type == MediaType.MOVIE else zhtw_info.get("name")
return None
def get_episode_title(self, media_info, language=None):
"""
Get the episode title
"""
if media_info.type == MediaType.MOVIE:
return None
# Set the language
self.__set_language(language)
if media_info.tmdb_id:
if not media_info.begin_episode:
return None
episodes = self.get_tmdb_season_episodes(tmdbid=media_info.tmdb_id,
season=int(media_info.get_season_seq()))
for episode in episodes:
if episode.get("episode_number") == media_info.begin_episode:
return episode.get("name")
return None
def get_movie_similar(self, tmdbid, page=1):
"""
Query similar movies
"""
if not self.movie:
return []
try:
movies = self.movie.similar(movie_id=tmdbid, page=page) or []
return self.__dict_tmdbinfos(movies, MediaType.MOVIE)
except Exception as e:
print(str(e))
return []
def get_movie_recommendations(self, tmdbid, page=1):
"""
Query recommended movies related to the given movie
"""
if not self.movie:
return []
try:
movies = self.movie.recommendations(movie_id=tmdbid, page=page) or []
return self.__dict_tmdbinfos(movies, MediaType.MOVIE)
except Exception as e:
print(str(e))
return []
def get_tv_similar(self, tmdbid, page=1):
"""
Query similar TV shows
"""
if not self.tv:
return []
try:
tvs = self.tv.similar(tv_id=tmdbid, page=page) or []
return self.__dict_tmdbinfos(tvs, MediaType.TV)
except Exception as e:
print(str(e))
return []
def get_tv_recommendations(self, tmdbid, page=1):
"""
Query recommended TV shows related to the given show
"""
if not self.tv:
return []
try:
tvs = self.tv.recommendations(tv_id=tmdbid, page=page) or []
return self.__dict_tmdbinfos(tvs, MediaType.TV)
except Exception as e:
print(str(e))
return []
def get_tmdb_discover(self, mtype, params=None, page=1):
"""
Browse/discover movies and TV shows (with complex filter conditions)
"""
if not self.discover:
return []
try:
if mtype == MediaType.MOVIE:
movies = self.discover.discover_movies(params=params, page=page)
return self.__dict_tmdbinfos(infos=movies, mtype=mtype, poster_filter=True)
elif mtype == MediaType.TV:
tvs = self.discover.discover_tv_shows(params=params, page=page)
return self.__dict_tmdbinfos(infos=tvs, mtype=mtype, poster_filter=True)
except Exception as e:
print(str(e))
return []
def get_tmdb_discover_movies_pages(self, params=None):
"""
Get the total number of pages for movie discovery
"""
if not self.discover:
return 0
try:
return self.discover.discover_movies_pages(params=params)
except Exception as e:
print(str(e))
return 0
def get_person_medias(self, personid, mtype=None, page=1):
"""
Query movie/TV works related to a person
"""
if not self.person:
return []
try:
if mtype == MediaType.MOVIE:
movies = self.person.movie_credits(person_id=personid) or []
result = self.__dict_tmdbinfos(movies, mtype)
elif mtype:
tvs = self.person.tv_credits(person_id=personid) or []
result = self.__dict_tmdbinfos(tvs, mtype)
else:
medias = self.person.combined_credits(person_id=personid) or []
result = self.__dict_tmdbinfos(medias)
return result[(page - 1) * 20: page * 20]
except Exception as e:
print(str(e))
return []
@staticmethod
def __search_engine(feature_name):
"""
Helper that identifies keywords via web search engines
"""
is_movie = False
if not feature_name:
return None, is_movie
# Remove unnecessary characters
feature_name = re.compile(r"^\w+ๅญๅน[็ป็คพ]?", re.IGNORECASE).sub("", feature_name)
backlist = sorted(KEYWORD_BLACKLIST, key=lambda x: len(x), reverse=True)
for single in backlist:
feature_name = feature_name.replace(single, " ")
if not feature_name:
return None, is_movie
def cal_score(strongs, r_dict):
for i, s in enumerate(strongs):
if len(strongs) < 5:
if i < 2:
score = KEYWORD_SEARCH_WEIGHT_3[0]
else:
score = KEYWORD_SEARCH_WEIGHT_3[1]
elif len(strongs) < 10:
if i < 2:
score = KEYWORD_SEARCH_WEIGHT_2[0]
else:
score = KEYWORD_SEARCH_WEIGHT_2[1] if i < (len(strongs) >> 1) else KEYWORD_SEARCH_WEIGHT_2[2]
else:
if i < 2:
score = KEYWORD_SEARCH_WEIGHT_1[0]
else:
score = KEYWORD_SEARCH_WEIGHT_1[1] if i < (len(strongs) >> 2) else KEYWORD_SEARCH_WEIGHT_1[
2] if i < (
len(strongs) >> 1) \
else KEYWORD_SEARCH_WEIGHT_1[3] if i < (len(strongs) >> 2 + len(strongs) >> 1) else \
KEYWORD_SEARCH_WEIGHT_1[
4]
if r_dict.__contains__(s.lower()):
r_dict[s.lower()] += score
continue
r_dict[s.lower()] = score
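# Editor-added note (illustrative, not from the original source): cal_score gives larger
# weights to matches that appear earlier in the search results; which weight table
# (KEYWORD_SEARCH_WEIGHT_1/2/3) is used depends on how many strong matches were found,
# and repeated matches accumulate their scores in r_dict.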
bing_url = "https://www.cn.bing.com/search?q=%s&qs=n&form=QBRE&sp=-1" % feature_name
baidu_url = "https://www.baidu.com/s?ie=utf-8&tn=baiduhome_pg&wd=%s" % feature_name
res_bing = RequestUtils(timeout=5).get_res(url=bing_url)
res_baidu = RequestUtils(timeout=5).get_res(url=baidu_url)
ret_dict = {}
if res_bing and res_bing.status_code == 200:
html_text = res_bing.text
if html_text:
html = etree.HTML(html_text)
strongs_bing = list(
filter(lambda x: (0 if not x else difflib.SequenceMatcher(None, feature_name,
x).ratio()) > KEYWORD_STR_SIMILARITY_THRESHOLD,
map(lambda x: x.text, html.cssselect(
"#sp_requery strong, #sp_recourse strong, #tile_link_cn strong, .b_ad .ad_esltitle~div strong, h2 strong, .b_caption p strong, .b_snippetBigText strong, .recommendationsTableTitle+.b_slideexp strong, .recommendationsTableTitle+table strong, .recommendationsTableTitle+ul strong, .pageRecoContainer .b_module_expansion_control strong, .pageRecoContainer .b_title>strong, .b_rs strong, .b_rrsr strong, #dict_ans strong, .b_listnav>.b_ans_stamp>strong, #b_content #ans_nws .na_cnt strong, .adltwrnmsg strong"))))
if strongs_bing:
title = html.xpath("//aside//h2[@class = \" b_entityTitle\"]/text()")
if len(title) > 0:
if title:
t = re.compile(r"\s*\(\d{4}\)$").sub("", title[0])
ret_dict[t] = 200
if html.xpath("//aside//div[@data-feedbk-ids = \"Movie\"]"):
is_movie = True
cal_score(strongs_bing, ret_dict)
if res_baidu and res_baidu.status_code == 200:
html_text = res_baidu.text
if html_text:
html = etree.HTML(html_text)
ems = list(
filter(lambda x: (0 if not x else difflib.SequenceMatcher(None, feature_name,
x).ratio()) > KEYWORD_STR_SIMILARITY_THRESHOLD,
map(lambda x: x.text, html.cssselect("em"))))
if len(ems) > 0:
cal_score(ems, ret_dict)
if not ret_dict:
return None, False
ret = sorted(ret_dict.items(), key=lambda d: d[1], reverse=True)
log.info("[Meta] Inferred keywords: %s ..." % ([k[0] for i, k in enumerate(ret) if i < 4]))
if len(ret) == 1:
keyword = ret[0][0]
else:
pre = ret[0]
nextw = ret[1]
if nextw[0].find(pre[0]) > -1:
# Full score: pick it directly
if int(pre[1]) >= 100:
keyword = pre[0]
# If the scores differ by more than 30, pick the higher-scoring one
elif int(pre[1]) - int(nextw[1]) > KEYWORD_DIFF_SCORE_THRESHOLD:
keyword = pre[0]
# Skip duplicated ones
elif nextw[0].replace(pre[0], "").strip() == pre[0]:
keyword = pre[0]
# Skip pure numbers
elif pre[0].isdigit():
keyword = nextw[0]
else:
keyword = nextw[0]
else:
keyword = pre[0]
log.info("[Meta] Selected keyword: %s " % keyword)
return keyword, is_movie
@staticmethod
def __get_genre_ids_from_detail(genres):
"""
Get the genre_id list from TMDB details
"""
if not genres:
return []
genre_ids = []
for genre in genres:
genre_ids.append(genre.get('id'))
return genre_ids
@staticmethod
def __get_tmdb_chinese_title(tmdbinfo):
"""
Get the Chinese title from alternative titles
"""
if not tmdbinfo:
return None
if tmdbinfo.get("media_type") == MediaType.MOVIE:
alternative_titles = tmdbinfo.get("alternative_titles", {}).get("titles", [])
else:
alternative_titles = tmdbinfo.get("alternative_titles", {}).get("results", [])
for alternative_title in alternative_titles:
iso_3166_1 = alternative_title.get("iso_3166_1")
if iso_3166_1 == "CN":
title = alternative_title.get("title")
if title and StringUtils.is_chinese(title) and zhconv.convert(title, "zh-hans") == title:
return title
return tmdbinfo.get("title") if tmdbinfo.get("media_type") == MediaType.MOVIE else tmdbinfo.get("name")
def get_tmdbperson_chinese_name(self, person_id=None, person_info=None):
"""
Query the Chinese name of a TMDB person
"""
if not self.person:
return ""
if not person_info and not person_id:
return ""
# Chinese name to return
name = ""
# All alternative names
alter_names = []
try:
if not person_info:
person_info = self.person.details(person_id)
if person_info:
aka_names = person_info.get("also_known_as", []) or []
else:
return ""
except Exception as err:
print(str(err))
return ""
for aka_name in aka_names:
if StringUtils.is_chinese(aka_name):
alter_names.append(aka_name)
if len(alter_names) == 1:
name = alter_names[0]
elif len(alter_names) > 1:
for alter_name in alter_names:
if alter_name == zhconv.convert(alter_name, 'zh-hans'):
name = alter_name
return name
def get_tmdbperson_aka_names(self, person_id):
"""
Query a person's also-known-as names
"""
if not self.person:
return []
try:
aka_names = self.person.details(person_id).get("also_known_as", []) or []
return aka_names
except Exception as err:
print(str(err))
return []
def get_random_discover_backdrop(self):
"""
Get one random backdrop image from TMDB popular movies
"""
if not self.discover:
return ""
try:
medias = self.discover.discover_movies(params={"sort_by": "popularity.desc"})
if medias:
# Pick a random movie
media = random.choice(medias)
img_url = Config().get_tmdbimage_url(media.get("backdrop_path"), prefix="original") \
if media.get("backdrop_path") else ''
img_title = media.get('title', '')
img_link = f"https://www.themoviedb.org/movie/{media.get('id')}" if media.get('id') else ''
return img_url, img_title, img_link
except Exception as err:
print(str(err))
return '', '', ''
def save_rename_cache(self, file_name, cache_info):
"""
Add manually identified info to the cache
"""
if not file_name or not cache_info:
return
meta_info = MetaInfo(title=file_name)
self.__insert_media_cache(self.__make_cache_key(meta_info), cache_info)
@staticmethod
def merge_media_info(target, source):
"""
Merge all info from source into target and return target
"""
target.set_tmdb_info(source.tmdb_info)
target.fanart_poster = source.get_poster_image()
target.fanart_backdrop = source.get_backdrop_image()
target.set_download_info(download_setting=source.download_setting,
save_path=source.save_path)
return target
def get_tmdbid_by_imdbid(self, imdbid):
"""
Query TMDB info by IMDB ID
"""
if not self.find:
return None
try:
result = self.find.find_by_imdbid(imdbid) or {}
tmdbinfo = result.get('movie_results') or result.get("tv_results")
if tmdbinfo:
tmdbinfo = tmdbinfo[0]
return tmdbinfo.get("id")
except Exception as err:
print(str(err))
return None
@staticmethod
def get_detail_url(mtype, tmdbid):
"""
Get the TMDB/Douban detail page URL
"""
if not tmdbid:
return ""
if str(tmdbid).startswith("DB:"):
return "https://movie.douban.com/subject/%s" % str(tmdbid).replace("DB:", "")
elif mtype == MediaType.MOVIE:
return "https://www.themoviedb.org/movie/%s" % tmdbid
else:
return "https://www.themoviedb.org/tv/%s" % tmdbid
def get_episode_images(self, tv_id, season_id, episode_id, orginal=False):
"""
Get the still image of a specific episode in a show
"""
if not self.episode:
return ""
if not tv_id or not season_id or not episode_id:
return ""
res = self.episode.images(tv_id, season_id, episode_id)
if res:
if orginal:
return Config().get_tmdbimage_url(res[-1].get("file_path"), prefix="original")
else:
return Config().get_tmdbimage_url(res[-1].get("file_path"))
else:
return ""
def get_tmdb_factinfo(self, media_info):
"""
Get TMDB release facts/info
"""
result = []
if media_info.vote_average:
result.append({"่ฏๅ": media_info.vote_average})
if media_info.original_title:
result.append({"ๅๅงๆ ้ข": media_info.original_title})
status = media_info.tmdb_info.get("status")
if status:
result.append({"็ถๆ": status})
if media_info.release_date:
result.append({"ไธๆ ๆฅๆ": media_info.release_date})
revenue = media_info.tmdb_info.get("revenue")
if revenue:
result.append({"ๆถๅ
ฅ": StringUtils.str_amount(revenue)})
budget = media_info.tmdb_info.get("budget")
if budget:
result.append({"ๆๆฌ": StringUtils.str_amount(budget)})
if media_info.original_language:
result.append({"ๅๅง่ฏญ่จ": media_info.original_language})
production_country = self.get_get_production_country_names(tmdbinfo=media_info.tmdb_info)
if media_info.networks:
result.append({"็ต่ง็ฝ": media_info.networks})
if production_country:
result.append({"ๅบๅๅฝๅฎถ": production_country}),
production_company = self.get_tmdb_production_company_names(tmdbinfo=media_info.tmdb_info)
if production_company:
result.append({"ๅถไฝๅ
ฌๅธ": production_company})
return result
| [] |
2024-01-10 | ilanhuang/audio2face-streamgpt-public | exts~stream.gptchat~stream~gptchat~recording_transcription.py | #Stream-GPT
#GNU - GPL Licence
#Copyright (C) <year> <Huang I Lan & Erks - Virtual Studio>
#This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.
import os
import pyaudio
import wave
import keyboard
import time
from time import sleep
import openai
import datetime
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
def save_file(filepath, content):
with open(filepath, 'w', encoding='utf-8') as outfile:
outfile.write(content)
def timestamp_to_datetime(unix_time):
return datetime.datetime.fromtimestamp(unix_time).strftime("%A, %B %d, %Y at %I:%M%p %Z")
def record_client_voice(output_filename, recording_status):
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
frames = []
p = pyaudio.PyAudio()
stream = None
try:
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
start_time = time.time()
min_duration = 0.1
while recording_status() or time.time() - start_time < min_duration:
data = stream.read(CHUNK)
frames.append(data)
except Exception as e:
print(f"Error while recording audio: {e}")
finally:
if stream is not None:
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(output_filename, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
return output_filename
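# Illustrative usage sketch (editor-added, not part of the original extension). The
# `is_recording` flag, the file name, and the way the UI toggles the flag are assumptions.
#
# is_recording = True  # would be flipped to False by the UI/hotkey when the user stops talking
# wav_path = record_client_voice("client_input.wav", lambda: is_recording)
# text = transcribe_audio_to_text(wav_path)  # sends the WAV to OpenAI Whisper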
def transcribe_audio_to_text(file_path):
with open(file_path, 'rb') as audio_file:
transcript_response = openai.Audio.transcribe("whisper-1", audio_file)
return transcript_response["text"] | [] |
2024-01-10 | ilanhuang/audio2face-streamgpt-public | exts~stream.gptchat~stream~gptchat~extension.py | #Stream-GPT
#GNU - GPL Licence
#Copyright (C) <year> <Huang I Lan & Erks - Virtual Studio>
#This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.
import omni.ext
import sys
sys.path.append("C:\\Users\\ERKS 2\\Documents\\Omniverse\\ov\\pkg\\audio2face-2022.2.1\\exts\\omni.audio2face.player\omni\\audio2face\\player\\scripts\\streaming_server")
import openai
import carb
from .window import AudioChatWindow
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class MyExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
openai.api_key = AudioChatWindow.get_openai_api_key()
self._window = AudioChatWindow("VIRTUAL ASSISTANT", width=400, height=525)
def on_shutdown(self):
self._window.destroy()
self._window = None
| [] |
2024-01-10 | clintonjules/cm1_code_assessment | task5~task5.py | import discord
from discord.ext import commands
from langchain.prompts import (
ChatPromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate
)
from langchain.chains import ConversationChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
# Define the intents
intents = discord.Intents.all()
bot = commands.Bot(command_prefix='!',intents=intents)
client = discord.Client(intents=intents)
prompt = ChatPromptTemplate.from_messages([
SystemMessagePromptTemplate.from_template(
"The following is a friendly conversation between a human and an AI. The AI is talkative and "
"provides lots of specific details from its context. If the AI does not know the answer to a "
"question, it truthfully says it does not know."
),
MessagesPlaceholder(variable_name="history"),
HumanMessagePromptTemplate.from_template("{input}")
])
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
memory = ConversationBufferMemory(return_messages=True)
conversation = ConversationChain(memory=memory, prompt=prompt, llm=llm)
@bot.event
async def on_ready():
print("bot online")
@bot.event
async def on_member_join(member):
guild = bot.get_guild() # guild id
channel = guild.get_channel() # welcome channel id
await channel.send(f"Hello {member.mention}!")
await channel.send("Here's a joke for you:")
joke = conversation.predict(input=f"Make a joke about the name {member} only type out the joke")
joke = '\n'.join(joke.split('\n')[1:])
await channel.send(joke)
await channel.send("Any questions you want to ask? (Place a '$' infront when doing so)")
@bot.event
async def on_message(message):
if message.author.bot:
return
query = message.content[1:]
if message.content.startswith('$'):
await message.channel.send(conversation.predict(input=query))
bot.run('YOUR_KEY')
| [
"The following is a friendly conversation between a human and an AI. The AI is talkative and ",
"{input}",
"The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.",
"provides lots of specific details from its context. If the AI does not know the answer to a ",
"question, it truthfully says it does not know."
] |
2024-01-10 | clintonjules/cm1_code_assessment | task3~task3.py | import os
# Enter your OpenAI key
os.environ["OPENAI_API_KEY"] = 'YOUR_KEY'
from langchain.prompts import (
ChatPromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate
)
from langchain.chains import ConversationChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
prompt = ChatPromptTemplate.from_messages([
SystemMessagePromptTemplate.from_template(
"The following is a friendly conversation between a human and an AI. The AI is talkative and "
"provides lots of specific details from its context. If the AI does not know the answer to a "
"question, it truthfully says it does not know."
),
MessagesPlaceholder(variable_name="history"),
HumanMessagePromptTemplate.from_template("{input}")
])
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
memory = ConversationBufferMemory(return_messages=True)
conversation = ConversationChain(memory=memory, prompt=prompt, llm=llm)
print(conversation.predict(input="Hi there! My name is Clint"))
print(conversation.predict(input="Tell em a joke about my name"))
print(conversation.predict(input="What are the last 2 message we exchanged?"))
print("-------------SystemMessagePromptTemplate---------------\n")
print(prompt.messages[0])
| [
"The following is a friendly conversation between a human and an AI. The AI is talkative and ",
"{input}",
"The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.",
"provides lots of specific details from its context. If the AI does not know the answer to a ",
"question, it truthfully says it does not know."
] |
2024-01-10 | wbivanco/matriz_qa | ai~matrix_generator.py | from dotenv import load_dotenv
import os
from os.path import abspath, dirname
import openai
import pandas as pd
from tqdm import tqdm
import json
# Load configuration variables.
load_dotenv()
# Configuration.
os.environ["OPENAI_API_KEY"] = os.getenv('API_KEY')
openai.api_type = os.getenv('API_TYPE')
openai.api_version = os.getenv('API_VERSION')
openai.api_base = os.getenv('API_BASE')
openai.api_key = os.getenv('API_KEY')
def extraction(messages, engine=os.getenv('CHAT_ENGINE_16K'), temperature=0.1, top_p = 0.9):
"""
Extracts information from a document containing test requirements and identifies all test cases and their expected results.
Parameters:
- messages (list): A list of messages exchanged between the user and the chatbot.
- engine (str): The engine to use for generating responses. Default is CHAT_ENGINE_16K.
- temperature (float): The temperature parameter for response generation. Default is 0.1.
- top_p (float): The top-p parameter for response generation. Default is 0.9.
Returns:
- str: The extracted content containing the identified test cases and their expected results.
"""
messages_full = [{"role": "system", "content": """Sos parte del equipo de testing de una compania de telecomunicaciones.
- Vas a recibir un documento con los requerimientos para testeo de varios de los modulos de una aplicacion y debes identificar TODOS los casos de prueba presentes en รฉl y su resultado esperado.
"""
}] + messages
timeout = 10
try:
response = openai.ChatCompletion.create(
engine=engine,
messages=messages_full,
temperature=temperature,
top_p=top_p,
timeout=timeout
)
except openai.OpenAIError as e:
print("Error al realizar la solicitud:", str(e))
return None
# Measure token usage from the response.
prompt_tokens = response['usage']['prompt_tokens']
completion_tokens = response['usage']['completion_tokens']
choice = response.choices[0]
message = choice.get("message")
if message is not None:
content = message.get("content")
if content is not None:
return {
"prompt_tokens": prompt_tokens,
"completion_tokens": completion_tokens,
"content": content
}
print("La respuesta no contiene la clave 'content'")
return None
def build_message(visit):
"""
This function takes a 'visit' parameter and generates a prompt message based on the given document.
The prompt message includes instructions for identifying and enumerating test cases, their expected results,
the component type they refer to, and the global functionality mentioned in the document.
The function returns the extracted information in the form of a JSON string.
Parameters:
- visit: The document to be analyzed.
Returns:
- JSON string containing the extracted information.
"""
prompt = f"""Te voy a dar un contexto y un documento, en base al documento:
- Identifica y enumeras TODOS los casos de prueba de testing de aplicaciones presentes (escritos) y su resultado esperado. Un ejemplo de caso de prueba es 'Validar que al darle clic en la opcion pasate un plan nos mande al la pantalla de "Pasate a un plan"' y su resultado esperado es 'Se debe mostrar la pantalla de "Pasate a un plan"'. Otro ejemplo de caso de prueba es 'Validar que al seleccionar el botรณn continuar permita avanzar a la pantalla de check out' y su resultado esperado es 'Se debe mostrar la pantalla de check out'.
- Identifica el tipo de componente de la aplicaciรณn al que hace referencia el caso de prueba (por ejemplo: 'botรณn continuar', 'pantalla', 'botรณn pasarme a un plan', 'Inicio de sesiรณn', 'switch flujo de migraciones', 'parrillas', 'menรบ hamburguesa', 'campo RFC', 'Banner', 'Spinner', 'checkout', 'check box') y coloca este resultado en el campo 'componente'.
- Ten en cuenta que el componente tiene como mรกximo 5 palabras para ubicar la secciรณn de la app, encambio el caso de prueba contiene una descripciรณn mรกs larga de la acciรณn que hay que realizar.
- Haz distinciรณn de los casos que hablan del mantenedor y los que hablan de la app del usuario, coloca este resultado en el campo 'tipo'.
- Ademรกs, debes identificar la funcionalidad global a la que hace referencia el texto completo, esta se encuentra generalmente al comienzo del documento. Por ejemplo: 'MANTENEDOR โ SWITCH FLUJO DE MIGRACIONES-DESACTIVADO', 'MANTENEDOR โ CONFIGURACIรN DE PARRILLAS - SWITCH MOSTRAR EN EL APP โ PLAN 2 โ DESACTIVADO', 'MIGRACIONES โ FILTRO / RANGO DE FECHAS' o descripciones similares. Este valor debes repetirlo para todos los casos de prueba que se encuentren en el documento y almacenarlo en el campo 'funcionalidad'. La funcionalidad ES IGUAL para todos los casos de prueba de un mismo documento, ignora la separaciรณn de mantenedor y app para el campo funcionalidad.
- La salida debe ser SOLAMENTE un JSON con la informacion encontrada en el texto siguiendo la estructura:
{{ "1": {{
"funcionalidad": extrae la funcionalidad y colocala aqui,
"tipo": "mantenedor" o "aplicaciรณn",
"componente": extrae el componente y colocalo aqui,
"caso de prueba": extrae el caso de prueba y colocalo aqui,
"resultado esperado": extrae el resultado esperado del caso de prueba y colocalo aqui,
}},
"2": {{
"funcionalidad": extrae la funcionalidad y colocala aqui,
"tipo": "mantenedor" o "aplicaciรณn",
"componente": extrae el componente y colocalo aqui,
"caso de prueba": extrae el caso de prueba y colocalo aqui,
"resultado esperado": extrae el resultado esperado del caso de prueba y colocalo aqui,
}},
}}
- La salida debe ser un JSON que se pueda leer mediante json.loads() en python, incluye siempre los separadores correspondientes para que funcione la lectura.
Documento:{visit}"""
message = [{"role": "user", "content": prompt}]
return extraction(message)
def preprocess_docx(docx):
"""
Preprocesses a docx file by splitting it into chunks based on the '#' character.
Args:
docx (str): The content of the docx file.
Returns:
tuple: A tuple containing the context (first chunk) and the documents (remaining chunks).
"""
# Split each title of the docx into its own chunk:
# replace '.\n-' with '#' to mark each title,
# then split the text into chunks on the '#' character.
docx_md = docx.replace('.\n-', '#')
docx_md = docx_md.replace('\nโ', '#')
chunks_md = docx_md.split('#')
context = chunks_md[0]
documents = chunks_md[1:]
return context, documents
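# Illustrative example (editor-added; the sample text is hypothetical): a document such as
# "Contexto general.\n- Caso 1.\n- Caso 2." would yield
# context = "Contexto general" and documents = [" Caso 1", " Caso 2."].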
def generate_response(documents):
"""
Generates a response by processing a list of documents.
Args:
documents (list): A list of documents to process.
Returns:
pandas.DataFrame: A DataFrame containing the generated results.
"""
prompt_tokens = 0
completion_tokens = 0
# Initialize an empty DataFrame with the desired columns.
results = pd.DataFrame(columns=['funcionalidad','tipo','componente','caso de prueba', 'resultado esperado'])
# Iterate over each chunk in 'documents'.
for chunk in tqdm(documents):
# Call 'build_message' with the chunk as its argument.
result_dict = build_message(chunk)
test_cases = result_dict['content']
# Convert the text string into a dictionary.
#print(f"*****CASOs DE PRUEBA {chunk}******")
#print(casos_de_prueba)
test_cases_dict = json.loads(test_cases)
prompt_tokens += result_dict['prompt_tokens']
completion_tokens += result_dict['completion_tokens']
# Convert the nested dictionary into a DataFrame and append it to the results DataFrame.
for key, value in test_cases_dict.items():
df = pd.DataFrame(value, index=[0])
results = pd.concat([results, df], ignore_index=True)
costo = f'El costo de la presente ejecuciรณn es u$s: {(prompt_tokens/1000)*0.003 + (completion_tokens/1000)*0.004}'
# 'results' is now a DataFrame containing all test cases from all chunks.
return results, costo
def generate_matrix(filename, mode='online'):
if mode == 'local':
from libs.docx_parser import getDoc
docx = '../static/input/' + filename
output_path = '../static/output/'
else:
from ai.libs.docx_parser import getDoc
docx = 'static/input/' + filename
output_path = 'static/output/'
# Load the relative path of the file to be processed.
#path = dirname(dirname(dirname(abspath(__file__))))+'\\1.4 Datos\CP_Migraciones.docx'
docx_file = getDoc(docx)
context, document = preprocess_docx(docx_file)
result, cost = generate_response(document)
# Save the results to a CSV file so that 'ñ' and accented characters read correctly (commented out).
#output_file = output_path + 'resultados_generados.csv'
#result.to_csv(output_file, index=False, encoding='utf-8-sig')
# Save the results to an Excel file.
output_file = output_path + 'resultados_generados.xlsx'
result.to_excel(output_file, sheet_name='Resultados', index=False)
if mode == 'local':
print(cost)
else:
msg = "Proceso terminado exitosamente (procesado: " + filename + ") puede consultar la matrรญz generada."
cost = "El costo es de u$s 0.25."
return (cost, msg)
################# THE CODE BELOW IS ONLY USED WHEN RUNNING LOCALLY #####################
if __name__ == '__main__':
    generate_matrix('cp_migraciones.docx', 'local')
| [
"0",
"Te voy a dar un contexto y un documento, en base al documento: \n - Identifica y enumeras TODOS los casos de prueba de testing de aplicaciones presentes (escritos) y su resultado esperado. Un ejemplo de caso de prueba es 'Validar que al darle clic en la opcion pasate un plan nos mande al la pantalla de \"Pasate a un plan\"' y su resultado esperado es 'Se debe mostrar la pantalla de \"Pasate a un plan\"'. Otro ejemplo de caso de prueba es 'Validar que al seleccionar el botรณn continuar permita avanzar a la pantalla de check out' y su resultado esperado es 'Se debe mostrar la pantalla de check out'.\n - Identifica el tipo de componente de la aplicaciรณn al que hace referencia el caso de prueba (por ejemplo: 'botรณn continuar', 'pantalla', 'botรณn pasarme a un plan', 'Inicio de sesiรณn', 'switch flujo de migraciones', 'parrillas', 'menรบ hamburguesa', 'campo RFC', 'Banner', 'Spinner', 'checkout', 'check box') y coloca este resultado en el campo 'componente'. \n - Ten en cuenta que el componente tiene como mรกximo 5 palabras para ubicar la secciรณn de la app, encambio el caso de prueba contiene una descripciรณn mรกs larga de la acciรณn que hay que realizar.\n - Haz distinciรณn de los casos que hablan del mantenedor y los que hablan de la app del usuario, coloca este resultado en el campo 'tipo'.\n - Ademรกs, debes identificar la funcionalidad global a la que hace referencia el texto completo, esta se encuentra generalmente al comienzo del documento. Por ejemplo: 'MANTENEDOR โ SWITCH FLUJO DE MIGRACIONES-DESACTIVADO', 'MANTENEDOR โ CONFIGURACIรN DE PARRILLAS - SWITCH MOSTRAR EN EL APP โ PLAN 2 โ DESACTIVADO', 'MIGRACIONES โ FILTRO / RANGO DE FECHAS' o descripciones similares. Este valor debes repetirlo para todos los casos de prueba que se encuentren en el documento y almacenarlo en el campo 'funcionalidad'. La funcionalidad ES IGUAL para todos los casos de prueba de un mismo documento, ignora la separaciรณn de mantenedor y app para el campo funcionalidad.\n - La salida debe ser SOLAMENTE un JSON con la informacion encontrada en el texto siguiendo la estructura: \n { \"1\": {\n \"funcionalidad\": extrae la funcionalidad y colocala aqui,\n \"tipo\": \"mantenedor\" o \"aplicaciรณn\",\n \"componente\": extrae el componente y colocalo aqui,\n \"caso de prueba\": extrae el caso de prueba y colocalo aqui,\n \"resultado esperado\": extrae el resultado esperado del caso de prueba y colocalo aqui,\n },\n \"2\": {\n \"funcionalidad\": extrae la funcionalidad y colocala aqui,\n \"tipo\": \"mantenedor\" o \"aplicaciรณn\",\n \"componente\": extrae el componente y colocalo aqui,\n \"caso de prueba\": extrae el caso de prueba y colocalo aqui,\n \"resultado esperado\": extrae el resultado esperado del caso de prueba y colocalo aqui,\n }, \n }\n - La salida debe ser un JSON que se pueda leer mediante json.loads() en python, incluye siempre los separadores correspondientes para que funcione la lectura. \n Documento:PLACEHOLDER",
"Sos parte del equipo de testing de una compania de telecomunicaciones.\n - Vas a recibir un documento con los requerimientos para testeo de varios de los modulos de una aplicacion y debes identificar TODOS los casos de prueba presentes en รฉl y su resultado esperado.\n ",
"prompt_tokens"
] |
2024-01-10 | RKP64/BambooAI | bambooai~bambooai.py |
import os
import re
import sys
import base64
import json
from contextlib import redirect_stdout
import io
import time
import openai
import pandas as pd
from termcolor import colored, cprint
from IPython.display import display, Image, HTML
import warnings
warnings.filterwarnings('ignore')
#Running as a script
#import prompts
#import func_calls
#import qa_retrieval
#Running as a package
from . import prompts
from . import func_calls
from . import qa_retrieval
class BambooAI:
def __init__(self, df: pd.DataFrame,
max_conversations: int = 4,
llm: str = 'gpt-3.5-turbo-0613',
llm_func: str = 'gpt-3.5-turbo-0613',
llm_16k: str = 'gpt-3.5-turbo-16k',
llm_gpt4: str = 'gpt-4-0613',
debug: bool = False,
vector_db: bool = False,
llm_switch: bool = False,
exploratory: bool = True,
):
self.API_KEY = os.environ.get('OPENAI_API_KEY')
# Check if the OPENAI_API_KEY environment variable is set
if not self.API_KEY:
raise EnvironmentError("OPENAI_API_KEY environment variable not found.")
# Check if the PINECONE_API_KEY and PINECONE_ENV environment variables are set if vector_db is True
if vector_db:
PINECONE_API_KEY = os.getenv('PINECONE_API_KEY')
PINECONE_ENV = os.getenv('PINECONE_ENV')
if PINECONE_API_KEY is None or PINECONE_ENV is None:
print("Warning: PINECONE_API_KEY or PINECONE_ENV environment variable not found. Disabling vector_db.")
vector_db = False
self.MAX_ERROR_CORRECTIONS = 5
# Set the maximum number of question/answer pairs to be kept in the conversation memmory
self.MAX_CONVERSATIONS = (max_conversations*2) - 1
# Store the original dataframe. This will be used to reset the dataframe before executing the code
self.original_df = df
self.df = df.copy() # make a copy of the dataframe
self.df_head = self.original_df.head(1)
self.df_columns = self.df.columns.tolist()
# LLMs
self.llm = llm
self.llm_func = llm_func
self.llm_16k = llm_16k
self.llm_gpt4 = llm_gpt4
# Set the debug mode. This mode is True when you want the model to debug the code and correct it.
self.debug = debug
# Set the llm_switch mode. This mode is True when you want the model to switch to gpt-4 for debugging, error correction and ranking.
self.llm_switch = llm_switch
# Set the vector_db mode. This mode is True when you want to use a vector database to store and retrieve ranked question/answer pairs.
self.vector_db = vector_db
# Set the exploratory mode. This mode is True when you want the model to evaluate the original prompt and break it down in algorithm.
self.exploratory = exploratory
# Prompts
self.default_example_output = prompts.example_output
self.task_evaluation = prompts.task_evaluation
self.system_task = prompts.system_task
self.user_task = prompts.user_task
self.error_correct_task = prompts.error_correct_task
self.debug_code_task = prompts.debug_code_task
self.rank_answer = prompts.rank_answer
self.solution_insights = prompts.solution_insights
# Functions
self.task_eval_function = func_calls.task_eval_function
self.insights_function = func_calls.solution_insights_function
# QA Retrieval
self.add_question_answer_pair = qa_retrieval.add_question_answer_pair
self.retrieve_answer = qa_retrieval.retrieve_answer
self.similarity_threshold = 0.8
openai.api_key = self.API_KEY
# Initialize the total tokens used list. This list will be used to keep track of the total tokens used by the model
self.total_tokens_used = []
def llm_call(self, messages: str, temperature: float = 0, max_tokens: int = 1000, llm_cascade: bool = False):
model = self.llm
if llm_cascade:
model = self.llm_gpt4
try:
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
)
except openai.error.RateLimitError:
print(
"The OpenAI API rate limit has been exceeded. Waiting 10 seconds and trying again."
)
time.sleep(10)
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
)
# Exceeded the maximum number of tokens allowed by the API
except openai.error.InvalidRequestError:
print(
"The OpenAI API maximum tokens limit has been exceeded. Switching to a 16K model."
)
response = openai.ChatCompletion.create(
model=self.llm_16k,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
)
content = response.choices[0].message.content.strip()
tokens_used = response.usage.total_tokens
return content, tokens_used
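# Illustrative usage sketch (editor-added, not part of the original library):
# content, tokens = self.llm_call(
#     [{"role": "user", "content": "Summarise the dataframe"}],
#     llm_cascade=True,  # routes the request to the configured GPT-4 model instead of the default
# )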
def llm_func_call(self, messages, functions, function_name):
try:
response = openai.ChatCompletion.create(
model = self.llm_func,
messages=messages,
functions=functions,
function_call = function_name,
temperature=0,
max_tokens = 700,
)
except openai.error.RateLimitError:
print(
"The OpenAI API rate limit has been exceeded. Waiting 10 seconds and trying again."
)
time.sleep(10)
response = openai.ChatCompletion.create(
model = self.llm_func,
messages=messages,
functions=functions,
function_call = function_name,
temperature=0,
)
fn_name = response.choices[0].message["function_call"].name
arguments = response.choices[0].message["function_call"].arguments
tokens_used = response.usage.total_tokens
return fn_name,arguments,tokens_used
# Functions to sanitize the output from the LLM
def _extract_code(self,response: str, separator: str = "```") -> str:
# Define a blacklist of Python keywords and functions that are not allowed
blacklist = ['os','subprocess','sys','eval','exec','file','socket','urllib',
'shutil','pickle','ctypes','multiprocessing','tempfile','glob','code','pty','commands',
'requests','cgi','cgitb','xml.etree.ElementTree','builtins'
]
# Search for a pattern between <code> and </code> in the extracted code
match = re.search(r"<code>(.*)</code>", response, re.DOTALL)
if match:
# If a match is found, extract the code between <code> and </code>
code = match.group(1)
# If the response contains the separator, extract the code block between the separators
if len(code.split(separator)) > 1:
code = code.split(separator)[1]
# If the response contains the separator, extract the code block between the separators
if len(response.split(separator)) > 1:
code = response.split(separator)[1]
# Remove the "python" or "py" prefix if present
if re.match(r"^(python|py)", code):
code = re.sub(r"^(python|py)", "", code)
# If the code is between single backticks, extract the code between them
if re.match(r"^`.*`$", code):
code = re.sub(r"^`(.*)`$", r"\1", code)
# Remove any instances of "df = pd.read_csv('filename.csv')" from the code
code = re.sub(r"df\s*=\s*pd\.read_csv\('.*?'(,.*)?\)", "", code)
# Define the regular expression pattern to match the blacklist items
pattern = r"^(.*\b(" + "|".join(blacklist) + r")\b.*)$"
# Replace the blacklist items with comments
code = re.sub(pattern, r"# not allowed \1", code, flags=re.MULTILINE)
# Return the cleaned and extracted code
return code.strip()
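# Editor-added illustration (hypothetical response string) of what _extract_code returns:
#   response = "Here you go:\n<code>```python\nresult = df['col'].mean()\n```</code>"
#   self._extract_code(response)  ->  "result = df['col'].mean()"
# The "python" prefix and the backtick fences are stripped, and any line using a
# blacklisted module such as os or subprocess would be commented out as "# not allowed ...".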
def _extract_rank(self,response: str) -> str:
# Search for a pattern between <rank> and </rank> in the response
match = re.search(r"<rank>(.*)</rank>", response)
if match:
# If a match is found, extract the rank between <rank> and </rank>
rank = match.group(1)
else:
rank = ""
# Return the cleaned and extracted code
return rank.strip()
# Function to remove examples from messages when no longer needed
def _remove_examples(self,messages: str) -> str:
# Define the regular expression pattern
pattern = r'Example Output:\s*<code>.*?</code>\s*'
# Iterate over the list of dictionaries
for dict in messages:
# Access and clean up 'content' field
if dict.get('role') == 'user' and 'content' in dict:
dict['content'] = re.sub(pattern, '', dict['content'], flags=re.DOTALL)
return messages
def task_eval(self, eval_messages):
if 'ipykernel' in sys.modules:
# Jupyter notebook or ipython
display(HTML(f'<p style="color:magenta;">\nCalling Model: {self.llm_func}</p>'))
display(HTML(f'<p><b style="color:magenta;">Trying to determine the best method to answer your question, please wait...</b></p><br>'))
else:
# Other environment (like terminal)
print(colored(f"\n>> Calling Model: {self.llm_func}", "magenta"))
cprint(f"\n>> Trying to determine the best method to answer your question, please wait...\n", 'magenta', attrs=['bold'])
# Call OpenAI API
function_name = {"name": "QA_Response"}
fn_name, arguments, tokens_used = self.llm_func_call(eval_messages,self.task_eval_function, function_name)
# Parse the JSON string to a Python dict
arguments_dict = json.loads(arguments, strict=False)
# Retrieve values
eval_answer = arguments_dict["answer"]
eval_answer_type = arguments_dict["answer_type"]
self.total_tokens_used.append(tokens_used)
return arguments, fn_name, eval_answer, eval_answer_type
def debug_code(self,code,question, llm_cascade=False):
# Initialize the messages list with a system message containing the task prompt
debug_messages = [{"role": "system", "content": self.debug_code_task.format(code,question)}]
using_model = self.llm
if llm_cascade:
using_model = self.llm_gpt4
if 'ipykernel' in sys.modules:
# Jupyter notebook or ipython
display(HTML(f'<p style="color:magenta;">\nCalling Model: {using_model}</p>'))
display(HTML(f'<p><b style="color:magenta;">I have received the first version of the code. I am sending it back to LLM to get it checked for any errors, bugs or inconsistencies, and correction if necessary. Please wait...</b></p><br>'))
else:
# Other environment (like terminal)
print(colored(f"\n>> Calling Model: {using_model}", "magenta"))
cprint(f"\n>> I have received the first version of the code. I am sending it back to LLM to get it checked for any errors, bugs or inconsistencies, and correction if necessary. Please wait...\n", 'magenta', attrs=['bold'])
# Function to display results nicely
def display_task():
if 'ipykernel' in sys.modules:
# Jupyter notebook or ipython
display(HTML(f'<p><b style="color:magenta;">I have finished debugging the code, and will now proceed to the execution...</b></p><br>'))
else:
# Other environment (like terminal)
cprint(f"\n>> I have finished debugging the code, and will now proceed to the execution...\n", 'magenta', attrs=['bold'])
# Call the OpenAI API
llm_response, tokens_used = self.llm_call(debug_messages,temperature=0,llm_cascade=llm_cascade) # higher temperature results in more "creative" answers (sometimes too creative :-))
# Extract the code from the API response
debugged_code = self._extract_code(llm_response)
display_task()
self.total_tokens_used.append(tokens_used)
return debugged_code
def rank_code(self,code,question,llm_cascade=False):
# Initialize the messages list with a system message containing the task prompt
rank_messages = [{"role": "system", "content": self.rank_answer.format(code,question)}]
using_model = self.llm
if llm_cascade:
using_model = self.llm_gpt4
if 'ipykernel' in sys.modules:
# Jupyter notebook or ipython
display(HTML(f'<p style="color:magenta;">\nCalling Model: {using_model}</p>'))
display(HTML(f'<p><b style="color:magenta;">I am going to evaluate and rank the answer. Please wait...</b></p><br>'))
else:
# Other environment (like terminal)
print(colored(f"\n>> Calling Model: {using_model}", "magenta"))
cprint(f"\n>> I am going to evaluate and rank the answer. Please wait..\n", 'magenta', attrs=['bold'])
# Call the OpenAI API
llm_response, tokens_used = self.llm_call(rank_messages,llm_cascade=llm_cascade)
# Extract the rank from the API response
rank = self._extract_rank(llm_response)
self.total_tokens_used.append(tokens_used)
return rank
def pd_agent_converse(self, question=None):
# Functions to display results nicely
def display_results(answer, code, rank, total_tokens_used_sum):
if 'ipykernel' in sys.modules:
if answer is not None:
display(HTML(f'<p><b style="color:blue;">I now have the final answer:</b><br><pre style="color:black; white-space: pre-wrap; font-weight: bold;">{answer}</pre></p><br>'))
if code is not None:
display(HTML(f'<p><b style="color:blue;">Here is the final code that accomplishes the task:</b><br><pre style="color:#555555;">{code}</pre></p><br>'))
if self.vector_db and rank is not None:
display(HTML(f'<p><b style="color:blue;">Rank:</b><br><span style="color:black;">{rank}</span></p><br>'))
display(HTML(f'<p><b style="color:blue;">Total Tokens Used:</b><br><span style="color:black;">{total_tokens_used_sum}</span></p><br>'))
else:
if answer is not None:
cprint(f"\n>> I now have the final answer:\n{answer}\n", 'green', attrs=['bold'])
if code is not None:
cprint(f">> Here is the final code that accomplishes the task:\n{code}\n", 'green', attrs=['bold'])
if self.vector_db and rank is not None:
cprint(f">> Rank:\n{rank}\n", 'green', attrs=['bold'])
cprint(f">> Total tokens used:\n{total_tokens_used_sum}\n", 'yellow', attrs=['bold'])
def display_eval(task_eval, title, total_tokens_used_sum):
if 'ipykernel' in sys.modules:
# Jupyter notebook or ipython
display(HTML(f'<p><b style="color:blue;">{title}</b><br><pre style="color:black; white-space: pre-wrap; font-weight: bold;">{task_eval}</pre></p><br>'))
display(HTML(f'<p><b style="color:blue;">Total Tokens Used:</b><br><span style="color:black;">{total_tokens_used_sum}</span></p><br>'))
else:
# Other environment (like terminal)
cprint(f"\n>> {title}\n{task_eval}\n", 'magenta', attrs=['bold'])
cprint(f">> Total tokens used:\n{total_tokens_used_sum}\n", 'yellow', attrs=['bold'])
# Initialize the eval_messages list
eval_messages = []
# If a question is provided, skip the input prompt
if question is not None:
# Initialize the messages list with a system message containing the task prompt
messages = [{"role": "system", "content": self.system_task}]
            # Add the task evaluation prompt to the eval_messages list
eval_messages.append({"role": "user", "content": self.task_evaluation.format(question, self.df_head)})
# Call the task_eval method with the user's question if the exploratory mode is True
if self.exploratory is True:
arguments, fn_name, task_eval, task_type = self.task_eval(eval_messages)
total_tokens_used_sum = sum(self.total_tokens_used)
if task_type == 'narrative':
title = 'Here is the answer to your question:'
display_eval(task_eval, title, total_tokens_used_sum)
return
if task_type == 'follow_up':
title = 'To be able to answer your question, I am going to need some more info:'
display_eval(task_eval, title, total_tokens_used_sum)
return
else:
title = 'Here is the sequence of steps required to complete the task:'
task = task_eval
display_eval(task_eval, title, total_tokens_used_sum)
else:
task = question
if self.vector_db:
# Call the retrieve_answer method to check if the question has already been asked and answered
example_output = self.retrieve_answer(task, self.df_columns, similarity_threshold=self.similarity_threshold)
if example_output is not None:
example_output = example_output
else:
example_output = self.default_example_output
else:
example_output = self.default_example_output
# Call the pd_agent method with the user's question, the messages list, and the dataframe
answer, code, total_tokens_used_sum = self.pd_agent(task, messages, example_output, self.df)
# Rank the LLM response
if self.vector_db:
# Switch to gpt-4 if llm_switch parameter is set to True. This will increase the processing time and cost
if self.llm_switch:
llm_cascade = True
else:
llm_cascade = False
rank = self.rank_code(code,task,llm_cascade=llm_cascade)
else:
rank = ""
display_results(answer, code, rank, total_tokens_used_sum)
if self.vector_db:
                # Prompt the user to give feedback on the ranking
if 'ipykernel' in sys.modules:
display(HTML('<b style="color:green;">Are you happy with the ranking ? If YES type \'yes\'. If NO type in the new rank on a scale from 1-10:</b>'))
time.sleep(1)
rank_feedback = input()
else:
cprint("\nAre you happy with the ranking ?\nIf YES type 'yes'. If NO type in the new rank on a scale from 1-10:", 'green', attrs=['bold'])
rank_feedback = input()
# If the user types "yes", use the rank as is. If not, use the user's rank.
if rank_feedback.strip().lower() == 'yes':
rank = rank
elif rank_feedback in map(str, range(0, 11)):
rank = rank_feedback
else:
rank = rank
# Add the question and answer pair to the QA retrieval index
self.add_question_answer_pair(task, self.df_columns, code, rank)
return
# Start an infinite loop to keep asking the user for questions
first_iteration = True # Flag for the first iteration of the loop
while True:
# Prompt the user to enter a question or type 'exit' to quit
if 'ipykernel' in sys.modules:
display(HTML('<b style="color:blue;">Enter your question or type \'exit\' to quit:</b>'))
time.sleep(1)
question = input()
else:
cprint("\nEnter your question or type 'exit' to quit:", 'blue', attrs=['bold'])
question = input()
# If the user types 'exit', break out of the loop
if question.strip().lower() == 'exit':
break
if first_iteration:
# Initialize the messages list with a system message containing the task prompt
messages = [{"role": "system", "content": self.system_task}]
first_iteration = False
# Call the task_eval method with the user's question if the exploratory mode is True
if self.exploratory is True:
eval_messages.append({"role": "user", "content": self.task_evaluation.format(question, self.df_head)})
arguments, fn_name, task_eval, task_type = self.task_eval(eval_messages)
eval_messages.append(
{
"role": "assistant",
"content": None,
"function_call": {
"name": fn_name,
"arguments": arguments,
},
}
)
# Remove the oldest conversation from the messages list
if len(eval_messages) > self.MAX_CONVERSATIONS:
eval_messages.pop(0)
eval_messages.pop(0)
total_tokens_used_sum = sum(self.total_tokens_used)
if task_type == 'narrative':
title = 'Here is an answer to your question:'
display_eval(task_eval, title, total_tokens_used_sum)
continue
if task_type == 'follow_up':
title = 'To be able to answer your question, I am going to need some more info:'
display_eval(task_eval, title, total_tokens_used_sum)
continue
else:
title = 'Here is a sequence of steps required to complete the task:'
task = task_eval
display_eval(task_eval, title, total_tokens_used_sum)
else:
task = question
if self.vector_db:
# Call the retrieve_answer method to check if the question has already been asked and answered
example_output = self.retrieve_answer(task, self.df_columns, similarity_threshold=self.similarity_threshold)
if example_output is not None:
example_output = example_output
else:
example_output = self.default_example_output
else:
example_output = self.default_example_output
# Call the pd_agent method with the user's question, the messages list, and the dataframe
answer, code, total_tokens_used_sum = self.pd_agent(task, messages, example_output, self.df)
# Remove the examples from the messages list to minimize the number of tokens used
messages = self._remove_examples(messages)
# Rank the LLM response
if self.vector_db:
# Switch to gpt-4 if llm_switch parameter is set to True. This will increase the processing time and cost
if self.llm_switch:
llm_cascade = True
else:
llm_cascade = False
rank = self.rank_code(code,task,llm_cascade=llm_cascade)
else:
rank = ""
display_results(answer, code, rank, total_tokens_used_sum)
if self.vector_db:
                # Prompt the user to give feedback on the ranking
if 'ipykernel' in sys.modules:
display(HTML('<b style="color:green;">Are you happy with the ranking ? If YES type \'yes\'. If NO type in the new rank on a scale from 1-10:</b>'))
time.sleep(1)
rank_feedback = input()
else:
cprint("\nAre you happy with the ranking ?\nIf YES type 'yes'. If NO type in the new rank on a scale from 1-10:", 'green', attrs=['bold'])
rank_feedback = input()
# If the user types "yes", use the rank as is. If not, use the user's rank.
if rank_feedback.strip().lower() == 'yes':
rank = rank
elif rank_feedback in map(str, range(0, 11)):
rank = rank_feedback
else:
rank = rank
# Add the question and answer pair to the QA retrieval index
self.add_question_answer_pair(task, self.df_columns, code, rank)
def pd_agent(self, task, messages, example_output,df=None):
# Add a user message with the updated task prompt to the messages list
messages.append({"role": "user", "content": self.user_task.format(self.df_head, task,example_output)})
if 'ipykernel' in sys.modules:
# Jupyter notebook or ipython
display(HTML(f'<p style="color:magenta;">\nCalling Model: {self.llm}</p>'))
display(HTML(f'<p><b style="color:magenta;">I have sent your request to the LLM and awaiting response, please wait...</b></p><br>'))
else:
# Other environment (like terminal)
print(colored(f"\n>> Calling Model: {self.llm}", "magenta"))
cprint(f"\n>> I have sent your request to the LLM and awaiting response, please wait...\n", 'magenta', attrs=['bold'])
# Call the OpenAI API
llm_response, tokens_used = self.llm_call(messages)
# Extract the code from the API response
code = self._extract_code(llm_response)
# Update the total tokens used
self.total_tokens_used.append(tokens_used)
# Initialize error correction counter
error_corrections = 0
# Debug code if debug parameter is set to True
if self.debug:
# Switch to gpt-4 if llm_switch parameter is set to True. This will increase the processing time and cost
if self.llm_switch:
llm_cascade = True
if 'ipykernel' in sys.modules:
# Jupyter notebook
display(HTML('<span style="color: magenta;">Switching model to gpt-4 to debug the code.</span>'))
else:
# CLI
print(colored("\n>> Switching model to GPT-4 to debug the code.", "magenta"))
else:
llm_cascade = False
code = self.debug_code(code, task, llm_cascade=llm_cascade)
# Redirect standard output to a StringIO buffer
with redirect_stdout(io.StringIO()) as output:
# Try to execute the code and handle errors
while error_corrections < self.MAX_ERROR_CORRECTIONS:
try:
messages.append({"role": "assistant", "content": llm_response})
# Remove the oldest conversation from the messages list
if len(messages) > self.MAX_CONVERSATIONS:
messages.pop(1)
messages.pop(1)
# Reset df to the original state before executing the code
self.df = self.original_df.copy()
# Execute the code
if code is not None:
exec(code, {'df': self.df})
break
except Exception as e:
# Print the error message
if 'ipykernel' in sys.modules:
# Jupyter notebook
display(HTML(f'<br><b><span style="color: #d86c00;">I ran into an issue:</span></b><br><pre style="color: #d86c00;">{e}</pre><br><b><span style="color: #d86c00;">I will examine it, and try again with an adjusted code.</span></b><br>'))
else:
# CLI
sys.stderr.write('\033[31m' + f'>> I ran into an issue: {e}. \n>> I will examine it, and try again with an adjusted code.' + '\033[0m' + '\n')
sys.stderr.flush()
# Increment the error correction counter and update the messages list with the error
error_corrections += 1
messages.append({"role": "user", "content": self.error_correct_task.format(e)})
# Switch to gpt-4 if llm_switch parameter is set to True. This will increase the processing time and cost.
if self.llm_switch:
llm_cascade = True
if 'ipykernel' in sys.modules:
# Jupyter notebook
display(HTML('<span style="color: #d86c00;">Switching model to gpt-4 to try to improve the outcome.</span>'))
else:
# CLI
sys.stderr.write('\033[31m' + f'>> Switching model to gpt-4 to try to improve the outcome.' + '\033[0m' + '\n')
sys.stderr.flush()
else:
llm_cascade = False
# Call OpenAI API to get an updated code
llm_response, tokens_used = self.llm_call(messages,llm_cascade=llm_cascade)
code = self._extract_code(llm_response)
self.total_tokens_used.append(tokens_used)
# Get the output from the executed code
answer = output.getvalue()
# Call OpenAI API
# Initialize the messages list with a system message containing the task prompt
insights_messages = [{"role": "user", "content": self.solution_insights.format(task, answer)}]
function_name = {"name": "Solution_Insights"}
fn_name, arguments, tokens_used = self.llm_func_call(insights_messages, self.insights_function, function_name)
# Parse the JSON string to a Python dict
arguments_dict = json.loads(arguments,strict=False)
# Retrieve values
answer = arguments_dict["insight"]
self.total_tokens_used.append(tokens_used)
total_tokens_used_sum = sum(self.total_tokens_used)
# Reset the StringIO buffer
output.truncate(0)
output.seek(0)
return answer, code, total_tokens_used_sum
| [
"None"
] |
2024-01-10 | ShreyAgarwal310/math-gpt | connectAPI.py | import openai
def get_file_contents(filename):
try:
with open(filename, 'r') as f:
return f.read().strip()
except FileNotFoundError:
print("'%s' file not found" % filename)
api_key = get_file_contents('api_key.txt')
openai.api_key = api_key
# Set up your OpenAI API credentials
openai.api_key = api_key
# Define the function to interact with ChatGPT
def chat_with_gpt(prompt):
response = openai.Completion.create(
engine='text-davinci-003', # Choose the appropriate GPT model
prompt=prompt,
max_tokens=100, # Adjust as needed to control the response length
temperature=0.7, # Adjust as needed to control the response randomness
)
return response.choices
# Step 1: Receive the mathematical expression from the user
expression = input("Enter a mathematical expression: ")
# Step 2: Prepare the input for ChatGPT
prompt = f"Given the expression '{expression}', please generate a function that does not contain floating-point extensions or iterations."
# Step 3: Interact with ChatGPT
response_choices = chat_with_gpt(prompt)
# Step 4: Parse and process the response choices
generated_functions = [choice.text.strip() for choice in response_choices]
# Print the generated functions
for i, function in enumerate(generated_functions, start=1):
print(f"Generated function {i}: {function}")
| [
"Given the expression 'PLACEHOLDER', please generate a function that does not contain floating-point extensions or iterations."
] |
2024-01-10 | ShreyAgarwal310/math-gpt | math-gpt.py | import sys
sys.path.insert(0, '/Users/shreyagarwal/Code/GitHub/MATH-GPT/declarative-math-word-problem')
sys.path.insert(0, '/Users/shreyagarwal/Code/GitHub/MATH-GPT/declarative-math-word-problem/prompts')
sys.path.insert(0, '/Users/shreyagarwal/Code/GitHub/MATH-GPT/pal')
sys.path.insert(0, '/Users/christinaxu/Documents/GitHub/declarative-math-word-problem')
sys.path.insert(0, '/Users/christinaxu/Documents/GitHub/declarative-math-word-problem/prompts')
sys.path.insert(0, '/Users/christinaxu/Documents/GitHub/math-gpt/pal')
import tkinter as tk
from tkinter.ttk import *
from utils import *
from declarative_three_shot import DECLARATIVE_THREE_SHOT_AND_PRINCIPLES
import openai
import pal
from pal.prompt import math_prompts
from langchain.chains import PALChain
from langchain import OpenAI, LLMMathChain
from langchain.chains import PALChain
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.llms import OpenAI
import os
# access the api key from whatever file you have it in
def get_file_contents(filename):
try:
with open(filename, 'r') as f:
return f.read().strip()
except FileNotFoundError:
print("'%s' file not found" % filename)
api_key = get_file_contents('api_key.txt')
openai.api_key = api_key
root= tk.Tk()
root.title('math-gpt')
root.resizable(False, False)
# initializing the pal model and interface
MODEL = 'text-davinci-003' #@param {type:"string"}m
interface = pal.interface.ProgramInterface(model=MODEL, get_answer_expr='solution()', verbose=True)
# initializing the chat-gpt prompter for vanilla da-vinci
def chat_with_gpt(prompt):
response=openai.Completion.create(
engine='text-davinci-003', # Choose the appropriate GPT model
prompt=prompt,
max_tokens=100, # Adjust as needed to control the response length
temperature=0.7, # Adjust as needed to control the response randomness
)
return response.choices
# initializing all of the langchain models and tools
os.environ["OPENAI_API_KEY"] = api_key
llm = OpenAI(temperature=0, model_name="text-davinci-003")
tools = load_tools(["llm-math"], llm=llm)
agent = initialize_agent(
tools,
llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
return_intermediate_steps=True,
)
# initialize the canvas
canvas1 = tk.Canvas(root, width=750, height=750, bg = "white")
canvas1.pack()
# title text
title_text = tk.Label(canvas1, bg="white", fg="black", height=1, width=10, font=("Times New Roman", 36))
title_text.place(relx=0.5, y=30, anchor="center")
title_text.config(text='Math-GPT')
names_text = tk.Label(canvas1, bg="white", fg="black", height=1, width=53, font=("Times New Roman", 20))
names_text.place(relx=0.5, y=60, anchor="center")
names_text.config(text='Shrey Agarwal, Christina Xu, Hamid Bagheri, Lisong Xu')
prompt_label = tk.Label(canvas1, bg="white", fg="black", height=1, width=50, font=("Times New Roman", 20))
prompt_label.place(relx=0.5, y=100, anchor="center")
prompt_label.config(text='Enter Prompt:')
method_label = tk.Label(canvas1, bg="white", fg="black", height=1, width=50, font=("Times New Roman", 20))
method_label.place(relx=0.5, y=200, anchor="center")
method_label.config(text="Choose your method after you've entered your prompt:")
answer_label = tk.Label(canvas1, bg="white", fg="black", height=1, width=50, font=("Times New Roman", 20))
answer_label.place(relx=0.5, y=275, anchor="center")
answer_label.config(text="The answer will be displayed here:")
explanation_label = tk.Label(canvas1, bg="white", fg="black", height=1, width=100, font=("Times New Roman", 14))
explanation_label.place(relx=0.5, y=340, anchor="center")
explanation_label.config(text="For the Vanilla DaVinci, LangChain, and the Symbolic Solver, an explanation will be provided here:")
# create the entry box
entry1 = tk.Text(root, height = 3, font = ('Times New Roman', 16), bg = "white", fg = "black")
#entry1.pack(padx = 10, pady = 10)
entry1.place(relx=0.5, y = 150, anchor="center")
#entry1 = tk.Entry(width=50, font=("Arial 16"), bg="white", fg="black", justify='center')
#entry1.pack(padx=10, pady=10)
#entry1.place(relx=0.5, y = 150, anchor="center")
# function to call for using vanilla davinci
def use_vanilla_davinci():
expression = entry1.get("1.0", 'end-1c')
# finding the answer
prompt_for_answer = f"Given the expression '{expression}', please generate an answer."
response_choices = chat_with_gpt(prompt_for_answer)
answer = [choice.text.strip() for choice in response_choices]
# finding the explanation
prompt_for_explanation = f"Given the expression '{expression}', please write a solution that correctly addresses the problem and solves it."
response_choices_for_explanation = chat_with_gpt(prompt_for_explanation)
answer_for_explanation = [choice.text.strip() for choice in response_choices_for_explanation]
# configuring labels to display answer and explanation
explanation_text.config(text="")
answer_text.config(text=f"Vanilla answer: '{answer}'.")
explanation_text.config(text=answer_for_explanation)
# function to call for using langchain
def use_langchain():
x1 = entry1.get("1.0", 'end-1c')
llm = OpenAI(temperature = 0)
llm_math = LLMMathChain.from_llm(llm, verbose = True)
answer = llm_math.run(x1)
response = agent(
{
"input": x1
}
)
l = response["intermediate_steps"]
# manipulating AgentAction namedTuple to find answer and explanation
if len(l) >= 2:
answer = ""
explanation = ""
answer = (str(l[len(l) - 1][1]))
for i in l:
explanation += str(i[0]).split(", ", 2)[2][6:-2]
else:
        step = l[0]
        answer = ""
        explanation = ""
        answer = str(step[1])
        explanation = str(step[0]).split(", ", 2)[2][6:-2]
explanation_text.config(text="")
answer_text.config(text=answer)
explanation_text.config(text=explanation)
# function to call for using PAL
def use_pal():
x1 = entry1.get("1.0", 'end-1c')
prompt = math_prompts.MATH_PROMPT.format(question=x1)
answer = interface.run(prompt)
explanation = ""
explanation_text.config(text="")
answer_text.config(text=answer)
explanation_text.config(text=explanation)
# function to call for using the symbolic solver
def use_symbolic_solver():
x1 = entry1.get("1.0", 'end-1c')
eq_list = get_declarative_equations(model='text-davinci-003', question=x1, prompt=DECLARATIVE_THREE_SHOT_AND_PRINCIPLES, max_tokens=600, stop_token='\n\n\n', temperature=0)
answer = get_final_using_sympy(eq_list)
explanation_text.config(text="")
answer_text.config(text=f"Symbolic Solver answer: '{answer}'.")
explanation_text.config(text=eq_list)
# creating all the buttons and the answer text
button1 = tk.Button(bg = "white", font=("Times New Roman", 18), text='Vanilla DaVinci', borderwidth = 0, relief = "groove", command=use_vanilla_davinci)
canvas1.create_window(212, 240, window=button1)
button2 = tk.Button(bg = "white", font=("Times New Roman", 18), text='LangChain', borderwidth = 0, relief = "groove", command=use_langchain)
canvas1.create_window(345, 240, window=button2)
button3 = tk.Button(bg = "white", font=("Times New Roman", 18), text='PAL', borderwidth = 0, relief = "groove", command=use_pal)
canvas1.create_window(435, 240, window=button3)
button3 = tk.Button(bg = "white", font=("Times New Roman", 18), text='Symbolic Solver', borderwidth = 0, relief = "groove", command=use_symbolic_solver)
canvas1.create_window(546, 240, window=button3)
answer_text = tk.Label(canvas1, bg="white", fg="black", height=1, width=65, font=("Times New Roman", 18), borderwidth = 3, relief = "groove")
answer_text.place(relx=0.5, y=310, anchor="center")
explanation_text = tk.Label(canvas1, bg="white", fg="black", height=10, width=65, font=("Times New Roman", 18), wraplength=300, justify='center', borderwidth = 3, relief = "groove")
explanation_text.place(relx=0.5, y=460, anchor="center")
root.mainloop() | [
"Given the expression 'PLACEHOLDER', please generate an answer.",
"Given the expression 'PLACEHOLDER', please write a solution that correctly addresses the problem and solves it.",
"Times New Roman"
] |
2024-01-10 | ShreyAgarwal310/math-gpt | pal_test.py | from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.chains import PALChain
import openai
import pal
from pal.prompt import math_prompts
def get_file_contents(filename):
try:
with open(filename, 'r') as f:
return f.read().strip()
except FileNotFoundError:
print("'%s' file not found" % filename)
api_key = get_file_contents('api_key.txt')
openai.api_key = api_key
OPENAI_API_KEY = api_key
interface = pal.interface.ProgramInterface(
model='text-davinci-003',
stop='\n\n\n', # stop generation str for Codex API
get_answer_expr='solution()' # python expression evaluated after generated code to obtain answer
)
question = 'Bob says to Alice: if you give me 3 apples and then take half of my apples away, then I will be left with 13 apples. How many apples do I have now?'
prompt = math_prompts.MATH_PROMPT.format(question=question)
answer = interface.run(prompt)
print(answer)
# llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.9, openai_api_key=api_key)
# palchain = PALChain.from_math_prompt(llm=llm, verbose=True)
# palchain.run("If my age is half of my dad's age and he is going to be 60 next year, what is my current age?")
# print(palchain.prompt.template) | [] |
2024-01-10 | ShreyAgarwal310/math-gpt | declarative-math-word-problem~experiment.py | from utils import *
from prompts.declarative_eight_shot import DECLARATIVE_EIGHT_SHOT
import openai
import time
#results - three-shot - 176/222 correct - 79.279% accuracy
#results - eight-shot - 158/222 correct - 71.171% accuracy
st = time.time()
def get_file_contents(filename):
try:
with open(filename, 'r') as f:
return f.read().strip()
except FileNotFoundError:
print("'%s' file not found" % filename)
api_key = get_file_contents('api_key.txt')
openai.api_key = api_key
with open('declarative-math-word-problem/algebra222.csv') as f:
questions = [i.split(',')[0] for i in f.readlines()]
with open('declarative-math-word-problem/algebra222.csv') as f:
answers = [i.split(',')[1] for i in f.readlines()]
solver_answers = []
for i in range(0, 24):
eq_list = get_declarative_equations(model='text-davinci-003', question=questions[i], prompt=DECLARATIVE_EIGHT_SHOT, max_tokens=600, stop_token='\n\n\n', temperature=0)
answer = get_final_using_sympy(eq_list)
solver_answers.append(answer)
print(*solver_answers, sep = '\n')
et = time.time()
elapsed_time = et - st
print('Execution time:', elapsed_time, 'seconds') | [] |
2024-01-10 | ShreyAgarwal310/math-gpt | pal~pal_test.py | from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.chains import PALChain
import openai
import pal
from pal.prompt import math_prompts
def get_file_contents(filename):
try:
with open(filename, 'r') as f:
return f.read().strip()
except FileNotFoundError:
print("'%s' file not found" % filename)
api_key = get_file_contents('api_key.txt')
openai.api_key = api_key
interface = pal.interface.ProgramInterface(
model='text-davinci-003',
stop='\n\n\n', # stop generation str for Codex API
get_answer_expr='solution()' # python expression evaluated after generated code to obtain answer
)
# def solution():
# "Bob says to Alice: if you give me 3 apples and then take half of my apples away, then I will be left with 13 apples. How many apples do I have now?"
# apple_given = 3
# apple_left = 13
# apple_now = (apple_left + apple_given) * 2
# result = apple_now
# return result
# print(solution())
question = 'Bob says to Alice: if you give me 3 apples and then take half of my apples away, then I will be left with 13 apples. How many apples do I have now?'
prompt = math_prompts.MATH_PROMPT.format(question=question)
llm = OpenAI(model_name="text-davinci-003", temperature=0.9, openai_api_key=api_key)
palchain = PALChain.from_math_prompt(llm=llm, verbose=True)
palchain.run("If my age is half of my dad's age and he is going to be 60 next year, what is my current age?")
answer = interface.run(prompt)
print(answer) | [] |
2024-01-10 | ShreyAgarwal310/math-gpt | pal_with_langchain.py | import openai
import sys
sys.path.insert(0, '/Users/shreyagarwal/Code/GitHub/MATH-GPT/pal')
import pal
from pal.prompt import math_prompts
from langchain import OpenAI
from langchain.chains.llm import LLMChain
from langchain.chains import PALChain
import os
def get_file_contents(filename):
try:
with open(filename, 'r') as f:
return f.read().strip()
except FileNotFoundError:
print("'%s' file not found" % filename)
api_key = get_file_contents('api_key.txt')
openai.api_key = api_key
os.environ["OPENAI_API_KEY"] = api_key
# MODEL = 'text-davinci-003' #@param {type:"string"}m
# interface = pal.interface.ProgramInterface(model=MODEL, get_answer_expr='solution()', verbose=True)
# question = "Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy. If Cindy has four pets, how many total pets do the three have?"#@param {type:"string"}
# prompt = math_prompts.MATH_PROMPT.format(question=question)
# answer = interface.run(prompt, time_out=10)
# print('\n==========================')
# print(f'The answer is {answer}.')
llm = OpenAI(model_name='text-davinci-003', temperature=0, max_tokens=512)
pal_chain = PALChain.from_math_prompt(llm, verbose=True)
question = "The cafeteria had 23 apples. If they used 20 for lunch and bought 6 more, how many apples do they have?"
pal_chain.run(question) | [] |
2024-01-10 | nabil2i/recordplus | record~tasks.py | from __future__ import absolute_import, unicode_literals
from celery import shared_task
from time import sleep
import openai
from .models import RecordedVideo, Transcription
from openai.error import OpenAIError
# import whisper
@shared_task
def transcribe_video(video_id):
# sleep(10)
print("Transcribing video...")
try:
# get video path
print(f"video id: {video_id}")
video = RecordedVideo.objects.get(pk=video_id)
video_file_path = video.get_video_file_url()
if not video_file_path:
print(f"Video with ID {video_id} not found")
return
with open(video_file_path, 'rb') as video_file:
response = openai.Audio.transcribe("whisper-1", video_file)
# model = whisper.load_model("base")
# response = model.transcribe(video_file)
# with open(video_file_path, 'rb') as video_file:
# response = openai.Transcription.create(
# audio=video_file,
# engine="whisper",
# language="en-US",
# max_tokens=300,
# )
if 'text' in response:
transcription_text = response['text']
print(transcription_text)
transcription, created = Transcription.objects.get_or_create(
recorded_video=video,
defaults={'transcription_text': transcription_text}
)
if not created:
transcription.transcription_text = transcription_text
transcription.save()
print("Video transcribed")
else:
print(f"Error in OpenAI response: {response}")
except RecordedVideo.DoesNotExist:
print(f"Video with ID {video_id} does not exist.")
except OpenAIError as e:
print(f"OpenAI Error: {str(e)}")
except Exception as e:
print(f"Error transcribing the video {video_id}: {str(e)}")
| [] |
2024-01-10 | nabil2i/recordplus | recordplus~settings.py | """
Django settings for recordplus project.
Generated by 'django-admin startproject' using Django 4.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/4.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.2/ref/settings/
"""
import os
from datetime import timedelta
from pathlib import Path
import openai
# import cloudinary
# import cloudinary.uploader
# import cloudinary.api
from decouple import config
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# BASE_URL= config(BASE_URL)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
openai.api_key = config('OPENAI_API_KEY')
## CLOUDINARY configuration
# CLOUDINARY_CLOUD_NAME = config('CLOUDINARY_CLOUD_NAME')
# CLOUDINARY_API_KEY = config('CLOUDINARY_API_KEY')
# CLOUDINARY_API_SECRET = config('CLOUDINARY_API_SECRET')
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = True
DEBUG = config('DEBUG', default='False')
ALLOWED_HOSTS = ['localhost', '127.0.0.1', 'recordplus.onrender.com']
AUTH_USER_MODEL = 'core.User'
SITE_ID = 1
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 3rd party apps
'rest_framework',
'rest_framework_simplejwt',
'rest_framework_simplejwt.token_blacklist',
'djoser',
'corsheaders',
'whitenoise.runserver_nostatic',
'drf_yasg',
# # all auth
# 'django.contrib.sites',
# 'allauth',
# 'allauth.account',
# 'allauth.socialaccount',
# 'allauth.socialaccount.providers.facebook',
# 'allauth.socialaccount.providers.twitter',
# 'allauth.socialaccount.providers.google',
# my apps
'core',
'record',
# 'social_auth'
]
# SOCIALACCOUNT_LOGIN_ON_GET=True # skip one page when authenticating
# # Authentication URLs
# LOGIN_REDIRECT_URL = '/api/auth'
# # ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# LOGOUT_REDIRECT_URL = '/api/auth'
# SOCIALACCOUNT_PROVIDERS = {
# 'google': {
# # For each OAuth based provider, either add a ``SocialApp``
# # (``socialaccount`` app) containing the required client
# # credentials, or list them here:
# # 'APP': {
# # 'client_id': config('GOOGLE_CLIENT_ID'),
# # 'secret': config('GOOGLE_CLIENT_SECRET'),
# # 'key': ''
# # },
# 'SCOPE': [
# 'profile',
# 'email',
# ],
# 'AUTH_PARAMS': {
# 'access_type': 'online',
# }
# },
# 'facebook': {
# 'APP': {
# 'client_id': config('FACEBOOK_APP_ID'),
# 'secret': '',
# }
# },
# 'twitter': {
# 'APP': {
# 'consumer_key': config('TWITTER_API_KEY'),
# 'secret': config('TWITTER_CONSUMER_SECRET'),
# }
# },
# }
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# "allauth.account.middleware.AccountMiddleware",
]
ROOT_URLCONF = 'recordplus.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# 'DIRS': [],
'DIRS': [os.path.join(BASE_DIR, 'build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'recordplus.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': config('DB_NAME'),
# 'USER': config('DB_USER'),
# 'PASSWORD': config('DB_PASSWORD'),
# 'HOST': config('DB_HOST'),
# 'PORT': config('DB_PORT')
# }
# }
# Password validation
# https://docs.djangoproject.com/en/4.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
CORS_ALLOW_ALL_ORIGINS = True
# CORS_ALLOWED_ORIGINS = [
# "http://34.207.165.115/",
# "https://recordplus.onrender.com/",
# "http://127.0.0.1:8000/",
# "http://localhost:5173/",
# ]
# CORS_ALLOW_METHODS = [
# "GET",
# "POST",
# "PUT",
# "PATCH",
# "DELETE",
# "OPTIONS",
# ]
# CORS_ALLOW_HEADERS = [
# "Accept",
# "Content-Type",
# "Authorization",
# ]
# CORS_ALLOW_CREDENTIALS = True
# Internationalization
# https://docs.djangoproject.com/en/4.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'build/static')
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Default primary key field type
# https://docs.djangoproject.com/en/4.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
REST_FRAMEWORK = {
'NON_FIELD_ERRORS_KEY': 'error',
# 'DEFAULT_SCHEMA_CLASS': 'drf_spectacular.openapi.AutoSchema',
# 'COERCE_DECIMAL_TO_STRING': False,
'PAGE_SIZE':10,
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
# 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_simplejwt.authentication.JWTAuthentication',
),
# 'DEFAULT_PERMISSION_CLASSES': [
# 'rest_framework.permissions.IsAuthenticated'
# ],
}
SIMPLE_JWT = {
'AUTH_HEADER_TYPES': ('JWT',),
'ACCESS_TOKEN_LIFETIME': timedelta(minutes=5),
'REFRESH_TOKEN_LIFETIME': timedelta(days=1),
'ALGORITHM': 'HS256',
}
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS=True
EMAIL_HOST = config('EMAIL_HOST')
EMAIL_PORT = config('EMAIL_PORT')
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')
# DEFAULT_FROM_EMAIL="[email protected]"
AUTHENTICATION_BACKENDS = [
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
## `allauth` specific authentication methods, such as login by email
# 'allauth.account.auth_backends.AuthenticationBackend',
]
SWAGGER_SETTINGS = {
'SECURITY_DEFINITIONS' : {
'Bearer': {
'type': 'apiKey',
'name': 'Authorization',
'in': 'header'
}
}
}
DJOSER = {
'LOGIN_FIELD': 'email',
'USER_CREATE_PASSWORD_RETYPE': True,
'USERNAME_CHANGED_EMAIL_CONFIRMATION': True,
'PASSWORD_CHANGED_EMAIL_CONFIRMATION': True,
'PASSWORD_RESET_CONFIRM_URL': 'password/reset/confirm/{uid}/{token}',
'USERNAME_RESET_CONFIRM_URL': 'email/reset/confirm/{uid}/{token}',
'ACTIVATION_URL': 'activate/{uid}/{token}',
'SEND_CONFIRMATION_EMAIL': True,
'SEND_ACTIVATION_EMAIL': True,
'SET_USERNAME_RETYPE': True,
'SET_PASSWORD_RETYPE': True,
'SERIALIZERS': {
'user_create': 'core.serializers.UserCreateSerializer',
'user': 'core.serializers.UserCreateSerializer',
'user_delete': 'core.serializers.UserDeleteSerializer',
},
}
# SPECTACULAR_SETTINGS = {
# 'TITLE': 'RECORD PLUS',
# }
CELERY_BROKER_URL = config('CELERY_BROKER_URL')
# CELERY_RESULT_BACKEND = 'rpc://' | [
"django.template.context_processors.request",
"django.contrib.messages.context_processors.messages",
"django.template.context_processors.debug",
"django.contrib.auth.context_processors.auth",
"django.template.backends.django.DjangoTemplates",
"context_processors"
] |
2024-01-10 | microsoft/unilm | kosmos-g~open_clip~src~open_clip~factory.py | import json
import logging
import os
import pathlib
import re
from copy import deepcopy
from pathlib import Path
from typing import Optional, Tuple
import torch
from .model import CLIP, convert_weights_to_fp16, resize_pos_embed
from .openai import load_openai_model
from .pretrained import get_pretrained_url, download_pretrained
from .transform import image_transform
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
_MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
def _rescan_model_configs():
global _MODEL_CONFIGS
config_ext = ('.json',)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f'*{ext}'))
for cf in config_files:
with open(cf, 'r') as f:
model_cfg = json.load(f)
if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')):
_MODEL_CONFIGS[cf.stem] = model_cfg
_MODEL_CONFIGS = {k: v for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))}
_rescan_model_configs() # initial populate of model config registry
def load_state_dict(checkpoint_path: str, map_location='cpu'):
checkpoint = torch.load(checkpoint_path, map_location=map_location)
if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
if next(iter(state_dict.items()))[0].startswith('module'):
state_dict = {k[7:]: v for k, v in state_dict.items()}
return state_dict
def load_checkpoint(model, checkpoint_path, strict=True):
state_dict = load_state_dict(checkpoint_path)
resize_pos_embed(state_dict, model)
incompatible_keys = model.load_state_dict(state_dict, strict=strict)
return incompatible_keys
def create_model(
model_name: str,
pretrained: str = '',
precision: str = 'fp32',
device: torch.device = torch.device('cpu'),
jit: bool = False,
force_quick_gelu: bool = False,
pretrained_image: bool = False,
):
model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names
if pretrained.lower() == 'openai':
logging.info(f'Loading pretrained {model_name} from OpenAI.')
model = load_openai_model(model_name, device=device, jit=jit)
# See https://discuss.pytorch.org/t/valueerror-attemting-to-unscale-fp16-gradients/81372
if precision == "amp" or precision == "fp32":
model = model.float()
else:
if model_name in _MODEL_CONFIGS:
logging.info(f'Loading {model_name} model config.')
model_cfg = deepcopy(_MODEL_CONFIGS[model_name])
else:
logging.error(f'Model config for {model_name} not found; available models {list_models()}.')
raise RuntimeError(f'Model config for {model_name} not found.')
if force_quick_gelu:
# override for use of QuickGELU on non-OpenAI transformer models
model_cfg["quick_gelu"] = True
if pretrained_image:
if 'timm_model_name' in model_cfg.get('vision_cfg', {}):
# pretrained weight loading for timm models set via vision_cfg
model_cfg['vision_cfg']['timm_model_pretrained'] = True
else:
assert False, 'pretrained image towers currently only supported for timm models'
model = CLIP(**model_cfg)
if pretrained:
checkpoint_path = ''
url = get_pretrained_url(model_name, pretrained)
if url:
checkpoint_path = download_pretrained(url)
elif os.path.exists(pretrained):
checkpoint_path = pretrained
if checkpoint_path:
logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
load_checkpoint(model, checkpoint_path)
else:
logging.warning(f'Pretrained weights ({pretrained}) not found for model {model_name}.')
raise RuntimeError(f'Pretrained weights ({pretrained}) not found for model {model_name}.')
model.to(device=device)
if precision == "fp16":
assert device.type != 'cpu'
convert_weights_to_fp16(model)
if jit:
model = torch.jit.script(model)
return model
def create_model_and_transforms(
model_name: str,
pretrained: str = '',
precision: str = 'fp32',
device: torch.device = torch.device('cpu'),
jit: bool = False,
force_quick_gelu: bool = False,
pretrained_image: bool = False,
mean: Optional[Tuple[float, ...]] = None,
std: Optional[Tuple[float, ...]] = None,
):
model = create_model(
model_name, pretrained, precision, device, jit,
force_quick_gelu=force_quick_gelu,
pretrained_image=pretrained_image)
preprocess_train = image_transform(model.visual.image_size, is_train=True, mean=mean, std=std)
preprocess_val = image_transform(model.visual.image_size, is_train=False, mean=mean, std=std)
return model, preprocess_train, preprocess_val
def list_models():
""" enumerate available model architectures based on config files """
return list(_MODEL_CONFIGS.keys())
def add_model_config(path):
""" add model config path or file and update registry """
if not isinstance(path, Path):
path = Path(path)
_MODEL_CONFIG_PATHS.append(path)
_rescan_model_configs()
| [] |
2024-01-10 | TammyPhysique/PaBuddytest | src~assets~Chatbot.py | import os
import openai
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv()) # read local .env file
openai.api_key = "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" | [] |
2024-01-10 | linyuxuanlin/Wiki_MkDocs | tools~auto-translater.py | # -*- coding: utf-8 -*-
import os
import openai # pip install openai
import sys
import re
import yaml # pip install PyYAML
#import env
# Set the OpenAI API Key and API Base parameters (passed in via env.py)
openai.api_key = os.environ.get("CHATGPT_API_KEY")
openai.api_base = os.environ.get("CHATGPT_API_BASE")
# Maximum input length; longer text is split into chunks to stay within the input limit
max_length = 1800
# Paths used for translation
dir_to_translate = "docs/zh"
dir_translated = {"en": "docs/en", "es": "docs/es", "ar": "docs/ar"}
# Files excluded from translation
exclude_list = ["index.md", "Contact-and-Subscribe.md", "WeChat.md"] # files that will not be translated
processed_list = "tools/processed_list.txt" # list of already-processed Markdown filenames, generated automatically
# Note appended to posts translated by ChatGPT
tips_translated_by_chatgpt = {
"en": "\n\n> This post is translated using ChatGPT, please [**feedback**](https://github.com/linyuxuanlin/Wiki_MkDocs/issues/new) if any omissions.",
"es": "\n\n> Este post estรก traducido usando ChatGPT, por favor [**feedback**](https://github.com/linyuxuanlin/Wiki_MkDocs/issues/new) si hay alguna omisiรณn.",
"ar": "\n\n> ุชู
ุช ุชุฑุฌู
ุฉ ูุฐู ุงูู
ุดุงุฑูุฉ ุจุงุณุชุฎุฏุงู
ChatGPTุ ูุฑุฌู [**ุชุฒููุฏูุง ุจุชุนูููุงุชูู
**](https://github.com/linyuxuanlin/Wiki_MkDocs/issues/new) ุฅุฐุง ูุงูุช ููุงู ุฃู ุญุฐู ุฃู ุฅูู
ุงู."
}
# Marker for posts originally written in English, to avoid re-translating English posts into English
marker_written_in_en = "\n> This post was originally written in English.\n"
# Marker that forces re-translation even when the file is already in the processed list
marker_force_translate = "\n[translate]\n"
# Front Matter processing rules
front_matter_translation_rules = {
    # Translated automatically by ChatGPT
    "title": lambda value, lang: translate_text(value, lang,"front-matter"),
    "description": lambda value, lang: translate_text(value, lang,"front-matter"),
    # Replaced using fixed substitution rules
    "categories": lambda value, lang: front_matter_replace(value, lang),
    "tags": lambda value, lang: front_matter_replace(value, lang),
    # Fields not listed here are left untranslated by default
}
# Fixed-field replacement rules. Some recurring fields do not need to be translated for every post, and machine translation of them can be inconsistent, so they are replaced directly.
replace_rules = [
{
        # Copyright notice, translated manually
        "orginal_text": "> 原文地址:<https://wiki-power.com/>",
        "replaced_text": {
            "en": "> Original: <https://wiki-power.com/>",
            "es": "> Dirección original del artículo: <https://wiki-power.com/>",
            "ar": "> عنوان النص: <https://wiki-power.com/>",
}
},
{
        # Copyright notice, translated manually
        "orginal_text": "> 本篇文章受 [CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by/4.0/deed.zh) 协议保护,转载请注明出处。",
        "replaced_text": {
            "en": "> This post is protected by [CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by/4.0/deed.en) agreement, should be reproduced with attribution.",
            "es": "> Este artículo está protegido por la licencia [CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by/4.0/deed.zh). Si desea reproducirlo, por favor indique la fuente.",
            "ar": "> يتم حماية هذا المقال بموجب اتفاقية [CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by/4.0/deed.zh)، ويرجى ذكر المصدر عند إعادة النشر.",
}
},
#{
    #    # Internal links inside posts, redirected to the page in the same target language
# "orginal_text": "](https://wiki-power.com/",
# "replaced_text": {
# "en": "](https://wiki-power.com/en/",
# "es": "](https://wiki-power.com/es/",
# "ar": "](https://wiki-power.com/ar/",
# }
#}
    # {
    #     # Different languages may use different image hosting
    #     "orginal_text": ":
]
# Apply the fixed replacement rules (front_matter_replace_rules) to a Front Matter list value
def front_matter_replace(value, lang):
for index in range(len(value)):
element = value[index]
# print(f"element[{index}] = {element}")
for replacement in front_matter_replace_rules:
if replacement["orginal_text"] in element:
                # Replace each occurrence using str.replace
element = element.replace(
replacement["orginal_text"], replacement["replaced_text"][lang])
value[index] = element
# print(f"element[{index}] = {element}")
return value
# Function that calls the ChatGPT API to translate text
def translate_text(text, lang, type):
target_lang = {
"en": "English",
"es": "Spanish",
"ar": "Arabic"
}[lang]
    # Front Matter and the main body are translated with different prompts
    # Translate the Front Matter.
if type == "front-matter":
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a professional translation engine, please translate the text into a colloquial, professional, elegant and fluent content, without the style of machine translation. You must only translate the text content, never interpret it."},
{"role": "user", "content": f"Translate into {target_lang}:\n\n{text}\n"},
],
)
    # Translate the main body
    elif type == "main-body":
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a professional translation engine, please translate the text into a colloquial, professional, elegant and fluent content, without the style of machine translation. You must maintain the original markdown format. You must not translate the `[to_be_replace[x]]` field.You must only translate the text content, never interpret it."},
{"role": "user", "content": f"Translate into {target_lang}:\n\n{text}\n"},
],
)
    # Get the translation result
output_text = completion.choices[0].message.content
return output_text
# Apply the Front Matter processing rules
def translate_front_matter(front_matter, lang):
translated_front_matter = {}
for key, value in front_matter.items():
if key in front_matter_translation_rules:
processed_value = front_matter_translation_rules[key](value, lang)
else:
            # Keys not in the rules list are left untranslated and unreplaced
processed_value = value
translated_front_matter[key] = processed_value
# print(key, ":", processed_value)
return translated_front_matter
# Function that splits an article into chunks
def split_text(text, max_length):
    # Split the article by paragraphs
paragraphs = text.split("\n\n")
output_paragraphs = []
current_paragraph = ""
for paragraph in paragraphs:
if len(current_paragraph) + len(paragraph) + 2 <= max_length:
            # If the current chunk plus the new paragraph fits within the max length, merge them
if current_paragraph:
current_paragraph += "\n\n"
current_paragraph += paragraph
else:
            # Otherwise add the current chunk to the output list and start a new one
output_paragraphs.append(current_paragraph)
current_paragraph = paragraph
    # Add the final chunk to the output list
if current_paragraph:
output_paragraphs.append(current_paragraph)
    # Join the output chunks into a single string
output_text = "\n\n".join(output_paragraphs)
return output_text
# Function that translates a single file
def translate_file(input_file, filename, lang):
print(f"Translating into {lang}: {filename}")
sys.stdout.flush()
    # Determine the output file
if lang in dir_translated:
output_dir = dir_translated[lang]
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_file = os.path.join(output_dir, filename)
    # Read the input file content
with open(input_file, "r", encoding="utf-8") as f:
input_text = f.read()
    # Create a dict mapping placeholders to their replacement text
placeholder_dict = {}
    # Apply the replacement rules, swapping matched text for placeholders
for i, rule in enumerate(replace_rules):
find_text = rule["orginal_text"]
replace_with = rule["replaced_text"][lang]
placeholder = f"[to_be_replace[{i + 1}]]"
input_text = input_text.replace(find_text, placeholder)
placeholder_dict[placeholder] = replace_with
    # Remove the force-translate marker from the text
input_text = input_text.replace(marker_force_translate, "")
    # For non-English targets, remove the marker_written_in_en marker
if lang != "en":
input_text = input_text.replace(marker_written_in_en, "")
    # Use a regular expression to match the Front Matter
front_matter_match = re.search(r'---\n(.*?)\n---', input_text, re.DOTALL)
if front_matter_match:
front_matter_text = front_matter_match.group(1)
        # Load the YAML-formatted data with PyYAML
front_matter_data = yaml.safe_load(front_matter_text)
        # Translate the Front Matter according to the rules above
front_matter_data = translate_front_matter(front_matter_data, lang)
        # Convert the processed data back to YAML
front_matter_text_processed = yaml.dump(
front_matter_data, allow_unicode=True, default_style=None, sort_keys=False)
        # Temporarily remove the unprocessed Front Matter
input_text = input_text.replace(
"---\n"+front_matter_text+"\n---\n", "")
else:
# print("ๆฒกๆๆพๅฐfront matter๏ผไธ่ฟ่กๅค็ใ")
pass
    # print(input_text) # for debugging: inspect the input text
    # Split the article
paragraphs = input_text.split("\n\n")
input_text = ""
output_paragraphs = []
current_paragraph = ""
for paragraph in paragraphs:
if len(current_paragraph) + len(paragraph) + 2 <= max_length:
            # If the current chunk plus the new paragraph fits within the max length, merge them
if current_paragraph:
current_paragraph += "\n\n"
current_paragraph += paragraph
else:
            # Otherwise translate the current chunk and append the result to the output list
output_paragraphs.append(translate_text(current_paragraph, lang,"main-body"))
current_paragraph = paragraph
    # Handle the final chunk
if current_paragraph:
if len(current_paragraph) + len(input_text) <= max_length:
            # If the final chunk plus the accumulated text fits within the max length, merge them
input_text += "\n\n" + current_paragraph
else:
            # Otherwise translate the current chunk and append the result to the output list
output_paragraphs.append(translate_text(current_paragraph, lang,"main-body"))
    # If there is still untranslated text, translate and append it
if input_text:
output_paragraphs.append(translate_text(input_text, lang,"main-body"))
    # Join the output chunks into a single string
output_text = "\n\n".join(output_paragraphs)
if front_matter_match:
        # Re-attach the Front Matter
output_text = "---\n" + front_matter_text_processed + "---\n\n" + output_text
    # Append the translated-by-ChatGPT note
if lang == "en":
output_text = output_text + tips_translated_by_chatgpt["en"]
elif lang == "es":
output_text = output_text + tips_translated_by_chatgpt["es"]
elif lang == "ar":
output_text = output_text + tips_translated_by_chatgpt["ar"]
    # Finally, replace the placeholders with their corresponding text
for placeholder, replacement in placeholder_dict.items():
output_text = output_text.replace(placeholder, replacement)
    # Write the output file
with open(output_file, "w", encoding="utf-8") as f:
f.write(output_text)
# Sort the file names
file_list = os.listdir(dir_to_translate)
sorted_file_list = sorted(file_list)
# print(sorted_file_list)
try:
    # Create an external list file that stores the names of processed Markdown files
if not os.path.exists(processed_list):
with open(processed_list, "w", encoding="utf-8") as f:
print("processed_list created")
sys.stdout.flush()
    # Iterate over all .md files in the directory and translate them
for filename in sorted_file_list:
if filename.endswith(".md"):
input_file = os.path.join(dir_to_translate, filename)
            # Read the Markdown file content
with open(input_file, "r", encoding="utf-8") as f:
md_content = f.read()
            # Read the processed_list content
with open(processed_list, "r", encoding="utf-8") as f:
processed_list_content = f.read()
            if marker_force_translate in md_content: # run this branch when the force-translate marker is present
                if marker_written_in_en in md_content: # translate into languages other than English
print("Pass the en-en translation: ", filename)
sys.stdout.flush()
translate_file(input_file, filename, "es")
translate_file(input_file, filename, "ar")
                else: # translate into all languages
translate_file(input_file, filename, "en")
translate_file(input_file, filename, "es")
translate_file(input_file, filename, "ar")
            elif filename in exclude_list: # skip translation
print(f"Pass the post in exclude_list: {filename}")
sys.stdout.flush()
            elif filename in processed_list_content: # skip translation
print(f"Pass the post in processed_list: {filename}")
sys.stdout.flush()
            elif marker_written_in_en in md_content: # translate into languages other than English
print(f"Pass the en-en translation: {filename}")
sys.stdout.flush()
for lang in ["es", "ar"]:
translate_file(input_file, filename, lang)
                else: # translate into all languages
for lang in ["en", "es", "ar"]:
translate_file(input_file, filename, lang)
                # Add the processed file name to the list so it is skipped next time
if filename not in processed_list_content:
print(f"Added into processed_list: {filename}")
with open(processed_list, "a", encoding="utf-8") as f:
f.write("\n")
f.write(filename)
                # Force-flush the buffer to the terminal so progress is visible in real time when running in GitHub Actions
sys.stdout.flush()
    # Notice that all tasks are done
print("Congratulations! All files processed done.")
sys.stdout.flush()
except Exception as e:
        # Catch exceptions and print the error message
print(f"An error has occurred: {e}")
sys.stdout.flush()
        raise SystemExit(1) # 1 means an abnormal exit; change the exit code as needed
        # os.remove(input_file) # delete the source file
| [
"Translate into PLACEHOLDER:\n\nPLACEHOLDER\n",
"You are a professional translation engine, please translate the text into a colloquial, professional, elegant and fluent content, without the style of machine translation. You must only translate the text content, never interpret it.",
"You are a professional translation engine, please translate the text into a colloquial, professional, elegant and fluent content, without the style of machine translation. You must maintain the original markdown format. You must not translate the `[to_be_replace[x]]` field.You must only translate the text content, never interpret it."
] |
2024-01-10 | linyuxuanlin/Wiki_MkDocs | tools~auto-translater_local.py | # -*- coding: utf-8 -*-
import os
import openai # pip install openai
import sys
import re
import yaml # pip install PyYAML
import env
# Set the OpenAI API Key and API Base parameters, passed in via env.py
openai.api_key = os.environ.get("CHATGPT_API_KEY")
openai.api_base = os.environ.get("CHATGPT_API_BASE")
# Maximum input length; longer input is split so it stays under the input size limit
max_length = 1800
# Set the translation paths
dir_to_translate = "docs/zh"
dir_translated = {"en": "docs/en", "es": "docs/es", "ar": "docs/ar"}
# List of files that should not be translated
exclude_list = ["index.md", "Contact-and-Subscribe.md", "WeChat.md"] # files excluded from translation
processed_list = "tools/processed_list.txt" # list of already-processed Markdown file names, generated automatically
# Notice appended to posts translated by ChatGPT
tips_translated_by_chatgpt = {
"en": "\n\n> This post is translated using ChatGPT, please [**feedback**](https://github.com/linyuxuanlin/Wiki_MkDocs/issues/new) if any omissions.",
"es": "\n\n> Este post estรก traducido usando ChatGPT, por favor [**feedback**](https://github.com/linyuxuanlin/Wiki_MkDocs/issues/new) si hay alguna omisiรณn.",
"ar": "\n\n> ุชู
ุช ุชุฑุฌู
ุฉ ูุฐู ุงูู
ุดุงุฑูุฉ ุจุงุณุชุฎุฏุงู
ChatGPTุ ูุฑุฌู [**ุชุฒููุฏูุง ุจุชุนูููุงุชูู
**](https://github.com/linyuxuanlin/Wiki_MkDocs/issues/new) ุฅุฐุง ูุงูุช ููุงู ุฃู ุญุฐู ุฃู ุฅูู
ุงู."
}
# Marker for posts originally written in English, so they are not re-translated into English
marker_written_in_en = "\n> This post was originally written in English.\n"
# Marker that forces re-translation even for files already in the processed list
marker_force_translate = "\n[translate]\n"
# Front Matter processing rules
front_matter_translation_rules = {
    # Translate automatically via ChatGPT
"title": lambda value, lang: translate_text(value, lang,"front-matter"),
"description": lambda value, lang: translate_text(value, lang,"front-matter"),
    # Use fixed replacement rules
"categories": lambda value, lang: front_matter_replace(value, lang),
"tags": lambda value, lang: front_matter_replace(value, lang),
    # Fields not listed here are not translated by default
}
# Fixed-field replacement rules. Some fixed fields in posts do not need to be translated every time, and their translations may be inconsistent, so they are replaced directly.
replace_rules = [
{
        # Copyright notice, translated manually
"orginal_text": "> ๅๆๅฐๅ๏ผ<https://wiki-power.com/>",
"replaced_text": {
"en": "> Original: <https://wiki-power.com/>",
"es": "> Direcciรณn original del artรญculo: <https://wiki-power.com/>",
"ar": "> ุนููุงู ุงููุต: <https://wiki-power.com/>",
}
},
{
        # Copyright notice, translated manually
"orginal_text": "> ๆฌ็ฏๆ็ซ ๅ [CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by/4.0/deed.zh) ๅ่ฎฎไฟๆค๏ผ่ฝฌ่ฝฝ่ฏทๆณจๆๅบๅคใ",
"replaced_text": {
"en": "> This post is protected by [CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by/4.0/deed.en) agreement, should be reproduced with attribution.",
"es": "> Este artรญculo estรก protegido por la licencia [CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by/4.0/deed.zh). Si desea reproducirlo, por favor indique la fuente.",
"ar": "> ูุชู
ุญู
ุงูุฉ ูุฐุง ุงูู
ูุงู ุจู
ูุฌุจ ุงุชูุงููุฉ [CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by/4.0/deed.zh)ุ ููุฑุฌู ุฐูุฑ ุงูู
ุตุฏุฑ ุนูุฏ ุฅุนุงุฏุฉ ุงููุดุฑ.",
}
},
#{
    #    # Internal links in the post jump to the page in the same language
# "orginal_text": "](https://wiki-power.com/",
# "replaced_text": {
# "en": "](https://wiki-power.com/en/",
# "es": "](https://wiki-power.com/es/",
# "ar": "](https://wiki-power.com/ar/",
# }
#}
# {
    #     # Different languages can use different image hosts
    #     "orginal_text": ":
]


# Front Matter fixed-field replacement function
def front_matter_replace(value, lang):
    for index in range(len(value)):
element = value[index]
# print(f"element[{index}] = {element}")
for replacement in front_matter_replace_rules:
if replacement["orginal_text"] in element:
                # Replace them one by one using str.replace
element = element.replace(
replacement["orginal_text"], replacement["replaced_text"][lang])
value[index] = element
# print(f"element[{index}] = {element}")
return value
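
# Illustrative sketch: front_matter_replace() above reads a module-level
# `front_matter_replace_rules` list shaped like `replace_rules`. The commented entry
# below is a hypothetical example of that shape only, not the author's actual rules.
#
# front_matter_replace_rules = [
#     {
#         "orginal_text": "source tag text",
#         "replaced_text": {"en": "English tag", "es": "Spanish tag", "ar": "Arabic tag"},
#     },
# ]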
# Define the function that calls the ChatGPT API for translation
def translate_text(text, lang, type):
target_lang = {
"en": "English",
"es": "Spanish",
"ar": "Arabic"
}[lang]
    # Front Matter and the main body are translated with different prompts
    # Translate the Front Matter.
if type == "front-matter":
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a professional translation engine, please translate the text into a colloquial, professional, elegant and fluent content, without the style of machine translation. You must only translate the text content, never interpret it."},
{"role": "user", "content": f"Translate into {target_lang}:\n\n{text}\n"},
],
)
    # Translate the main body
elif type== "main-body":
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a professional translation engine, please translate the text into a colloquial, professional, elegant and fluent content, without the style of machine translation. You must maintain the original markdown format. You must not translate the `[to_be_replace[x]]` field.You must only translate the text content, never interpret it."},
{"role": "user", "content": f"Translate into {target_lang}:\n\n{text}\n"},
],
)
    # Get the translation result
output_text = completion.choices[0].message.content
return output_text
# Front Matter processing rules
def translate_front_matter(front_matter, lang):
translated_front_matter = {}
for key, value in front_matter.items():
if key in front_matter_translation_rules:
processed_value = front_matter_translation_rules[key](value, lang)
else:
            # If the key is not in the rules list, do not perform any translation or replacement
processed_value = value
translated_front_matter[key] = processed_value
# print(key, ":", processed_value)
return translated_front_matter
# Define the article-splitting function
def split_text(text, max_length):
    # Split the article by paragraphs
paragraphs = text.split("\n\n")
output_paragraphs = []
current_paragraph = ""
for paragraph in paragraphs:
if len(current_paragraph) + len(paragraph) + 2 <= max_length:
            # If the current paragraph plus the new paragraph does not exceed the maximum length, merge them
if current_paragraph:
current_paragraph += "\n\n"
current_paragraph += paragraph
else:
            # Otherwise add the current paragraph to the output list and start a new paragraph
output_paragraphs.append(current_paragraph)
current_paragraph = paragraph
    # Add the last paragraph to the output list
if current_paragraph:
output_paragraphs.append(current_paragraph)
    # Join the output paragraphs into a single string
output_text = "\n\n".join(output_paragraphs)
return output_text
# Define the function that translates a file
def translate_file(input_file, filename, lang):
print(f"Translating into {lang}: {filename}")
sys.stdout.flush()
    # Define the output file
if lang in dir_translated:
output_dir = dir_translated[lang]
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_file = os.path.join(output_dir, filename)
    # Read the input file content
with open(input_file, "r", encoding="utf-8") as f:
input_text = f.read()
    # Create a dictionary to store placeholders and their corresponding replacement text
placeholder_dict = {}
    # Apply the replacement rules in a for loop, replacing matched text with placeholders
for i, rule in enumerate(replace_rules):
find_text = rule["orginal_text"]
replace_with = rule["replaced_text"][lang]
placeholder = f"[to_be_replace[{i + 1}]]"
input_text = input_text.replace(find_text, placeholder)
placeholder_dict[placeholder] = replace_with
    # Remove the force-translate marker from the text
input_text = input_text.replace(marker_force_translate, "")
    # Remove marker_written_in_en from translations into languages other than English
if lang != "en":
input_text = input_text.replace(marker_written_in_en, "")
    # Use a regular expression to match the Front Matter
front_matter_match = re.search(r'---\n(.*?)\n---', input_text, re.DOTALL)
if front_matter_match:
front_matter_text = front_matter_match.group(1)
        # Load the YAML-format data with PyYAML
front_matter_data = yaml.safe_load(front_matter_text)
        # Translate the Front Matter according to the rules defined above
front_matter_data = translate_front_matter(front_matter_data, lang)
        # Convert the processed data back to YAML
front_matter_text_processed = yaml.dump(
front_matter_data, allow_unicode=True, default_style=None, sort_keys=False)
        # Temporarily remove the unprocessed Front Matter
input_text = input_text.replace(
"---\n"+front_matter_text+"\n---\n", "")
else:
        # print("No front matter found, skipping.")
pass
    # print(input_text)  # debug: check what the input looks like
    # Split the article
paragraphs = input_text.split("\n\n")
input_text = ""
output_paragraphs = []
current_paragraph = ""
for paragraph in paragraphs:
if len(current_paragraph) + len(paragraph) + 2 <= max_length:
            # If the current paragraph plus the new paragraph does not exceed the maximum length, merge them
if current_paragraph:
current_paragraph += "\n\n"
current_paragraph += paragraph
else:
            # Otherwise translate the current paragraph and append the result to the output list
output_paragraphs.append(translate_text(current_paragraph, lang,"main-body"))
current_paragraph = paragraph
    # Handle the last paragraph
if current_paragraph:
if len(current_paragraph) + len(input_text) <= max_length:
            # If the current paragraph plus the remaining text does not exceed the maximum length, merge them
input_text += "\n\n" + current_paragraph
else:
            # Otherwise translate the current paragraph and append the result to the output list
output_paragraphs.append(translate_text(current_paragraph, lang,"main-body"))
    # If there is still untranslated text left, translate it and append it to the output list
if input_text:
output_paragraphs.append(translate_text(input_text, lang,"main-body"))
    # Join the output paragraphs into a single string
output_text = "\n\n".join(output_paragraphs)
if front_matter_match:
        # Prepend the Front Matter
output_text = "---\n" + front_matter_text_processed + "---\n\n" + output_text
    # Append the translated-by-ChatGPT notice
if lang == "en":
output_text = output_text + tips_translated_by_chatgpt["en"]
elif lang == "es":
output_text = output_text + tips_translated_by_chatgpt["es"]
elif lang == "ar":
output_text = output_text + tips_translated_by_chatgpt["ar"]
    # Finally, replace the placeholders with their corresponding replacement text
for placeholder, replacement in placeholder_dict.items():
output_text = output_text.replace(placeholder, replacement)
    # Write the output file
with open(output_file, "w", encoding="utf-8") as f:
f.write(output_text)
# Sort files by name
file_list = os.listdir(dir_to_translate)
sorted_file_list = sorted(file_list)
# print(sorted_file_list)
try:
    # Create an external list file that stores the names of already-processed Markdown files
if not os.path.exists(processed_list):
with open(processed_list, "w", encoding="utf-8") as f:
print("processed_list created")
sys.stdout.flush()
    # Iterate over all .md files in the directory and translate them
for filename in sorted_file_list:
if filename.endswith(".md"):
input_file = os.path.join(dir_to_translate, filename)
            # Read the content of the Markdown file
with open(input_file, "r", encoding="utf-8") as f:
md_content = f.read()
            # Read the content of processed_list
with open(processed_list, "r", encoding="utf-8") as f:
processed_list_content = f.read()
            if marker_force_translate in md_content: # run this block if the force-translate marker is present
                if marker_written_in_en in md_content: # translate into languages other than English
print("Pass the en-en translation: ", filename)
sys.stdout.flush()
translate_file(input_file, filename, "es")
translate_file(input_file, filename, "ar")
                else: # translate into all languages
translate_file(input_file, filename, "en")
translate_file(input_file, filename, "es")
translate_file(input_file, filename, "ar")
            elif filename in exclude_list: # skip translation
print(f"Pass the post in exclude_list: {filename}")
sys.stdout.flush()
            elif filename in processed_list_content: # skip translation
print(f"Pass the post in processed_list: {filename}")
sys.stdout.flush()
            elif marker_written_in_en in md_content: # translate into languages other than English
print(f"Pass the en-en translation: {filename}")
sys.stdout.flush()
for lang in ["es", "ar"]:
translate_file(input_file, filename, lang)
            else: # translate into all languages
for lang in ["en", "es", "ar"]:
translate_file(input_file, filename, lang)
            # Add the processed file name to the list so it is skipped next time
if filename not in processed_list_content:
print(f"Added into processed_list: {filename}")
with open(processed_list, "a", encoding="utf-8") as f:
f.write("\n")
f.write(filename)
            # Force-flush the buffer to the terminal so progress is visible in real time when running in GitHub Actions
sys.stdout.flush()
    # Notice that all tasks are done
print("Congratulations! All files processed done.")
sys.stdout.flush()
except Exception as e:
    # Catch exceptions and print the error message
print(f"An error has occurred: {e}")
sys.stdout.flush()
    raise SystemExit(1) # 1 means an abnormal exit; change the exit code as needed
    # os.remove(input_file) # delete the source file
| [
"Translate into PLACEHOLDER:\n\nPLACEHOLDER\n",
"You are a professional translation engine, please translate the text into a colloquial, professional, elegant and fluent content, without the style of machine translation. You must only translate the text content, never interpret it.",
"You are a professional translation engine, please translate the text into a colloquial, professional, elegant and fluent content, without the style of machine translation. You must maintain the original markdown format. You must not translate the `[to_be_replace[x]]` field.You must only translate the text content, never interpret it."
] |
2024-01-10 | qlan3/MeDQN | envs~wrapper.py | # Borrow a lot from openai baselines:
# https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
from collections import deque
import cv2
import gym
import numpy as np
class NoopResetEnv(gym.Wrapper):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
:param gym.Env env: the environment to wrap.
:param int noop_max: the maximum value of no-ops to run.
"""
def __init__(self, env, noop_max=30):
super().__init__(env)
self.noop_max = noop_max
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self):
self.env.reset()
if hasattr(self.unwrapped.np_random, "integers"):
noops = self.unwrapped.np_random.integers(1, self.noop_max + 1)
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset()
return obs
class MaxAndSkipEnv(gym.Wrapper):
"""Return only every `skip`-th frame (frameskipping) using most recent raw
observations (for max pooling across time steps)
:param gym.Env env: the environment to wrap.
:param int skip: number of `skip`-th frame.
"""
def __init__(self, env, skip=4):
super().__init__(env)
self._skip = skip
def step(self, action):
"""Step the environment with the given action. Repeat action, sum
reward, and max over last observations.
"""
obs_list, total_reward, done = [], 0., False
for _ in range(self._skip):
obs, reward, done, info = self.env.step(action)
obs_list.append(obs)
total_reward += reward
if done:
break
max_frame = np.max(obs_list[-2:], axis=0)
return max_frame, total_reward, done, info
class EpisodicLifeEnv(gym.Wrapper):
"""Make end-of-life == end-of-episode, but only reset on true game over. It
helps the value estimation.
:param gym.Env env: the environment to wrap.
"""
def __init__(self, env):
super().__init__(env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal, then update lives to
# handle bonus lives
lives = self.env.unwrapped.ale.lives()
if 0 < lives < self.lives:
# for Qbert sometimes we stay in lives == 0 condition for a few
# frames, so its important to keep lives > 0, so that we only reset
# once the environment is actually done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self):
"""Calls the Gym environment reset, only when lives are exhausted. This
way all states are still reachable even though lives are episodic, and
the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset()
else:
# no-op step to advance from terminal/lost life state
obs = self.env.step(0)[0]
self.lives = self.env.unwrapped.ale.lives()
return obs
class FireResetEnv(gym.Wrapper):
"""Take action on reset for environments that are fixed until firing.
Related discussion: https://github.com/openai/baselines/issues/240
:param gym.Env env: the environment to wrap.
"""
def __init__(self, env):
super().__init__(env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self):
self.env.reset()
return self.env.step(1)[0]
class WarpFrame(gym.ObservationWrapper):
"""Warp frames to 84x84 as done in the Nature paper and later work.
:param gym.Env env: the environment to wrap.
"""
def __init__(self, env):
super().__init__(env)
self.size = 84
self.observation_space = gym.spaces.Box(
low=np.min(env.observation_space.low),
high=np.max(env.observation_space.high),
shape=(self.size, self.size),
dtype=env.observation_space.dtype
)
def observation(self, frame):
"""returns the current observation from a frame"""
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
return cv2.resize(frame, (self.size, self.size), interpolation=cv2.INTER_AREA)
class ScaledFloatFrame(gym.ObservationWrapper):
"""Normalize observations to 0~1.
:param gym.Env env: the environment to wrap.
"""
def __init__(self, env):
super().__init__(env)
low = np.min(env.observation_space.low)
high = np.max(env.observation_space.high)
self.bias = low
self.scale = high - low
self.observation_space = gym.spaces.Box(
low=0., high=1., shape=env.observation_space.shape, dtype=np.float32
)
def observation(self, observation):
return (observation - self.bias) / self.scale
class ClipRewardEnv(gym.RewardWrapper):
"""clips the reward to {+1, 0, -1} by its sign.
:param gym.Env env: the environment to wrap.
"""
def __init__(self, env):
super().__init__(env)
self.reward_range = (-1, 1)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign. Note: np.sign(0) == 0."""
return np.sign(reward)
class FrameStack(gym.Wrapper):
"""Stack n_frames last frames.
:param gym.Env env: the environment to wrap.
:param int n_frames: the number of frames to stack.
"""
def __init__(self, env, n_frames):
super().__init__(env)
self.n_frames = n_frames
self.frames = deque([], maxlen=n_frames)
shape = (n_frames, ) + env.observation_space.shape
self.observation_space = gym.spaces.Box(
low=np.min(env.observation_space.low),
high=np.max(env.observation_space.high),
shape=shape,
dtype=env.observation_space.dtype
)
def reset(self):
obs = self.env.reset()
for _ in range(self.n_frames):
self.frames.append(obs)
return self._get_ob()
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.frames.append(obs)
return self._get_ob(), reward, done, info
def _get_ob(self):
# the original wrapper use `LazyFrames` but since we use np buffer,
# it has no effect
return np.stack(self.frames, axis=0)
def wrap_deepmind(
env_id,
episode_life=True,
clip_rewards=True,
frame_stack=4,
scale=False,
warp_frame=True
):
"""Configure environment for DeepMind-style Atari. The observation is
channel-first: (c, h, w) instead of (h, w, c).
:param str env_id: the atari environment id.
:param bool episode_life: wrap the episode life wrapper.
:param bool clip_rewards: wrap the reward clipping wrapper.
:param int frame_stack: wrap the frame stacking wrapper.
:param bool scale: wrap the scaling observation wrapper.
:param bool warp_frame: wrap the grayscale + resize observation wrapper.
:return: the wrapped atari environment.
"""
assert 'NoFrameskip' in env_id
env = gym.make(env_id)
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
if warp_frame:
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, frame_stack)
return env | [] |
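
# Usage sketch for the wrappers above (illustrative; assumes the classic gym step/reset
# API used in this file and an installed Atari ROM for "PongNoFrameskip-v4"):
if __name__ == "__main__":
    demo_env = wrap_deepmind("PongNoFrameskip-v4", episode_life=True, clip_rewards=True,
                             frame_stack=4, scale=False, warp_frame=True)
    obs = demo_env.reset()  # stacked observation of shape (4, 84, 84)
    obs, reward, done, info = demo_env.step(demo_env.action_space.sample())
    print(obs.shape, reward, done)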
2024-01-10 | SamarthK1239/OpenAI-Api-Shenanigans | OpenAI-API~image_variation.py | import os
from pathlib import Path
import requests
from dotenv import load_dotenv
from openai import OpenAI
path = Path("Environment-Variables/.env")
load_dotenv(dotenv_path=path)
# set up the openai client
openai = OpenAI(
organization=os.getenv('organization'),
api_key=os.getenv("api_key")
)
def create_variation():
response = openai.images.create_variation(
image=open("generated_image.jpg", "rb"),
n=1,
size="1024x1024"
)
image_url = response.data[0].url
response = requests.get(image_url)
# Save and open image on local machine
with open("generated_image_revised.jpg", "wb") as f:
f.write(response.content) | [] |
2024-01-10 | SamarthK1239/OpenAI-Api-Shenanigans | OpenAI-API~Storyteller~storyteller.py | import os
from pathlib import Path
import file_operations as fo
from dotenv import load_dotenv
from openai import OpenAI
# Get environment variables
path = Path("Environment-Variables/.env")
load_dotenv(dotenv_path=path)
# Setup OpenAI client
client = OpenAI(
organization=os.getenv('organization'),
api_key=os.getenv("api_key")
)
# Get category
category = input("What category would you like to generate a story from? ")
# Get a random prompt from the category
prompt = fo.read_category(category)
prompt = "Use the following prompt to generate an interactive story. Ask a question at the end of each response, and let the user respond with what they would do: " + prompt
conversation_history = [{"role": "user", "content": prompt}]
# Set up the starting GPT prompt
response = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=conversation_history
)
# Print the response
print(response.choices[0].message.content)
conversation_history.append({"role": "system", "content": response.choices[0].message.content})
# Start the user/GPT interaction
while True:
# Get the user's response
user_response = input("What would you do? ")
conversation_history.append({"role": "user", "content": user_response})
print(conversation_history)
# Generate a response from GPT
response = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=conversation_history
)
# Print the response
print(response.choices[0].message.content)
conversation_history.append({"role": "system", "content": response.choices[0].message.content})
| [
"Use the following prompt to generate an interactive story. Ask a question at the end of each response, and let the user respond with what they would do: Use the following prompt to generate an interactive story. Ask a question at the end of each response, and let the user respond with what they would do: prompt2dd6dbe6-5c55-4078-9149-4d85c6abd0bc",
"Use the following prompt to generate an interactive story. Ask a question at the end of each response, and let the user respond with what they would do: promptfbaf4e53-2621-4286-80b9-6ca50c2432e8"
] |
2024-01-10 | SamarthK1239/OpenAI-Api-Shenanigans | OpenAI-API~Summarizer~Summarizer.py | import os
from pathlib import Path
from dotenv import load_dotenv
from openai import OpenAI
import TokenSplitter
path = Path("../Environment-Variables/.env")
load_dotenv(dotenv_path=path)
# Set up openai client
openai = OpenAI(
organization=os.getenv('organization'),
api_key=os.getenv("api_key")
)
# Read transcription file
with open("transcription.txt") as f:
transcription = f.readline()
# Parameter Meanings for response generation
# temperature: Controls Randomness. Lower means less random completions. As this value approaches zero, the model becomes very deterministic
# max_tokens: Maximum of 4000 tokens shared between prompt and completion (input and output)
# top_p: Controls diversity. 0.5 means half of all weighted options are considered
# frequency_penalty: Penalizes new tokens based on frequencies. Decreases the chances of repetition of the same lines
# presence_penalty: Penalizes new tokens based on if they show up already. Increases the likelihood of new topics coming up
# best_of: Generates the specified number of items and then returns the best one
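
# Illustrative sketch of how the parameters described above could be passed to the chat
# completions call used in this script. The helper below is an assumption for clarity
# only and is never invoked; the values shown are example settings, not the author's.
def _example_completion(client, messages):
    return client.chat.completions.create(
        model="gpt-3.5-turbo-1106",
        messages=messages,
        temperature=0.2,        # low randomness for a factual summary
        max_tokens=800,         # cap on the completion length
        top_p=0.5,              # consider only the top half of the probability mass
        frequency_penalty=0.5,  # discourage repeating the same lines
        presence_penalty=0.0,   # neutral about introducing new topics
    )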
prompt = "Comprehensively summarize this for a university student. Using bullet points to organize the summary, " \
"Go through every piece of advice provided by the speaker. " \
"If you can use technical programming terms, be sure to reference them.\n" + transcription
# First generation pass using davinci-003 model
response = openai.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=[
{"role": "user", "content": prompt},
]
)
print(response.choices[0].message.content)
# Fact Checking pass, uses same model as above
fact_checked_response = openai.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=[
{"role": "user", "content": "Clarify each bullet point: "},
{"role": "user", "content": response.choices[0].message.content}
]
)
print(fact_checked_response.choices[0].message.content)
# Detail-addition pass, using same model as above
final_detailed_response = openai.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=[
{"role": "user", "content": "Add as much detail as you can to each bullet point. Use paragraphs to organize your response."},
{"role": "user", "content": fact_checked_response.choices[0].message.content}
]
)
print(final_detailed_response.choices[0].message.content)
# Print final response after all three passes
print("Final Result:", final_detailed_response.choices[0].message.content)
| [
"Add as much detail as you can to each bullet point. Use paragraphs to organize your response.",
"Comprehensively summarize this for a university student. Using bullet points to organize the summary, Go through every piece of advice provided by the speaker. If you can use technical programming terms, be sure to reference them.\nPLACEHOLDER",
"Clarify each bullet point: "
] |
2024-01-10 | SamarthK1239/OpenAI-Api-Shenanigans | OpenAI-API~EquationSolver~EquationSolver.py | # wolfram alpha API
import wolframalpha
import os
from pathlib import Path
from dotenv import load_dotenv
import openai
# Still have no idea why this works but hey I'm not complaining
path = Path("Environment-Variables/.env")
load_dotenv(dotenv_path=path)
# Setting the API Key
client = wolframalpha.Client(os.getenv("wlf_appid"))
# Quick and dirty way of solving equations
def solveEquation(equation):
response = client.query(equation)
return next(response.results).text
| [] |
2024-01-10 | SamarthK1239/OpenAI-Api-Shenanigans | OpenAI-API~SentimentAnalyzer.py | import os
from pathlib import Path
from dotenv import load_dotenv
from openai import OpenAI
path = Path("Environment-Variables/.env")
load_dotenv(dotenv_path=path)
# Set up the openai client
openai = OpenAI(
organization=os.getenv('organization'),
api_key=os.getenv("api_key")
)
# Generate response using davinci-003
# Parameter meanings are listed in Summarizer.py
response = openai.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=[
{"role": "user", "content": "What is the sentiment of this text? Respond with one of the following: Positive, Negative, Neutral, and rank it on a scale of 1 - 10 where 1 is heavily negative and 10 is heavily positive."},
{"role": "user", "content": input("What text would you like to classify? ")}
]
)
# Print the response text
print(response.choices[0].message.content)
| [
"What is the sentiment of this text? Respond with one of the following: Positive, Negative, Neutral, and rank it on a scale of 1 - 10 where 1 is heavily negative and 10 is heavily positive.",
"What text would you like to classify? "
] |
2024-01-10 | SamarthK1239/OpenAI-Api-Shenanigans | OpenAI-API~EquationSolver~problem_to_equation.py | # Using a class proved necessary in this case, as the OpenAI API requires the API key and organization key to be set as environment variables.
class ChatGPT:
# Same old imports
import os
from pathlib import Path
from dotenv import load_dotenv
import openai
# Load the environment variables
path = Path("EquationSolver/Environment-Variables/.env")
load_dotenv(dotenv_path=path)
# Don't really need this, might remove it later
ORGKEY = os.getenv('organization')
APIKEY = os.getenv("api_key")
# Initialize the class
def __init__(self):
self.openai.organization = self.ORGKEY
self.openai.api_key = self.APIKEY
# Function to convert the problem to an equation (switched from davinci-003 to gpt-3.5-turbo)
# Prompt design is especially important here
def convertProblemToEquation(self):
word_problem = input("Enter a word problem: ")
response = self.openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{
"role": "user",
"content": "Use the word problem from below to create an arithmetic equation(s), using any numerical figures from the question. You may create multiple equations if required to answer the question."
"Respond with only mathematical equation(s) and no text whatsoever. Ensure that the equation(s) you provide can be directly entered into a tool like "
"symbolab to obtain an answer. Include brackets wherever needed for clarity. \n" + word_problem
}
],
# prompt="Use the word problem from below to create an equation, using any numerical figures from the question. Respond with only a mathematical equation and no text whatsoever. I do not need any explanatory text accompanying the equation. \n" + word_problem,
temperature=0.3,
max_tokens=64,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=["\n"]
)
return response["choices"][0]["message"]["content"]
# Deprecated function, kept for reference to previous versions ONLY
# Don't use this, it's not very good, and the model referenced is deprecated lol
def extractEquation(self, response):
equation = self.openai.Completion.create(
model="text-davinci-003",
prompt="From this text, extract an equation which i can put into an equation solver such as symbolab, and respond with only the equation and no accompanying text: \n" + response,
temperature=0.3,
max_tokens=64,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=["\n"]
)
return equation["choices"][0]["text"]
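
# Usage sketch (illustrative): chaining this class with the Wolfram Alpha helper in
# EquationSolver.py. Assumes both API keys are configured in the environment files.
#
#     from EquationSolver import solveEquation
#     gpt = ChatGPT()
#     equation = gpt.convertProblemToEquation()  # e.g. "(3 * 12) + 5"
#     print(solveEquation(equation))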
| [
"Use the word problem from below to create an arithmetic equation(s), using any numerical figures from the question. You may create multiple equations if required to answer the question.Respond with only mathematical equation(s) and no text whatsoever. Ensure that the equation(s) you provide can be directly entered into a tool like symbolab to obtain an answer. Include brackets wherever needed for clarity. \nPLACEHOLDER",
"From this text, extract an equation which i can put into an equation solver such as symbolab, and respond with only the equation and no accompanying text: \nPLACEHOLDER"
] |
2024-01-10 | crytic/slither | slither~utils~codex.py | import logging
import os
from argparse import ArgumentParser
from pathlib import Path
from slither.utils.command_line import defaults_flag_in_config
logger = logging.getLogger("Slither")
def init_parser(parser: ArgumentParser, always_enable_codex: bool = False) -> None:
"""
Init the cli arg with codex features
Args:
parser:
always_enable_codex (Optional(bool)): if true, --codex is not enabled
Returns:
"""
group_codex = parser.add_argument_group("Codex (https://beta.openai.com/docs/guides/code)")
if not always_enable_codex:
group_codex.add_argument(
"--codex",
help="Enable codex (require an OpenAI API Key)",
action="store_true",
default=defaults_flag_in_config["codex"],
)
group_codex.add_argument(
"--codex-log",
help="Log codex queries (in crytic_export/codex/)",
action="store_true",
default=False,
)
group_codex.add_argument(
"--codex-contracts",
help="Comma separated list of contracts to submit to OpenAI Codex",
action="store",
default=defaults_flag_in_config["codex_contracts"],
)
group_codex.add_argument(
"--codex-model",
help="Name of the Codex model to use (affects pricing). Defaults to 'text-davinci-003'",
action="store",
default=defaults_flag_in_config["codex_model"],
)
group_codex.add_argument(
"--codex-temperature",
help="Temperature to use with Codex. Lower number indicates a more precise answer while higher numbers return more creative answers. Defaults to 0",
action="store",
default=defaults_flag_in_config["codex_temperature"],
)
group_codex.add_argument(
"--codex-max-tokens",
help="Maximum amount of tokens to use on the response. This number plus the size of the prompt can be no larger than the limit (4097 for text-davinci-003)",
action="store",
default=defaults_flag_in_config["codex_max_tokens"],
)
group_codex.add_argument(
"--codex-organization",
help="Codex organization",
action="store",
default=None,
)
# TODO: investigate how to set the correct return type
# So that the other modules can work with openai
def openai_module(): # type: ignore
"""
Return the openai module
Consider checking the usage of open (slither.codex_enabled) before using this function
Returns:
Optional[the openai module]
"""
try:
# pylint: disable=import-outside-toplevel
import openai
api_key = os.getenv("OPENAI_API_KEY")
if api_key is None:
logger.info(
"Please provide an Open API Key in OPENAI_API_KEY (https://beta.openai.com/account/api-keys)"
)
return None
openai.api_key = api_key
except ImportError:
logger.info("OpenAI was not installed") # type: ignore
logger.info('run "pip install openai"')
return None
return openai
def log_codex(filename: str, prompt: str) -> None:
"""
Log the prompt in crytic/export/codex/filename
Append to the file
Args:
filename: filename to write to
prompt: prompt to write
Returns:
None
"""
Path("crytic_export/codex").mkdir(parents=True, exist_ok=True)
with open(Path("crytic_export/codex", filename), "a", encoding="utf8") as file:
file.write(prompt)
file.write("\n")
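
# Usage sketch (an illustrative assumption, not slither's documented API): fetch the
# configured openai module and log a prompt before sending it.
if __name__ == "__main__":
    openai_mod = openai_module()
    if openai_mod is not None:
        example_prompt = "List common reentrancy patterns in Solidity."
        log_codex("example_queries.txt", example_prompt)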
| [] |
2024-01-10 | memasanz/streamlitsearchapp | webapp.py | import json
import openai
import streamlit as st
from streamlit_chat import message
from streamlit_option_menu import option_menu
from credentials import *
from cog_search import *
def update_creds():
with open('credentials.py') as f:
l = list(f)
for attribute, value in creds.items():
with open('credentials.py', 'w') as output:
for line in l:
if line.startswith(attribute):
print('found attribute: ' + attribute + ' = "' + value + '"\n')
print('about to write: ' + attribute + ' = "' + value + '"\n')
output.write( attribute + ' = "' + value + '"\n')
else:
output.write(line)
f.close()
output.close()
# reads from credentials.py and puts values of creds into session_state
def set_creds():
for attribute, value in creds.items():
if attribute not in st.session_state:
st.session_state[attribute] = value
#print session_state message for chat like experience
def print_messages():
for i in range (len(st.session_state.messages) -1, -1, -1):
msg = st.session_state.messages[i]
if msg is not None:
if msg["role"] == "user":
message(msg["content"], is_user=True, key = str(i) + "user", avatar_style = "initials", seed = "๐ค")
else:
if msg["role"] == "assistant":
print(msg["content"])
if msg["content"] == "I don't know" or msg["content"] == "I don't know." or msg['content'] == "Sorry, I don't know the answer to that question. Please try rephrasing your question.":
message(msg["content"], is_user=False, key = str(i) + "system", avatar_style="initials", seed = "๐")
elif msg['content'] == "How can I help you?":
message(msg["content"], is_user=False, key = str(i) + "system", avatar_style="initials", seed = "๐")
else:
message(msg["content"], is_user=False, key = str(i) + "system", avatar_style="initials", seed = "๐")
#clear session_state messages - this is on the settings page
def settings_form():
reset = st.checkbox('Reset Messages')
if reset:
st.write('Sure thing!')
st.session_state.messages = [{"role":"system","content":"You are an AI assistant that helps people find information."}]
st.session_state.messages.append({"role": "assistant", "content": "How can I help you?"})
print("complteted reset")
#display home
#main screen
if __name__ == "__main__":
if "messages" not in st.session_state:
print("messages not in session state")
st.session_state["messages"] = [{"role":"system","content":"You are an AI assistant that helps people find information."}]
st.session_state.messages.append({"role": "assistant", "content": "How can I help you?"})
with st.sidebar:
set_creds()
menu_index = ['Home', 'Settings', 'Upload file', 'Chat']
menu_icons = ['house', 'gear', 'cloud-upload', 'chat']
selected = option_menu("Main Menu",menu_index, icons=menu_icons, menu_icon="cast", default_index=1)
if selected == 'Home':
"[View the source code](https://github.com/streamlit/llm-examples/blob/main/Chatbot.py)"
st.title("Welcome to the AI Assistant")
with st.form("chat_input", clear_on_submit=True):
a, b = st.columns([4, 1])
user_input = a.text_input(
label="Your message:",
placeholder="What would you like to say?",
label_visibility="collapsed",
)
b.form_submit_button("Send")
openai.api_type = "azure"
openai.api_base = creds['AZURE_OPENAI_ENDPOINT']
openai.api_version = "2023-03-15-preview"
openai.api_key = creds['AZURE_OPENAI_KEY']
if user_input and AZURE_OPENAI_KEY:
st.session_state.messages.append({"role": "user", "content": user_input})
response = openai.ChatCompletion.create(
engine="gpt-35-turbo",
messages = st.session_state.messages,
temperature=0.0,
max_tokens=200,
top_p=0.95,
frequency_penalty=0,
presence_penalty=0,
stop=None)
print(response)
if response.choices[0].message.content != None:
st.session_state.messages.append({"role": "assistant", "content": response.choices[0].message.content})
print_messages()
elif selected == 'Upload file':
st.title('Upload file')
elif selected == 'Chat':
st.title('Chat')
"[View the source code](https://github.com/streamlit/llm-examples/blob/main/Chatbot.py)"
st.title("Cognitive Search & Azure OpenAI")
with st.form("chat_input", clear_on_submit=True):
a, b = st.columns([4, 1])
user_input_ondata = a.text_input(
label="Your message:",
placeholder="What would you like to ask?",
label_visibility="collapsed",
)
b.form_submit_button("Send")
if user_input_ondata and AZURE_OPENAI_KEY:
question = user_input_ondata
st.session_state.messages.append({"role": "user", "content": question})
arg = OpenAIHelper(creds['COG_SEARCH_INDEX'])
response = arg.get_Answer(user_input_ondata)
st.session_state.messages.append({"role": "assistant", "content": response})
print_messages()
elif selected == 'Settings':
settings_form()
with st.form("my_form"):
st.write("Configuration Settings")
azure_openai_endpoint = st.text_input("azure_openai_endpoint", creds["AZURE_OPENAI_ENDPOINT"])
azure_openai_key = st.text_input("azure_openai_key", creds["AZURE_OPENAI_KEY"], type = "password")
txt_davinci = st.text_input("txt davinici", creds["TEXT_DAVINCI"])
cog_search_resource = st.text_input("Cog Search Resource",creds["COG_SEARCH_RESOURCE"])
cog_search_index = st.text_input("Cog Search Index", creds["COG_SEARCH_INDEX"])
cog_service_key = st.text_input("Cog Search Key", creds["COG_SEARCH_KEY"], type = "password")
storage_connection_string = st.text_input("Storage Connection String", creds["STORAGE_CONNECTION_STRING"], type="password")
storage_account = st.text_input("Storage Account", creds["STORAGE_ACCOUNT"])
storage_container = st.text_input("Storage Container", creds["STORAGE_CONTAINER"])
storage_key = st.text_input("Storage Key", creds["STORAGE_KEY"], type = "password")
submitted = st.form_submit_button("Submit")
#don't use this to update the search index.
if submitted:
creds["AZURE_OPENAI_ENDPOINT"] = azure_openai_endpoint
creds["AZURE_OPENAI_KEY"] = azure_openai_key
creds["TEXT_DAVINCI"] = txt_davinci
creds["COG_SEARCH_RESOURCE"] = cog_search_resource
#creds["COG_SEARCH_INDEX"] = cog_search_index
creds["COG_SEARCH_KEY"] = cog_service_key
creds["STORAGE_CONNECTION_STRING"] = storage_connection_string
creds["STORAGE_ACCOUNT"] = storage_account
creds["STORAGE_CONTAINER"] = storage_container
creds["STORAGE_KEY"] = storage_key
set_creds()
# update_creds()
st.write("Settings updated")
with st.form("create index"):
st.write("Create Index")
create_index = st.form_submit_button("SubmitCreateIndex")
if create_index:
cogSearch = CogSearchHelper(index = creds["COG_SEARCH_INDEX"])
response, success = cogSearch.create_datasource()
if success:
st.write("Data source created")
response, success = cogSearch.create_skillset()
if success:
st.write("Skillset created")
response, success = cogSearch.create_index()
if success:
st.write("Index created")
with st.spinner(text="In progress..."):
response, success = cogSearch.create_indexer()
if success:
st.write("Running indexer") | [
"How can I help you?",
"You are an AI assistant that helps people find information."
] |
2024-01-10 | memasanz/streamlitsearchapp | cog_search.py | import requests
import json
from credentials import *
import json
import numpy as np
import os
from langchain.llms import AzureOpenAI
#from langchain import FAISS
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.chains.question_answering import load_qa_chain
from openai.embeddings_utils import get_embedding, cosine_similarity
import openai
from transformers import GPT2TokenizerFast
import pandas as pd
class CogSearchHelper:
def __init__(self, index):
self.service_name = creds['COG_SEARCH_RESOURCE']
self.search_key = creds['COG_SEARCH_KEY']
self.storage_connectionstring = creds['STORAGE_CONNECTION_STRING']
self.storage_container = creds['STORAGE_CONTAINER']
self.cognitive_service_key = creds['COG_SERVICE_KEY']
if index == None:
self.index = creds['COG_SEARCH_INDEX']
else:
self.index = index
def get_the_token_count(self, documents):
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
total_token_count = 0
try:
token_count = len(tokenizer.encode(documents))
except:
print('failed to get token count')
token_count = -1
pass
return token_count
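
    # Usage sketch (illustrative): counting tokens for a chunk of text before sending it
    # to an embedding or completion endpoint. Assumes credentials.py holds valid values.
    #
    #     helper = CogSearchHelper(index=None)
    #     n_tokens = helper.get_the_token_count("Azure Cognitive Search with OpenAI embeddings")
    #     print(n_tokens)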
    @staticmethod
    def search_single_docs(df, user_query, TEXT_SEARCH_QUERY_EMBEDDING_ENGINE, top_n=3):
embedding = get_embedding(
user_query,
engine=TEXT_SEARCH_QUERY_EMBEDDING_ENGINE
)
df["similarities"] = df.curie_search.apply(lambda x: cosine_similarity(x, embedding))
res = (
df.sort_values("similarities", ascending=False)
.reset_index(drop=True)
.head(top_n)
)
return res
def search_semantic(self, question):
print('searching semantic')
response = openai.Embedding.create(input=question,engine="text-embedding-ada-002")
q_embeddings = response['data'][0]['embedding']
if len(question) > 0:
endpoint = "https://{}.search.windows.net/".format(self.service_name)
url = '{0}indexes/{1}/docs/search?api-version=2021-04-30-Preview'.format(endpoint, self.index)
print(url)
payload = json.dumps({
"search": question,
"queryType": "semantic",
"queryLanguage": "en-us",
"captions": "extractive",
"answers": "extractive",
"semanticConfiguration": "semanic-config",
"count": True,
})
headers = {
'api-key': '{0}'.format(self.search_key),
'Content-Type': 'application/json'
}
response = requests.request("POST", url, headers=headers, data=payload)
obj = response.json()
try:
answer = obj['@search.answers'][0]['text']
except:
answer = obj['value'][0]['@search.captions'][0]['text']
pass
relevant_data = []
lst_embeddings_text = []
lst_embeddings = []
lst_file_name = []
count = 0
#should only grab 1 from each document.
for x in obj['value']:
if x['@search.rerankerScore'] > 0.5:
count += 1
relevant_data.append(x['content'])
embeddings = x['embeddings']
embeddings_text = x['embeddings_text']
file_name = x['metadata_storage_name']
curie_search = []
for x in embeddings:
a = np.fromstring(x[1:-1], dtype=float, sep=',')
curie_search.append(a)
curie_list = list(curie_search)
#get the most relevant embedding and the most relevant text for the document
df = pd.DataFrame(list(zip(embeddings_text, curie_list)),columns =['text', 'embedding_values'])
df["similarities"] = df.embedding_values.apply(lambda x: cosine_similarity(x, q_embeddings))
res = (df.sort_values("similarities", ascending=False).reset_index(drop=True).head(1))
embedding_text_most_relevant = res['text'][0]
embedding_vector_most_relevant = res['embedding_values'][0]
# print('embedding_text_most_relevant = ' + embedding_text_most_relevant)
# print('embedding_vector_most_relevant = ' + str(embedding_vector_most_relevant))
lst_embeddings_text.append(embedding_text_most_relevant)
lst_embeddings.append(embedding_vector_most_relevant)
lst_file_name.append(file_name)
# for i in range(len(embeddings)):
# lst_embeddings_text.append(embeddings_text[i])
# lst_embeddings.append(np.fromstring(embeddings[i][1:-1], dtype=float, sep=','))
# lst_file_name.append(file_name)
tuples_list = []
tokencount = 0
for i in range(len(lst_embeddings_text)):
tuples_list.append((lst_embeddings_text[i], lst_embeddings[i]))
# print('tuples_list = ' )
# print(tuples_list)
return answer, relevant_data, count, lst_file_name, tuples_list, lst_embeddings_text
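
    # Usage sketch (illustrative): the (text, embedding) tuples returned above are the pair
    # format that vector-store helpers such as the imported FAISS wrapper can consume.
    # Hypothetical call, assuming the index already exists in the search service:
    #
    #     helper = CogSearchHelper(index=None)
    #     answer, docs, count, files, text_embedding_pairs, texts = helper.search_semantic(
    #         "What does the security policy say about encryption at rest?")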
def create_datasource(self):
endpoint = "https://{}.search.windows.net/".format(self.service_name)
url = '{0}/datasources/{1}-datasource?api-version=2020-06-30'.format(endpoint, self.index)
print(url)
payload = json.dumps({
"description": "Demo files to demonstrate cognitive search capabilities.",
"type": "azureblob",
"credentials": {
"connectionString": self.storage_connectionstring
},
"container": {
"name": self.storage_container
}
})
headers = {
'api-key': self.search_key,
'Content-Type': 'application/json'
}
response = requests.request("PUT", url, headers=headers, data=payload)
if response.status_code == 201 or response.status_code == 204:
return response, True
else:
return response, False
def create_index(self):
endpoint = "https://{}.search.windows.net/".format(self.service_name)
url = '{0}/indexes/{1}/?api-version=2021-04-30-Preview'.format(endpoint, self.index)
print(url)
payload = json.dumps({
"name": self.index,
"defaultScoringProfile": "",
"fields": [
{
"name": "content",
"type": "Edm.String",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "metadata_storage_content_type",
"type": "Edm.String",
"searchable": False,
"filterable": False,
"retrievable": False,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
},
{
"name": "metadata_storage_size",
"type": "Edm.Int64",
"searchable": False,
"filterable": False,
"retrievable": False,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
},
{
"name": "metadata_storage_last_modified",
"type": "Edm.DateTimeOffset",
"searchable": False,
"filterable": False,
"retrievable": False,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
},
{
"name": "metadata_storage_content_md5",
"type": "Edm.String",
"searchable": False,
"filterable": False,
"retrievable": False,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
},
{
"name": "metadata_storage_name",
"type": "Edm.String",
"searchable": False,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
},
{
"name": "metadata_storage_path",
"type": "Edm.String",
"searchable": False,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": True,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
},
{
"name": "metadata_storage_file_extension",
"type": "Edm.String",
"searchable": False,
"filterable": False,
"retrievable": False,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
},
{
"name": "metadata_content_type",
"type": "Edm.String",
"searchable": False,
"filterable": False,
"retrievable": False,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
},
{
"name": "metadata_language",
"type": "Edm.String",
"searchable": False,
"filterable": False,
"retrievable": False,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
},
{
"name": "metadata_creation_date",
"type": "Edm.DateTimeOffset",
"searchable": False,
"filterable": False,
"retrievable": False,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
},
{
"name": "people",
"type": "Collection(Edm.String)",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "organizations",
"type": "Collection(Edm.String)",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "locations",
"type": "Collection(Edm.String)",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "keyphrases",
"type": "Collection(Edm.String)",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "language",
"type": "Edm.String",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "translated_text",
"type": "Edm.String",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "en.lucene",
"synonymMaps": []
},
{
"name": "embeddings_text",
"type": "Collection(Edm.String)",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "embeddings",
"type": "Collection(Edm.String)",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "pii_entities",
"type": "Collection(Edm.ComplexType)",
"fields": [
{
"name": "text",
"type": "Edm.String",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "type",
"type": "Edm.String",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "subtype",
"type": "Edm.String",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "offset",
"type": "Edm.Int32",
"searchable": False,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
},
{
"name": "length",
"type": "Edm.Int32",
"searchable": False,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
},
{
"name": "score",
"type": "Edm.Double",
"searchable": False,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
}
]
},
{
"name": "masked_text",
"type": "Edm.String",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "merged_content",
"type": "Edm.String",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "text",
"type": "Collection(Edm.String)",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "layoutText",
"type": "Collection(Edm.String)",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "imageTags",
"type": "Collection(Edm.String)",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "imageCaption",
"type": "Collection(Edm.String)",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
}
],
"scoringProfiles": [],
"corsOptions": None,
"suggesters": [],
"semantic": {
"defaultConfiguration": None,
"configurations": [
{
"name": "semanic-config",
"prioritizedFields": {
"titleField": {
"fieldName": "metadata_storage_name"
},
"prioritizedContentFields": [
{
"fieldName": "merged_content"
}
],
"prioritizedKeywordsFields": [
{
"fieldName": "keyphrases"
},
{
"fieldName": "people"
},
{
"fieldName": "locations"
}
]
}
}
]
},
"analyzers": [],
"tokenizers": [],
"tokenFilters": [],
"charFilters": [],
"encryptionKey": None,
"similarity": {
"@odata.type": "#Microsoft.Azure.Search.BM25Similarity",
"k1": None,
"b": None
}
})
headers = {
'api-key': self.search_key,
'Content-Type': 'application/json'
}
response = requests.request("PUT", url, headers=headers, data=payload)
if response.status_code == 201 or response.status_code == 204:
return response, True
else:
# print('************************')
# print(response.status_code)
# print(response.text)
return response, False
def create_skillset(self):
endpoint = "https://{}.search.windows.net/".format(self.service_name)
appfunctionurl = creds['APP_FUNCTION_URL']
print(appfunctionurl)
url = '{0}/skillsets/{1}-skillset?api-version=2021-04-30-Preview'.format(endpoint, self.index)
print(url)
payload = json.dumps({
"@odata.context": "https://mmx-cog-search.search.windows.net/$metadata#skillsets/$entity",
"@odata.etag": "\"0x8DB2B4BF82370CF\"",
"name": "{0}-skillset".format(self.index),
"description": "Skillset created from the portal. skillsetName: index-skillset; contentField: merged_content; enrichmentGranularity: document; knowledgeStoreStorageAccount: ;",
"skills": [
{
"@odata.type": "#Microsoft.Skills.Text.V3.EntityRecognitionSkill",
"name": "#1",
"description": None,
"context": "/document/merged_content",
"categories": [
"Organization",
"URL",
"DateTime",
"Skill",
"Address",
"Location",
"Product",
"IPAddress",
"Event",
"Person",
"Quantity",
"PersonType",
"PhoneNumber",
"Email"
],
"defaultLanguageCode": "en",
"minimumPrecision": None,
"modelVersion": None,
"inputs": [
{
"name": "text",
"source": "/document/merged_content"
},
{
"name": "languageCode",
"source": "/document/language"
}
],
"outputs": [
{
"name": "persons",
"targetName": "people"
},
{
"name": "organizations",
"targetName": "organizations"
},
{
"name": "locations",
"targetName": "locations"
}
]
},
{
"@odata.type": "#Microsoft.Skills.Text.KeyPhraseExtractionSkill",
"name": "#2",
"description": None,
"context": "/document/merged_content",
"defaultLanguageCode": "en",
"maxKeyPhraseCount": None,
"modelVersion": None,
"inputs": [
{
"name": "text",
"source": "/document/merged_content"
},
{
"name": "languageCode",
"source": "/document/language"
}
],
"outputs": [
{
"name": "keyPhrases",
"targetName": "keyphrases"
}
]
},
{
"@odata.type": "#Microsoft.Skills.Text.LanguageDetectionSkill",
"name": "#3",
"description": None,
"context": "/document",
"defaultCountryHint": None,
"modelVersion": None,
"inputs": [
{
"name": "text",
"source": "/document/merged_content"
}
],
"outputs": [
{
"name": "languageCode",
"targetName": "language"
}
]
},
{
"@odata.type": "#Microsoft.Skills.Text.TranslationSkill",
"name": "#4",
"description": None,
"context": "/document/merged_content",
"defaultFromLanguageCode": None,
"defaultToLanguageCode": "en",
"suggestedFrom": "en",
"inputs": [
{
"name": "text",
"source": "/document/merged_content"
}
],
"outputs": [
{
"name": "translatedText",
"targetName": "translated_text"
}
]
},
{
"@odata.type": "#Microsoft.Skills.Text.PIIDetectionSkill",
"name": "#5",
"description": None,
"context": "/document/merged_content",
"defaultLanguageCode": "en",
"minimumPrecision": 0.5,
"maskingMode": "replace",
"maskingCharacter": "*",
"modelVersion": None,
"piiCategories": [],
"domain": "none",
"inputs": [
{
"name": "text",
"source": "/document/merged_content"
},
{
"name": "languageCode",
"source": "/document/language"
}
],
"outputs": [
{
"name": "piiEntities",
"targetName": "pii_entities"
},
{
"name": "maskedText",
"targetName": "masked_text"
}
]
},
{
"@odata.type": "#Microsoft.Skills.Text.MergeSkill",
"name": "#6",
"description": None,
"context": "/document",
"insertPreTag": " ",
"insertPostTag": " ",
"inputs": [
{
"name": "text",
"source": "/document/content"
},
{
"name": "itemsToInsert",
"source": "/document/normalized_images/*/text"
},
{
"name": "offsets",
"source": "/document/normalized_images/*/contentOffset"
}
],
"outputs": [
{
"name": "mergedText",
"targetName": "merged_content"
}
]
},
{
"@odata.type": "#Microsoft.Skills.Vision.OcrSkill",
"name": "#7",
"description": None,
"context": "/document/normalized_images/*",
"textExtractionAlgorithm": None,
"lineEnding": "Space",
"defaultLanguageCode": "en",
"detectOrientation": True,
"inputs": [
{
"name": "image",
"source": "/document/normalized_images/*"
}
],
"outputs": [
{
"name": "text",
"targetName": "text"
},
{
"name": "layoutText",
"targetName": "layoutText"
}
]
},
{
"@odata.type": "#Microsoft.Skills.Vision.ImageAnalysisSkill",
"name": "#8",
"description": None,
"context": "/document/normalized_images/*",
"defaultLanguageCode": "en",
"visualFeatures": [
"tags",
"description"
],
"details": [],
"inputs": [
{
"name": "image",
"source": "/document/normalized_images/*"
}
],
"outputs": [
{
"name": "tags",
"targetName": "imageTags"
},
{
"name": "description",
"targetName": "imageCaption"
}
]
}
,
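            # Custom WebApiSkill: calls the Azure Function at APP_FUNCTION_URL to
            # compute embeddings for the merged document text; its outputs feed the
            # "embeddings" and "embeddings_text" index fields.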
{
"@odata.type": "#Microsoft.Skills.Custom.WebApiSkill",
"uri": appfunctionurl,
"httpMethod": "POST",
"timeout": "PT230S",
"batchSize": 1,
"degreeOfParallelism": 1,
"name": "Embeddings",
"description": "",
"context": "/document",
"inputs": [
{
"name": "text",
"source": "/document/merged_content"
},
{
"name": "filename",
"source": "/document/metadata_storage_name"
}
],
"outputs": [
{
"name": "embeddings",
"targetName": "embeddings"
},
{
"name": "embeddings_text",
"targetName": "embeddings_text"
}
]
}
],
"cognitiveServices": {
"@odata.type": "#Microsoft.Azure.Search.CognitiveServicesByKey",
"description": "/subscriptions/b071bca8-0055-43f9-9ff8-ca9a144c2a6f/resourceGroups/mmx-cognitive-services-rg/providers/Microsoft.CognitiveServices/accounts/xmm-cognitive-services",
"key": "{0}".format(self.cognitive_service_key)
},
"knowledgeStore": None,
"encryptionKey": None
})
headers = {
'Content-Type': 'application/json',
'api-key': '{0}'.format(self.search_key)
}
response = requests.request("PUT", url, headers=headers, data=payload)
if response.status_code == 201 or response.status_code == 204:
return response, True
else:
return response, False
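    # Creates the indexer that wires the data source, skillset and index together and
    # maps each enriched output (entities, key phrases, translated/masked text, OCR
    # text, image tags/captions, embeddings) onto the matching index field.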
def create_indexer(self):
endpoint = "https://{}.search.windows.net/".format(self.service_name)
url = '{0}/indexers/{1}-indexer/?api-version=2021-04-30-Preview'.format(endpoint, self.index)
print(url)
payload = json.dumps({
"name": "{0}-indexer".format(self.index),
"description": "",
"dataSourceName": "{0}-datasource".format(self.index),
"skillsetName": "{0}-skillset".format(self.index),
"targetIndexName": "{0}".format(self.index),
"disabled": None,
"schedule": None,
"parameters": {
"batchSize": None,
"maxFailedItems": 0,
"maxFailedItemsPerBatch": 0,
"base64EncodeKeys": None,
"configuration": {
"dataToExtract": "contentAndMetadata",
"parsingMode": "default",
"imageAction": "generateNormalizedImages"
}
},
"fieldMappings": [
{
"sourceFieldName": "metadata_storage_path",
"targetFieldName": "metadata_storage_path",
"mappingFunction": {
"name": "base64Encode",
"parameters": None
}
}
],
"outputFieldMappings": [
{
"sourceFieldName": "/document/merged_content/people",
"targetFieldName": "people"
},
{
"sourceFieldName": "/document/merged_content/organizations",
"targetFieldName": "organizations"
},
{
"sourceFieldName": "/document/merged_content/locations",
"targetFieldName": "locations"
},
{
"sourceFieldName": "/document/merged_content/keyphrases",
"targetFieldName": "keyphrases"
},
{
"sourceFieldName": "/document/language",
"targetFieldName": "language"
},
{
"sourceFieldName": "/document/merged_content/translated_text",
"targetFieldName": "translated_text"
},
{
"sourceFieldName": "/document/merged_content/pii_entities",
"targetFieldName": "pii_entities"
},
{
"sourceFieldName": "/document/merged_content/masked_text",
"targetFieldName": "masked_text"
},
{
"sourceFieldName": "/document/merged_content",
"targetFieldName": "merged_content"
},
{
"sourceFieldName": "/document/normalized_images/*/text",
"targetFieldName": "text"
},
{
"sourceFieldName": "/document/normalized_images/*/layoutText",
"targetFieldName": "layoutText"
},
{
"sourceFieldName": "/document/normalized_images/*/imageTags/*/name",
"targetFieldName": "imageTags"
},
{
"sourceFieldName": "/document/normalized_images/*/imageCaption",
"targetFieldName": "imageCaption"
},
{
"sourceFieldName": "/document/embeddings",
"targetFieldName": "embeddings"
},
{
"sourceFieldName": "/document/embeddings_text",
"targetFieldName": "embeddings_text"
}
],
"cache": None,
"encryptionKey": None
})
headers = {
'Content-Type': 'application/json',
'api-key': '{0}'.format(self.search_key)
}
response = requests.request("PUT", url, headers=headers, data=payload)
if response.status_code == 201 or response.status_code == 204:
            print('indexer created')
return response, True
else:
print(response.status_code)
return response, False
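    # Triggers an on-demand indexer run so new or changed source documents are processed.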
def run_indexer(self):
endpoint = "https://{}.search.windows.net/".format(self.service_name)
url = '{0}/indexers/{1}/run?api-version=2021-04-30-Preview'.format(endpoint, self.index + '-indexer')
headers = {
'api-key': self.search_key,
'Content-Type': 'application/json'
}
response = requests.request("POST", url, headers=headers)
print(response.text)
class OpenAIHelper:
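    """Answers questions with Azure OpenAI over documents retrieved from the Cognitive Search index."""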
def __init__(self, index):
self.question_template = creds['QUESTION_TEMPLATE']
if index == None:
self.index = creds['COG_SEARCH_INDEX']
else:
self.index = index
def get_the_token_count(self, documents):
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
total_token_count = 0
try:
token_count = len(tokenizer.encode(documents))
except:
print('failed to get token count')
token_count = -1
pass
return token_count
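    # Retrieval-augmented answering: embed the question, fetch semantically relevant
    # chunks from Cognitive Search, build a FAISS store from their stored embeddings,
    # then run a LangChain QA chain ("stuff" if the prompt fits, "map_reduce" otherwise).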
def get_Answer(self, question):
print('Get Answer')
openai.api_type = "azure"
openai.api_base = creds['AZURE_OPENAI_ENDPOINT']
openai.api_version = "2022-12-01"
os.environ['OPENAI_API_KEY'] = creds['AZURE_OPENAI_KEY']
openai.api_key = os.getenv("OPENAI_API_KEY")
from openai.embeddings_utils import get_embedding, cosine_similarity
question_embedding = get_embedding(question,engine="text-embedding-ada-002") # engine should be set to the deployment name you chose when you deployed the text-embedding-ada-002 (Version 2) model)
print(question_embedding)
blah = CogSearchHelper(self.index)
answer, relevant_data, count, lst_file_name, embeddings_tuples, lst_embeddings_text = blah.search_semantic(question)
embeddings = OpenAIEmbeddings(openai_api_key=openai.api_key, chunk_size=1536)
full_question = creds['QUESTION_TEMPLATE'].format(question = question)
        print('full question = ' + full_question)
print('relevant files:')
for x in lst_file_name:
print(x)
print(embeddings_tuples)
if len(embeddings_tuples) == 0:
return("Sorry, I don't know the answer to that question. Please try rephrasing your question.")
db = FAISS.from_embeddings(embeddings_tuples, embeddings)
docs_db = db.similarity_search_by_vector(question_embedding, k = 4)
        # choose the chain type by prompt size: "stuff" if it fits the context window, otherwise map_reduce
if self.get_the_token_count(full_question) + 100 < 3096:
print("running stuff....")
llm = AzureOpenAI(deployment_name=creds['TEXT_DAVINCI'], model_name="text-davinci-003", temperature=0.0, max_tokens=1000)
chain = load_qa_chain(llm, chain_type="stuff")
response = chain({"input_documents": docs_db, "question": full_question, "language": "English", "existing_answer" : ""}, return_only_outputs=True)
else:
print("running a map reduce....")
llm = AzureOpenAI(deployment_name=creds['TEXT_DAVINCI'], model_name="text-davinci-003", temperature=0.0, max_tokens=1000)
chain = load_qa_chain(llm, chain_type="map_reduce")
response = chain({"input_documents": docs_db, "question": full_question, "language": "English", "existing_answer" : ""}, return_only_outputs=True)
return(response['output_text'])
def get_FollowUpAnswer(self, question, new_docsearch, lst_file_name):
docs_db = new_docsearch.similarity_search(question)
full_question = self.question_template.format(question, lst_file_name)
llm = AzureOpenAI(deployment_name=creds['TEXT_DAVINCI'], model_name="text-davinci-003", temperature=0.0, max_tokens=2000)
chain = load_qa_chain(llm, chain_type="stuff")
response = chain({"input_documents": docs_db, "question": full_question, "language": "English", "existing_answer" : ""}, return_only_outputs=True)
return(response['output_text'])
| [] |
2024-01-10 | stanj98/codeinterpreter-api | codeinterpreterapi~chains~rm_dl_link.py | from langchain.base_language import BaseLanguageModel
from langchain.chat_models.openai import ChatOpenAI
from langchain.schema import AIMessage, OutputParserException
from codeinterpreterapi.prompts import remove_dl_link_prompt
def remove_download_link(
input_response: str,
llm: BaseLanguageModel,
) -> str:
messages = remove_dl_link_prompt.format_prompt(
input_response=input_response
).to_messages()
message = llm.predict_messages(messages)
if not isinstance(message, AIMessage):
raise OutputParserException("Expected an AIMessage")
return message.content
async def aremove_download_link(
input_response: str,
llm: BaseLanguageModel,
) -> str:
messages = remove_dl_link_prompt.format_prompt(
input_response=input_response
).to_messages()
message = await llm.apredict_messages(messages)
if not isinstance(message, AIMessage):
raise OutputParserException("Expected an AIMessage")
return message.content
def test():
llm = ChatOpenAI(model="gpt-3.5-turbo-0613") # type: ignore
example = (
"I have created the plot to your dataset.\n\n"
"Link to the file [here](sandbox:/plot.png)."
)
print(remove_download_link(example, llm))
if __name__ == "__main__":
from dotenv import load_dotenv
load_dotenv()
test()
| [] |
2024-01-10 | kamda-cyrial/Similarity-Compute-Engine | similarity_engine.py | from dotenv import load_dotenv
import os
from openai import OpenAI
from pymongo import MongoClient
import math
from collections import defaultdict
import json
import time
TWEETS_DATABASE = "Tweets"
QUERY_DATABASE = "Queries"
load_dotenv()
openai_client = OpenAI()
def get_db_handle():
load_dotenv()
client = MongoClient(os.getenv("MONGO_STRING"))
db_handle = client[os.getenv("DB_NAME")]
return db_handle, client
def create_json_matrix_prompt(statements_1, statements_2):
intro_prompt = (
"As an AI, you are tasked with evaluating the level of agreement or disagreement between two sets of statements. "
"Your analysis should be rooted in a detailed and thoughtful examination of each statement, considering not only the direct content but also the underlying implications and contexts. "
"For each statement pair, assign a score from -10 (indicating complete disagreement) to 10 (indicating complete agreement). "
"This scoring should reflect a comprehensive understanding of how the statements relate, taking into account their broader meanings and potential connections or contradictions.\n\n"
"Focus exclusively on the content and deeper meanings of the statements, avoiding any influence from ideological or philosophical biases. "
"When statements do not explicitly agree or contradict but have deeper connections or oppositions, these should be carefully considered in your scoring.\n\n"
"Examples:\n"
"'Smartphones are essential for modern communication.' and 'Most people rely heavily on technology for daily tasks.' might score high, reflecting a thematic agreement in technology reliance.\n"
"'Maintaining natural ecosystems is vital for biodiversity.' and 'Economic development should be prioritized over environmental concerns.' would likely score negatively, due to underlying opposition in priorities.\n\n"
"Please present the scores in a JSON formatted matrix, using indices for the statements from each group. Here is the format for a matrix where each group has two statements:\n"
"All responses should be formated in this sample json format:\n"
'{}"matrix": [[0, 0], [0, 0]]{}\n\n'
"This response will be used by a script, so it is of great importance that your response is nothing but just the json response, ***any text not in the json block will cause the script to fail***. \n"
"do your thought process before you generate the matrix as comments and only as comments in the json block, and please be as concise as possible to minimize tokens utilization. and cost of execution"
"Now, apply this approach to the following statements:\n"
"Group 1 has {} statements and Group 2 has {} statements.\n"
"Analyze the following statements:\n\nGroup 1:\n".format(
"{", "}", len(statements_1), len(statements_2)
)
)
for i, statement1 in enumerate(statements_1, start=1):
intro_prompt += f"{i}. {statement1}\n"
intro_prompt += "\nGroup 2:\n"
for j, statement2 in enumerate(statements_2, start=1):
intro_prompt += f"{j}. {statement2}\n"
return intro_prompt
def get_similarity_score(statements_1, statements_2):
chat_completion = openai_client.chat.completions.create(
messages=[
{
"role": "user",
"content": create_json_matrix_prompt(statements_1, statements_2),
}
],
model="gpt-3.5-turbo",
timeout=60,
)
return chat_completion.choices[0].message.content
def get_earliest_pending_query():
db_handle, client = get_db_handle()
queries_db = db_handle[QUERY_DATABASE]
earliest_pending_query = queries_db.find_one(
{"status": "pending"}, sort=[("timestamp", 1)]
)
client.close()
return earliest_pending_query
def create_statement_2_list(query):
statement_2_list = []
query_dict = {}
for category in query["query"]:
for subcategory in query["query"][category]:
statement_2_list.append(query["query"][category][subcategory])
query_dict[len(statement_2_list) - 1] = (category, subcategory)
# print(query_dict)
return statement_2_list, query_dict
def get_num_pages(page_size):
db_handle, client = get_db_handle()
tweets = db_handle[TWEETS_DATABASE]
num_tweets = tweets.count_documents({})
client.close()
return math.ceil(num_tweets / page_size)
def get_tweets(page_size, page_num):
db_handle, client = get_db_handle()
tweets = db_handle[TWEETS_DATABASE]
tweets_cursor = tweets.find({}).skip(page_size * (page_num - 1)).limit(page_size)
client.close()
return tweets_cursor
def create_statement_1_list(tweets_cursor):
statement_1_list = []
author_dict = {}
for user in tweets_cursor:
for tweet in user["tweets"]:
statement_1_list.append(tweet["content"])
author_dict[len(statement_1_list) - 1] = user["uname"]
return statement_1_list, author_dict
def compute_author_scores_by_statement_2(page_size, query):
author_scores_of_statement_2 = defaultdict(
lambda: defaultdict(lambda: defaultdict(list))
)
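    # Page through the tweet collection, ask the model for an agreement matrix between
    # each page's tweets and the query statements, and accumulate the raw scores keyed
    # as author -> category -> subcategory -> [scores].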
num_pages = get_num_pages(page_size)
for page_num in range(1, num_pages + 1):
tweets_cursor = get_tweets(page_size, page_num)
statement_1_list, author_dict = create_statement_1_list(tweets_cursor)
statement_2_list, query_dict = create_statement_2_list(query)
cnt = 0
while True:
try:
cnt += 1
similarity_score = get_similarity_score(
statement_1_list, statement_2_list
)
break
except Exception as e:
if cnt > 3:
print("[ERROR]: Exceeded 3 retries")
return None
print("Failed, retrying in 30s...")
print("[Exception]:", e)
time.sleep(30)
print("retrying...")
continue
if similarity_score.startswith("```json"):
similarity_score = similarity_score[7:-3]
# print(similarity_score)
similarity_score = json.loads(similarity_score)
for statement_1_index, statement_2_scores in enumerate(
similarity_score["matrix"]
):
for statement_2_index, score in enumerate(statement_2_scores):
author_scores_of_statement_2[author_dict[statement_1_index]][
query_dict[statement_2_index][0]
][query_dict[statement_2_index][1]].append(score)
return author_scores_of_statement_2
def average_author_scores_by_statement_2(author_scores_of_statement_2):
author_scores_by_statement_2 = defaultdict(
lambda: defaultdict(lambda: defaultdict(int))
)
for author in author_scores_of_statement_2:
for category in author_scores_of_statement_2[author]:
for subcategory in author_scores_of_statement_2[author][category]:
author_scores_by_statement_2[author][category][subcategory] = sum(
author_scores_of_statement_2[author][category][subcategory]
) / len(author_scores_of_statement_2[author][category][subcategory])
return author_scores_by_statement_2
def cluster_and_count(
unames, categories, category_index, average_author_scores_by_statement_2
):
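    # Recursive clustering: at the current category, count an author under every
    # subcategory whose average score is >= 0 (leaning agreement), then repeat the
    # grouping on the next category for the authors collected under each subcategory.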
if category_index == len(categories):
return {"result": {}, "next_category_result": {}}
result = defaultdict(lambda: 0)
next_category_result = {}
subcats = defaultdict(list)
for uname in unames:
for subcategory in average_author_scores_by_statement_2[uname][
categories[category_index]
]:
if (
average_author_scores_by_statement_2[uname][categories[category_index]][
subcategory
]
>= 0
):
result[subcategory] += 1
subcats[subcategory].append(uname)
for subcategory in subcats:
next_category_result[subcategory] = cluster_and_count(
subcats[subcategory],
categories,
category_index + 1,
average_author_scores_by_statement_2,
)
return {"result": result, "next_category_result": next_category_result}
def update_query_status(query, query_result):
db_handle, client = get_db_handle()
queries_db = db_handle[QUERY_DATABASE]
queries_db.update_one(
{"_id": query["_id"]}, {"$set": {"status": "processed", "result": query_result}}
)
client.close()
def process_query(query):
author_scores_of_statement_2 = compute_author_scores_by_statement_2(20, query)
average_author_scores_by_statement_2_res = average_author_scores_by_statement_2(
author_scores_of_statement_2
)
query_result = cluster_and_count(
average_author_scores_by_statement_2_res.keys(),
query["categories"],
0,
average_author_scores_by_statement_2_res,
)
update_query_status(query, query_result)
def execute_queries():
while True:
try:
query = get_earliest_pending_query()
if query:
print("[INFO]: Processing query - ", query["_id"])
process_query(query)
print("[SUCCESS]: Processed query - ", query["_id"])
else:
print("No pending queries")
except Exception as e:
print("[ERROR]:", e)
time.sleep(1)
if __name__ == "__main__":
execute_queries()
| [
"\nGroup 2:\n",
"PLACEHOLDER. PLACEHOLDER\n",
"As an AI, you are tasked with evaluating the level of agreement or disagreement between two sets of statements. Your analysis should be rooted in a detailed and thoughtful examination of each statement, considering not only the direct content but also the underlying implications and contexts. For each statement pair, assign a score from -10 (indicating complete disagreement) to 10 (indicating complete agreement). This scoring should reflect a comprehensive understanding of how the statements relate, taking into account their broader meanings and potential connections or contradictions.\n\nFocus exclusively on the content and deeper meanings of the statements, avoiding any influence from ideological or philosophical biases. When statements do not explicitly agree or contradict but have deeper connections or oppositions, these should be carefully considered in your scoring.\n\nExamples:\n'Smartphones are essential for modern communication.' and 'Most people rely heavily on technology for daily tasks.' might score high, reflecting a thematic agreement in technology reliance.\n'Maintaining natural ecosystems is vital for biodiversity.' and 'Economic development should be prioritized over environmental concerns.' would likely score negatively, due to underlying opposition in priorities.\n\nPlease present the scores in a JSON formatted matrix, using indices for the statements from each group. Here is the format for a matrix where each group has two statements:\nAll responses should be formated in this sample json format:\n{\"matrix\": [[0, 0], [0, 0]]}\n\nThis response will be used by a script, so it is of great importance that your response is nothing but just the json response, ***any text not in the json block will cause the script to fail***. \ndo your thought process before you generate the matrix as comments and only as comments in the json block, and please be as concise as possible to minimize tokens utilization. and cost of executionNow, apply this approach to the following statements:\nGroup 1 has 1 statements and Group 2 has 1 statements.\nAnalyze the following statements:\n\nGroup 1:\n"
] |
2024-01-10 | Mashidzasupergirl/AI-driven-SaaS-Application | app~recipe_generator.py | from openai import OpenAI
import argparse
def main():
print("Recipehelper is running!")
parser = argparse.ArgumentParser()
parser.add_argument("--input", "-i", type=str, required=True)
args = parser.parse_args()
user_input = args.input
# print("List of products from input:", user_input)
generate_recipe(user_input)
def generate_recipe(prompt: str):
client = OpenAI()
subject = prompt
completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a generetor of recipe for given products"},
{"role": "user", "content": f'I have only these products: {subject}. Suggest me a recipe only for these products, I do not want to go to the store.'},
]
)
AI_answer = completion.choices[0].message
recipe = AI_answer.content
# print(recipe, 'recipe')
return recipe
if __name__ == "__main__":
main()
| [
"I have only these products: PLACEHOLDER. Suggest me a recipe only for these products, I do not want to go to the store.",
"You are a generetor of recipe for given products"
] |
2024-01-10 | agfrei/llm_chat_pdf | api~src~app~document~document.py | import re
from bs4 import BeautifulSoup
from langchain.document_loaders import PyMuPDFLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import (
MarkdownHeaderTextSplitter,
RecursiveCharacterTextSplitter,
)
from langchain.vectorstores import Chroma
from src.app.core.settings import Settings
class Document:
def __init__(self, path: str, settings: Settings):
self._path = path
self._pages = None
self._settings = settings
self._md_splitter = MarkdownHeaderTextSplitter(
headers_to_split_on=self._settings.chunk_markdown_separators,
return_each_line=False,
)
self._rct_splitter = RecursiveCharacterTextSplitter(
chunk_size=settings.chunk_size,
chunk_overlap=settings.chunk_overlap,
separators=settings.chunk_separators,
)
self.chunks = []
    def load(self):
        """Load PDF into meaningful chunks.
        Strategy:
        1- Load PDF as HTML using PyMuPDF
        2- Split each `div` and `span` into `sections` with
           font-size as metadata
        3- Convert into markdown using font size to infer headers
           (bigger fonts = top headers, lowest font = simple text)
        4- Use `MarkdownHeaderTextSplitter` to split markdown into
           meaningful chunks
        5- Use `RecursiveCharacterTextSplitter` to split oversized
           chunks into smaller, overlapping pieces
"""
loader = PyMuPDFLoader(file_path=self._path)
self._pages = loader.load(option="html")
html_sections = []
font_sizes = set()
for page in self._pages:
s, fs = self.__split_html_sections(page)
html_sections.extend(s)
font_sizes = font_sizes.union(fs)
markdown = self.__get_markdown(html_sections, font_sizes)
chunks = self._md_splitter.split_text(markdown)
self.chunks = []
for i, chunk in enumerate(chunks):
smaller_chunks = self._rct_splitter.split_documents([chunk])
for j, c in enumerate(smaller_chunks):
header_append = (
"| "
+ " ".join(
[
c.metadata.get(header, "")
for _, header in self._settings.chunk_markdown_separators # noqa: E501
]
).strip()
+ " |"
)
if header_append:
c.page_content = header_append + " " + c.page_content
c.metadata["md_section"] = i + 1
c.metadata["total_md_sections"] = len(chunks)
c.metadata["chunk_split"] = j + 1
c.metadata["total_chunk_splits"] = len(smaller_chunks)
self.chunks.append(c)
self.__add_to_vector_db()
def __split_html_sections(self, page):
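        # Walk the page's <div>/<span> elements and group consecutive runs sharing a
        # font size into (text, font_size) snippets; the observed font sizes are later
        # mapped to markdown heading levels.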
soup = BeautifulSoup(page.page_content, "html.parser")
content = soup.find_all("div")
current_font_size = None
current_text = ""
snippets = []
font_sizes = set()
for c in content:
span = c.find("span")
if not span:
continue
while span:
style = span.get("style")
if not style:
span = span.findNext()
continue
font_size = re.findall(
r"font-size:(\d+|\d+\.\d+)(pt|px)", style
)
if not font_size:
span = span.findNext()
continue
font_size = int(float(font_size[0][0]))
font_sizes.add(font_size)
if not current_font_size:
current_font_size = font_size
if font_size == current_font_size:
current_text += span.text + "\n"
else:
snippets.append((current_text, current_font_size))
current_font_size = font_size
current_text = span.text + "\n"
span = span.findNext()
snippets.append((current_text, current_font_size))
return snippets, font_sizes
def __get_markdown(self, snippets: list, font_sizes: set):
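        # Map font sizes to markdown headings: larger fonts get higher-level headers
        # ('#', '##', ...); the smallest font size is rendered as plain text.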
font_sizes = sorted(list(font_sizes), reverse=True)
formatter = {}
for i, size in enumerate(font_sizes):
if i == len(font_sizes) - 1:
format = ""
else:
format = (i + 1) * "#" + " "
formatter[size] = format
snippets = [(formatter[s[1]] + s[0], s[1]) for s in snippets]
markdown = ""
for s in snippets:
markdown += s[0]
return markdown
def __add_to_vector_db(self):
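        # Embed the chunks with OpenAI embeddings and persist them to the configured
        # Chroma directory for later retrieval.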
embedding = OpenAIEmbeddings(
openai_api_key=self._settings.openai_api_key
)
_ = Chroma.from_documents(
self.chunks,
embedding,
persist_directory=self._settings.chroma_persist_directory,
)
| [] |
2024-01-10 | chenghuige/tensorforce | tensorforce~core~optimizers~kfac.py | # Copyright 2018 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#code refactored from openai/baselines (https://github.com/openai/baselines)
import tensorflow as tf
import numpy as np
import re
from tensorforce.core.optimizers.kfac_utils import *
from tensorforce.core.optimizers import Optimizer
from functools import reduce
KFAC_OPS = ['MatMul', 'Conv2D', 'BiasAdd']
class KFAC(Optimizer):
"""
A non-layers implementation of the Kronecker-factored approximate curvature optimizer.
Meant for usage with Tensorforce.
"""
def __init__(
self,
learning_rate=0.01,
momentum=0.9,
clip_kl=0.01,
kfac_update=2,
stats_accum_iter=60,
full_stats_init=False,
cold_iter=100,
cold_lr=None,
async_=False,
async_stats=False,
epsilon=1e-2,
stats_decay=0.95,
blockdiag_bias=False,
channel_fac=False,
factored_damping=False,
approxT2=False,
use_float64=False,
weight_decay_dict={},
max_grad_norm=0.5,
scope='kfac',
summary_labels=()
):
"""
Initializes a KFAC optimizer.
        For more information on the arguments, see the K-FAC paper (Martens & Grosse, 2015): https://arxiv.org/pdf/1503.05671.pdf
"""
self.max_grad_norm = max_grad_norm
self._lr = learning_rate
self._momentum = momentum
self._clip_kl = clip_kl
self._channel_fac = channel_fac
self._kfac_update = kfac_update
self._async = async_
self._async_stats = async_stats
self._epsilon = epsilon
self._stats_decay = stats_decay
self._blockdiag_bias = blockdiag_bias
self._approxT2 = approxT2
self._use_float64 = use_float64
self._factored_damping = factored_damping
self._cold_iter = cold_iter
if cold_lr == None:
# good heuristics
self._cold_lr = self._lr# * 3.
else:
self._cold_lr = cold_lr
self._stats_accum_iter = stats_accum_iter
self._weight_decay_dict = weight_decay_dict
self._diag_init_coeff = 0.
self._full_stats_init = full_stats_init
if not self._full_stats_init:
self._stats_accum_iter = self._cold_iter
self.sgd_step = tf.Variable(0, name='KFAC/sgd_step', trainable=False)
self.global_step = tf.Variable(
0, name='KFAC/global_step', trainable=False)
self.cold_step = tf.Variable(0, name='KFAC/cold_step', trainable=False)
self.factor_step = tf.Variable(
0, name='KFAC/factor_step', trainable=False)
self.stats_step = tf.Variable(
0, name='KFAC/stats_step', trainable=False)
self.vFv = tf.Variable(0., name='KFAC/vFv', trainable=False)
self.factors = {}
self.param_vars = []
self.stats = {}
self.stats_eigen = {}
super(KFAC, self).__init__(scope=scope, summary_labels=summary_labels)
def getFactors(self, g, varlist):
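        # Walk the sampled-gradient graph to find, for each trainable variable, the
        # forward factor (layer inputs) and backward factor (output gradients) whose
        # second moments form the Kronecker blocks of the Fisher approximation.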
graph = tf.get_default_graph()
factorTensors = {}
fpropTensors = []
bpropTensors = []
opTypes = []
fops = []
def searchFactors(gradient, graph):
            # hard-coded search strategy
bpropOp = gradient.op
bpropOp_name = bpropOp.name
bTensors = []
fTensors = []
# combining additive gradient, assume they are the same op type and
            # independent
if 'AddN' in bpropOp_name:
factors = []
for g in gradient.op.inputs:
factors.append(searchFactors(g, graph))
op_names = [item['opName'] for item in factors]
# TO-DO: need to check all the attribute of the ops as well
print (gradient.name)
print (op_names)
print (len(np.unique(op_names)))
assert len(np.unique(op_names)) == 1, gradient.name + \
' is shared among different computation OPs'
bTensors = reduce(lambda x, y: x + y,
[item['bpropFactors'] for item in factors])
if len(factors[0]['fpropFactors']) > 0:
fTensors = reduce(
lambda x, y: x + y, [item['fpropFactors'] for item in factors])
fpropOp_name = op_names[0]
fpropOp = factors[0]['op']
else:
fpropOp_name = re.search(
'gradientsSampled(_[0-9]+|)/(.+?)_grad', bpropOp_name).group(2)
fpropOp = graph.get_operation_by_name(fpropOp_name)
if fpropOp.op_def.name in KFAC_OPS:
# Known OPs
###
bTensor = [
i for i in bpropOp.inputs if 'gradientsSampled' in i.name][-1]
bTensorShape = fpropOp.outputs[0].get_shape()
if bTensor.get_shape()[0].value == None:
bTensor.set_shape(bTensorShape)
bTensors.append(bTensor)
###
if fpropOp.op_def.name == 'BiasAdd':
fTensors = []
else:
fTensors.append(
[i for i in fpropOp.inputs if param.op.name not in i.name][0])
fpropOp_name = fpropOp.op_def.name
else:
# unknown OPs, block approximation used
bInputsList = [i for i in bpropOp.inputs[
0].op.inputs if 'gradientsSampled' in i.name if 'Shape' not in i.name]
if len(bInputsList) > 0:
bTensor = bInputsList[0]
bTensorShape = fpropOp.outputs[0].get_shape()
if len(bTensor.get_shape()) > 0 and bTensor.get_shape()[0].value == None:
bTensor.set_shape(bTensorShape)
bTensors.append(bTensor)
fpropOp_name = opTypes.append('UNK-' + fpropOp.op_def.name)
return {'opName': fpropOp_name, 'op': fpropOp, 'fpropFactors': fTensors, 'bpropFactors': bTensors}
for t, param in zip(g, varlist):
factors = searchFactors(t, graph)
factorTensors[param] = factors
########
# check associated weights and bias for homogeneous coordinate representation
# and check redundent factors
# TO-DO: there may be a bug to detect associate bias and weights for
# forking layer, e.g. in inception models.
for param in varlist:
factorTensors[param]['assnWeights'] = None
factorTensors[param]['assnBias'] = None
for param in varlist:
if factorTensors[param]['opName'] == 'BiasAdd':
factorTensors[param]['assnWeights'] = None
for item in varlist:
if len(factorTensors[item]['bpropFactors']) > 0:
if (set(factorTensors[item]['bpropFactors']) == set(factorTensors[param]['bpropFactors'])) and (len(factorTensors[item]['fpropFactors']) > 0):
factorTensors[param]['assnWeights'] = item
factorTensors[item]['assnBias'] = param
factorTensors[param]['bpropFactors'] = factorTensors[
item]['bpropFactors']
########
########
# concatenate the additive gradients along the batch dimension, i.e.
# assuming independence structure
for key in ['fpropFactors', 'bpropFactors']:
for i, param in enumerate(varlist):
if len(factorTensors[param][key]) > 0:
if (key + '_concat') not in factorTensors[param]:
name_scope = factorTensors[param][key][0].name.split(':')[
0]
with tf.name_scope(name_scope):
factorTensors[param][
key + '_concat'] = tf.concat(factorTensors[param][key], 0)
else:
factorTensors[param][key + '_concat'] = None
for j, param2 in enumerate(varlist[(i + 1):]):
if (len(factorTensors[param][key]) > 0) and (set(factorTensors[param2][key]) == set(factorTensors[param][key])):
factorTensors[param2][key] = factorTensors[param][key]
factorTensors[param2][
key + '_concat'] = factorTensors[param][key + '_concat']
########
self.factors = factorTensors
return factorTensors
def getStats(self, factors, varlist):
if len(self.stats) == 0:
# initialize stats variables on CPU because eigen decomp is
# computed on CPU
with tf.device('/cpu'):
tmpStatsCache = {}
# search for tensor factors and
# use block diag approx for the bias units
for var in varlist:
fpropFactor = factors[var]['fpropFactors_concat']
bpropFactor = factors[var]['bpropFactors_concat']
opType = factors[var]['opName']
if opType == 'Conv2D':
Kh = var.get_shape()[0]
Kw = var.get_shape()[1]
C = fpropFactor.get_shape()[-1]
Oh = bpropFactor.get_shape()[1]
Ow = bpropFactor.get_shape()[2]
if Oh == 1 and Ow == 1 and self._channel_fac:
# factorization along the channels do not support
# homogeneous coordinate
var_assnBias = factors[var]['assnBias']
if var_assnBias:
factors[var]['assnBias'] = None
factors[var_assnBias]['assnWeights'] = None
##
for var in varlist:
fpropFactor = factors[var]['fpropFactors_concat']
bpropFactor = factors[var]['bpropFactors_concat']
opType = factors[var]['opName']
self.stats[var] = {'opName': opType,
'fprop_concat_stats': [],
'bprop_concat_stats': [],
'assnWeights': factors[var]['assnWeights'],
'assnBias': factors[var]['assnBias'],
}
if fpropFactor is not None:
if fpropFactor not in tmpStatsCache:
if opType == 'Conv2D':
Kh = var.get_shape()[0]
Kw = var.get_shape()[1]
C = fpropFactor.get_shape()[-1]
Oh = bpropFactor.get_shape()[1]
Ow = bpropFactor.get_shape()[2]
if Oh == 1 and Ow == 1 and self._channel_fac:
# factorization along the channels
# assume independence between input channels and spatial
# 2K-1 x 2K-1 covariance matrix and C x C covariance matrix
# factorization along the channels do not
# support homogeneous coordinate, assnBias
# is always None
fpropFactor2_size = Kh * Kw
slot_fpropFactor_stats2 = tf.Variable(tf.diag(tf.ones(
[fpropFactor2_size])) * self._diag_init_coeff, name='KFAC_STATS/' + fpropFactor.op.name, trainable=False)
self.stats[var]['fprop_concat_stats'].append(
slot_fpropFactor_stats2)
fpropFactor_size = C
else:
# 2K-1 x 2K-1 x C x C covariance matrix
# assume BHWC
fpropFactor_size = Kh * Kw * C
else:
# D x D covariance matrix
fpropFactor_size = fpropFactor.get_shape()[-1]
# use homogeneous coordinate
if not self._blockdiag_bias and self.stats[var]['assnBias']:
fpropFactor_size += 1
slot_fpropFactor_stats = tf.Variable(tf.diag(tf.ones(
[fpropFactor_size])) * self._diag_init_coeff, name='KFAC_STATS/' + fpropFactor.op.name, trainable=False)
self.stats[var]['fprop_concat_stats'].append(
slot_fpropFactor_stats)
if opType != 'Conv2D':
tmpStatsCache[fpropFactor] = self.stats[
var]['fprop_concat_stats']
else:
self.stats[var][
'fprop_concat_stats'] = tmpStatsCache[fpropFactor]
if bpropFactor is not None:
# no need to collect backward stats for bias vectors if
# using homogeneous coordinates
if not((not self._blockdiag_bias) and self.stats[var]['assnWeights']):
if bpropFactor not in tmpStatsCache:
slot_bpropFactor_stats = tf.Variable(tf.diag(tf.ones([bpropFactor.get_shape(
)[-1]])) * self._diag_init_coeff, name='KFAC_STATS/' + bpropFactor.op.name, trainable=False)
self.stats[var]['bprop_concat_stats'].append(
slot_bpropFactor_stats)
tmpStatsCache[bpropFactor] = self.stats[
var]['bprop_concat_stats']
else:
self.stats[var][
'bprop_concat_stats'] = tmpStatsCache[bpropFactor]
return self.stats
def compute_and_apply_stats(self, loss_sampled, var_list=None):
varlist = var_list
if varlist is None:
varlist = tf.trainable_variables()
stats = self.compute_stats(loss_sampled, var_list=varlist)
return self.apply_stats(stats)
def compute_stats(self, loss_sampled, var_list=None):
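        # Build the covariance ("stats") updates: for each variable, the batch-averaged
        # second moment of its forward factor (activations or conv patches, with a
        # homogeneous column for an associated bias) and of its backward factor.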
varlist = var_list
if varlist is None:
varlist = tf.trainable_variables()
gs = tf.gradients(loss_sampled, varlist, name='gradientsSampled')
self.gs = gs
factors = self.getFactors(gs, varlist)
stats = self.getStats(factors, varlist)
updateOps = []
statsUpdates = {}
statsUpdates_cache = {}
for var in varlist:
opType = factors[var]['opName']
fops = factors[var]['op']
fpropFactor = factors[var]['fpropFactors_concat']
fpropStats_vars = stats[var]['fprop_concat_stats']
bpropFactor = factors[var]['bpropFactors_concat']
bpropStats_vars = stats[var]['bprop_concat_stats']
SVD_factors = {}
for stats_var in fpropStats_vars:
stats_var_dim = int(stats_var.get_shape()[0])
if stats_var not in statsUpdates_cache:
old_fpropFactor = fpropFactor
B = (tf.shape(fpropFactor)[0]) # batch size
if opType == 'Conv2D':
strides = fops.get_attr("strides")
padding = fops.get_attr("padding")
convkernel_size = var.get_shape()[0:3]
KH = int(convkernel_size[0])
KW = int(convkernel_size[1])
C = int(convkernel_size[2])
flatten_size = int(KH * KW * C)
Oh = int(bpropFactor.get_shape()[1])
Ow = int(bpropFactor.get_shape()[2])
if Oh == 1 and Ow == 1 and self._channel_fac:
# factorization along the channels
# assume independence among input channels
# factor = B x 1 x 1 x (KH xKW x C)
# patches = B x Oh x Ow x (KH xKW x C)
if len(SVD_factors) == 0:
# find closest rank-1 approx to the feature map
S, U, V = tf.batch_svd(tf.reshape(
fpropFactor, [-1, KH * KW, C]))
# get rank-1 approx slides
sqrtS1 = tf.expand_dims(tf.sqrt(S[:, 0, 0]), 1)
patches_k = U[:, :, 0] * sqrtS1 # B x KH*KW
full_factor_shape = fpropFactor.get_shape()
patches_k.set_shape(
[full_factor_shape[0], KH * KW])
patches_c = V[:, :, 0] * sqrtS1 # B x C
patches_c.set_shape([full_factor_shape[0], C])
SVD_factors[C] = patches_c
SVD_factors[KH * KW] = patches_k
fpropFactor = SVD_factors[stats_var_dim]
else:
# poor mem usage implementation
patches = tf.extract_image_patches(fpropFactor, ksizes=[1, convkernel_size[
0], convkernel_size[1], 1], strides=strides, rates=[1, 1, 1, 1], padding=padding)
if self._approxT2:
# T^2 terms * 1/T^2, size: B x C
fpropFactor = tf.reduce_mean(patches, [1, 2])
else:
# size: (B x Oh x Ow) x C
fpropFactor = tf.reshape(
patches, [-1, flatten_size]) / Oh / Ow
fpropFactor_size = int(fpropFactor.get_shape()[-1])
if stats_var_dim == (fpropFactor_size + 1) and not self._blockdiag_bias:
if opType == 'Conv2D' and not self._approxT2:
# correct padding for numerical stability (we
# divided out OhxOw from activations for T1 approx)
fpropFactor = tf.concat([fpropFactor, tf.ones(
[tf.shape(fpropFactor)[0], 1]) / Oh / Ow], 1)
else:
# use homogeneous coordinates
fpropFactor = tf.concat(
[fpropFactor, tf.ones([tf.shape(fpropFactor)[0], 1])], 1)
# average over the number of data points in a batch
# divided by B
cov = tf.matmul(fpropFactor, fpropFactor,
transpose_a=True) / tf.cast(B, tf.float32)
updateOps.append(cov)
statsUpdates[stats_var] = cov
if opType != 'Conv2D':
# HACK: for convolution we recompute fprop stats for
# every layer including forking layers
statsUpdates_cache[stats_var] = cov
for stats_var in bpropStats_vars:
stats_var_dim = int(stats_var.get_shape()[0])
if stats_var not in statsUpdates_cache:
old_bpropFactor = bpropFactor
bpropFactor_shape = bpropFactor.get_shape()
B = tf.shape(bpropFactor)[0] # batch size
C = int(bpropFactor_shape[-1]) # num channels
if opType == 'Conv2D' or len(bpropFactor_shape) == 4:
if fpropFactor is not None:
if self._approxT2:
bpropFactor = tf.reduce_sum(
bpropFactor, [1, 2]) # T^2 terms * 1/T^2
else:
bpropFactor = tf.reshape(
bpropFactor, [-1, C]) * Oh * Ow # T * 1/T terms
else:
# just doing block diag approx. spatial independent
# structure does not apply here. summing over
# spatial locations
bpropFactor = tf.reduce_sum(bpropFactor, [1, 2])
# assume sampled loss is averaged. TO-DO:figure out better
# way to handle this
bpropFactor *= tf.to_float(B)
##
cov_b = tf.matmul(
bpropFactor, bpropFactor, transpose_a=True) / tf.to_float(tf.shape(bpropFactor)[0])
updateOps.append(cov_b)
statsUpdates[stats_var] = cov_b
statsUpdates_cache[stats_var] = cov_b
self.statsUpdates = statsUpdates
return statsUpdates
def apply_stats(self, statsUpdates):
""" compute stats and update/apply the new stats to the running average
"""
def updateAccumStats():
if self._full_stats_init:
return tf.cond(tf.greater(self.sgd_step, self._cold_iter), lambda: tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter)), tf.no_op)
else:
return tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter))
def updateRunningAvgStats(statsUpdates, fac_iter=1):
# return tf.cond(tf.greater_equal(self.factor_step,
# tf.convert_to_tensor(fac_iter)), lambda:
# tf.group(*self._apply_stats(stats_list, varlist)), tf.no_op)
return tf.group(*self._apply_stats(statsUpdates))
if self._async_stats:
# asynchronous stats update
update_stats = self._apply_stats(statsUpdates)
queue = tf.FIFOQueue(1, [item.dtype for item in update_stats], shapes=[
item.get_shape() for item in update_stats])
enqueue_op = queue.enqueue(update_stats)
def dequeue_stats_op():
return queue.dequeue()
self.qr_stats = tf.train.QueueRunner(queue, [enqueue_op])
update_stats_op = tf.cond(tf.equal(queue.size(), tf.convert_to_tensor(
0)), tf.no_op, lambda: tf.group(*[dequeue_stats_op(), ]))
else:
# synchronous stats update
update_stats_op = tf.cond(tf.greater_equal(
self.stats_step, self._stats_accum_iter), lambda: updateRunningAvgStats(statsUpdates), updateAccumStats)
self._update_stats_op = update_stats_op
return update_stats_op
def _apply_stats(self, statsUpdates, accumulate=False, accumulateCoeff=0.):
updateOps = []
# obtain the stats var list
for stats_var in statsUpdates:
stats_new = statsUpdates[stats_var]
if accumulate:
# simple superbatch averaging
update_op = tf.assign_add(
stats_var, accumulateCoeff * stats_new, use_locking=True)
else:
# exponential running averaging
update_op = tf.assign(
stats_var, stats_var * self._stats_decay, use_locking=True)
update_op = tf.assign_add(
update_op, (1. - self._stats_decay) * stats_new, use_locking=True)
updateOps.append(update_op)
with tf.control_dependencies(updateOps):
stats_step_op = tf.assign_add(self.stats_step, 1)
return [stats_step_op, ]
def getStatsEigen(self, stats=None):
if len(self.stats_eigen) == 0:
stats_eigen = {}
if stats is None:
stats = self.stats
tmpEigenCache = {}
with tf.device('/cpu:0'):
for var in stats:
for key in ['fprop_concat_stats', 'bprop_concat_stats']:
for stats_var in stats[var][key]:
if stats_var not in tmpEigenCache:
stats_dim = stats_var.get_shape()[1].value
e = tf.Variable(tf.ones(
[stats_dim]), name='KFAC_FAC/' + stats_var.name.split(':')[0] + '/e', trainable=False)
Q = tf.Variable(tf.diag(tf.ones(
[stats_dim])), name='KFAC_FAC/' + stats_var.name.split(':')[0] + '/Q', trainable=False)
stats_eigen[stats_var] = {'e': e, 'Q': Q}
tmpEigenCache[
stats_var] = stats_eigen[stats_var]
else:
stats_eigen[stats_var] = tmpEigenCache[
stats_var]
self.stats_eigen = stats_eigen
return self.stats_eigen
def computeStatsEigen(self):
""" compute the eigen decomp using copied var stats to avoid concurrent read/write from other queue """
# TO-DO: figure out why this op has delays (possibly moving
# eigenvectors around?)
with tf.device('/cpu:0'):
def removeNone(tensor_list):
local_list = []
for item in tensor_list:
if item is not None:
local_list.append(item)
return local_list
def copyStats(var_list):
print("copying stats to buffer tensors before eigen decomp")
redundant_stats = {}
copied_list = []
for item in var_list:
if item is not None:
if item not in redundant_stats:
if self._use_float64:
redundant_stats[item] = tf.cast(
tf.identity(item), tf.float64)
else:
redundant_stats[item] = tf.identity(item)
copied_list.append(redundant_stats[item])
else:
copied_list.append(None)
return copied_list
#stats = [copyStats(self.fStats), copyStats(self.bStats)]
#stats = [self.fStats, self.bStats]
stats_eigen = self.stats_eigen
computedEigen = {}
eigen_reverse_lookup = {}
updateOps = []
# sync copied stats
# with tf.control_dependencies(removeNone(stats[0]) +
# removeNone(stats[1])):
with tf.control_dependencies([]):
for stats_var in stats_eigen:
if stats_var not in computedEigen:
eigens = tf.self_adjoint_eig(stats_var)
e = eigens[0]
Q = eigens[1]
if self._use_float64:
e = tf.cast(e, tf.float32)
Q = tf.cast(Q, tf.float32)
updateOps.append(e)
updateOps.append(Q)
computedEigen[stats_var] = {'e': e, 'Q': Q}
eigen_reverse_lookup[e] = stats_eigen[stats_var]['e']
eigen_reverse_lookup[Q] = stats_eigen[stats_var]['Q']
self.eigen_reverse_lookup = eigen_reverse_lookup
self.eigen_update_list = updateOps
return updateOps
def applyStatsEigen(self, eigen_list):
updateOps = []
print(('updating %d eigenvalue/vectors' % len(eigen_list)))
for i, (tensor, mark) in enumerate(zip(eigen_list, self.eigen_update_list)):
stats_eigen_var = self.eigen_reverse_lookup[mark]
updateOps.append(
tf.assign(stats_eigen_var, tensor, use_locking=True))
with tf.control_dependencies(updateOps):
factor_step_op = tf.assign_add(self.factor_step, 1)
updateOps.append(factor_step_op)
return updateOps
def getKfacPrecondUpdates(self, gradlist, varlist):
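        # Precondition each gradient with the approximate inverse Fisher: rotate it into
        # the Kronecker eigenbasis, divide by the damped eigenvalue products, rotate back,
        # then rescale all updates so the estimated KL change stays within _clip_kl.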
updatelist = []
vg = 0.
assert len(self.stats) > 0
assert len(self.stats_eigen) > 0
assert len(self.factors) > 0
counter = 0
grad_dict = {var: grad for grad, var in zip(gradlist, varlist)}
for grad, var in zip(gradlist, varlist):
GRAD_RESHAPE = False
GRAD_TRANSPOSE = False
fpropFactoredFishers = self.stats[var]['fprop_concat_stats']
bpropFactoredFishers = self.stats[var]['bprop_concat_stats']
if (len(fpropFactoredFishers) + len(bpropFactoredFishers)) > 0:
counter += 1
GRAD_SHAPE = grad.get_shape()
if len(grad.get_shape()) > 2:
# reshape conv kernel parameters
KW = int(grad.get_shape()[0])
KH = int(grad.get_shape()[1])
C = int(grad.get_shape()[2])
D = int(grad.get_shape()[3])
if len(fpropFactoredFishers) > 1 and self._channel_fac:
# reshape conv kernel parameters into tensor
grad = tf.reshape(grad, [KW * KH, C, D])
else:
# reshape conv kernel parameters into 2D grad
grad = tf.reshape(grad, [-1, D])
GRAD_RESHAPE = True
elif len(grad.get_shape()) == 1:
# reshape bias or 1D parameters
D = int(grad.get_shape()[0])
grad = tf.expand_dims(grad, 0)
GRAD_RESHAPE = True
else:
# 2D parameters
C = int(grad.get_shape()[0])
D = int(grad.get_shape()[1])
if (self.stats[var]['assnBias'] is not None) and not self._blockdiag_bias:
# use homogeneous coordinates only works for 2D grad.
# TO-DO: figure out how to factorize bias grad
# stack bias grad
var_assnBias = self.stats[var]['assnBias']
grad = tf.concat(
[grad, tf.expand_dims(grad_dict[var_assnBias], 0)], 0)
# project gradient to eigen space and reshape the eigenvalues
# for broadcasting
eigVals = []
for idx, stats in enumerate(self.stats[var]['fprop_concat_stats']):
Q = self.stats_eigen[stats]['Q']
e = detectMinVal(self.stats_eigen[stats][
'e'], var, name='act', debug=False)
Q, e = factorReshape(Q, e, grad, facIndx=idx, ftype='act')
eigVals.append(e)
grad = gmatmul(Q, grad, transpose_a=True, reduce_dim=idx)
for idx, stats in enumerate(self.stats[var]['bprop_concat_stats']):
Q = self.stats_eigen[stats]['Q']
e = detectMinVal(self.stats_eigen[stats][
'e'], var, name='grad', debug=False)
Q, e = factorReshape(Q, e, grad, facIndx=idx, ftype='grad')
eigVals.append(e)
grad = gmatmul(grad, Q, transpose_b=False, reduce_dim=idx)
##
#####
# whiten using eigenvalues
weightDecayCoeff = 0.
if var in self._weight_decay_dict:
weightDecayCoeff = self._weight_decay_dict[var]
if self._factored_damping:
coeffs = 1.
num_factors = len(eigVals)
# compute the ratio of two trace norm of the left and right
# KFac matrices, and their generalization
if len(eigVals) == 1:
damping = self._epsilon + weightDecayCoeff
else:
damping = tf.pow(
self._epsilon + weightDecayCoeff, 1. / num_factors)
eigVals_tnorm_avg = [tf.reduce_mean(
tf.abs(e)) for e in eigVals]
for e, e_tnorm in zip(eigVals, eigVals_tnorm_avg):
eig_tnorm_negList = [
item for item in eigVals_tnorm_avg if item != e_tnorm]
if len(eigVals) == 1:
adjustment = 1.
elif len(eigVals) == 2:
adjustment = tf.sqrt(
e_tnorm / eig_tnorm_negList[0])
else:
eig_tnorm_negList_prod = reduce(
lambda x, y: x * y, eig_tnorm_negList)
adjustment = tf.pow(
tf.pow(e_tnorm, num_factors - 1.) / eig_tnorm_negList_prod, 1. / num_factors)
coeffs *= (e + adjustment * damping)
else:
coeffs = 1.
damping = (self._epsilon + weightDecayCoeff)
for e in eigVals:
coeffs *= e
coeffs += damping
#grad = tf.Print(grad, [tf.convert_to_tensor('1'), tf.convert_to_tensor(var.name), grad.get_shape()])
grad /= coeffs
#grad = tf.Print(grad, [tf.convert_to_tensor('2'), tf.convert_to_tensor(var.name), grad.get_shape()])
#####
# project gradient back to euclidean space
for idx, stats in enumerate(self.stats[var]['fprop_concat_stats']):
Q = self.stats_eigen[stats]['Q']
grad = gmatmul(Q, grad, transpose_a=False, reduce_dim=idx)
for idx, stats in enumerate(self.stats[var]['bprop_concat_stats']):
Q = self.stats_eigen[stats]['Q']
grad = gmatmul(grad, Q, transpose_b=True, reduce_dim=idx)
##
#grad = tf.Print(grad, [tf.convert_to_tensor('3'), tf.convert_to_tensor(var.name), grad.get_shape()])
if (self.stats[var]['assnBias'] is not None) and not self._blockdiag_bias:
# use homogeneous coordinates only works for 2D grad.
# TO-DO: figure out how to factorize bias grad
# un-stack bias grad
var_assnBias = self.stats[var]['assnBias']
C_plus_one = int(grad.get_shape()[0])
grad_assnBias = tf.reshape(tf.slice(grad,
begin=[
C_plus_one - 1, 0],
size=[1, -1]), var_assnBias.get_shape())
grad_assnWeights = tf.slice(grad,
begin=[0, 0],
size=[C_plus_one - 1, -1])
grad_dict[var_assnBias] = grad_assnBias
grad = grad_assnWeights
#grad = tf.Print(grad, [tf.convert_to_tensor('4'), tf.convert_to_tensor(var.name), grad.get_shape()])
if GRAD_RESHAPE:
grad = tf.reshape(grad, GRAD_SHAPE)
grad_dict[var] = grad
print(('projecting %d gradient matrices' % counter))
for g, var in zip(gradlist, varlist):
grad = grad_dict[var]
### clipping ###
tf.Print(grad, [tf.sqrt(tf.reduce_sum(tf.pow(grad, 2)))], "Euclidean norm of new grad")
local_vg = tf.reduce_sum(grad * g * (self._lr * self._lr))
vg += local_vg
# rescale everything
scaling = tf.minimum(1., tf.sqrt(self._clip_kl / vg))
with tf.control_dependencies([tf.assign(self.vFv, vg)]):
updatelist = [grad_dict[var] for var in varlist]
for i, item in enumerate(updatelist):
updatelist[i] = scaling * item
return updatelist
def compute_gradients(self, loss, var_list=None):
varlist = var_list
if varlist is None:
varlist = tf.trainable_variables()
g = tf.gradients(loss, varlist)
return [(a, b) for a, b in zip(g, varlist)]
def apply_gradients_kfac(self, grads):
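        # Chain the per-step ops: update running statistics, periodically recompute the
        # factor eigen-decompositions (synchronously or via an async queue), then apply
        # the preconditioned gradients with a momentum optimizer once warm-up is done.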
g, varlist = list(zip(*grads))
if len(self.stats_eigen) == 0:
self.getStatsEigen()
qr = None
# launch eigen-decomp on a queue thread
if self._async:
print('Use async eigen decomp')
# get a list of factor loading tensors
factorOps_dummy = self.computeStatsEigen()
# define a queue for the list of factor loading tensors
queue = tf.FIFOQueue(1, [item.dtype for item in factorOps_dummy], shapes=[
item.get_shape() for item in factorOps_dummy])
enqueue_op = tf.cond(tf.logical_and(tf.equal(tf.mod(self.stats_step, self._kfac_update), tf.convert_to_tensor(
0)), tf.greater_equal(self.stats_step, self._stats_accum_iter)), lambda: queue.enqueue(self.computeStatsEigen()), tf.no_op)
def dequeue_op():
return queue.dequeue()
qr = tf.train.QueueRunner(queue, [enqueue_op])
updateOps = []
global_step_op = tf.assign_add(self.global_step, 1)
updateOps.append(global_step_op)
with tf.control_dependencies([global_step_op]):
# compute updates
assert self._update_stats_op != None
updateOps.append(self._update_stats_op)
dependency_list = []
if not self._async:
dependency_list.append(self._update_stats_op)
with tf.control_dependencies(dependency_list):
def no_op_wrapper():
return tf.group(*[tf.assign_add(self.cold_step, 1)])
if not self._async:
# synchronous eigen-decomp updates
updateFactorOps = tf.cond(tf.logical_and(tf.equal(tf.mod(self.stats_step, self._kfac_update),
tf.convert_to_tensor(0)),
tf.greater_equal(self.stats_step, self._stats_accum_iter)), lambda: tf.group(*self.applyStatsEigen(self.computeStatsEigen())), no_op_wrapper)
else:
# asynchronous eigen-decomp updates using queue
updateFactorOps = tf.cond(tf.greater_equal(self.stats_step, self._stats_accum_iter),
lambda: tf.cond(tf.equal(queue.size(), tf.convert_to_tensor(0)),
tf.no_op,
lambda: tf.group(
*self.applyStatsEigen(dequeue_op())),
),
no_op_wrapper)
updateOps.append(updateFactorOps)
with tf.control_dependencies([updateFactorOps]):
def gradOp():
return list(g)
def getKfacGradOp():
return self.getKfacPrecondUpdates(g, varlist)
u = tf.cond(tf.greater(self.factor_step,
tf.convert_to_tensor(0)), getKfacGradOp, gradOp)
optim = tf.train.MomentumOptimizer(
self._lr * (1. - self._momentum), self._momentum)
#optim = tf.train.AdamOptimizer(self._lr, epsilon=0.01)
def optimOp():
def updateOptimOp():
if self._full_stats_init:
return tf.cond(tf.greater(self.factor_step, tf.convert_to_tensor(0)), lambda: optim.apply_gradients(list(zip(u, varlist))), tf.no_op)
else:
return optim.apply_gradients(list(zip(u, varlist)))
if self._full_stats_init:
return tf.cond(tf.greater_equal(self.stats_step, self._stats_accum_iter), updateOptimOp, tf.no_op)
else:
return tf.cond(tf.greater_equal(self.sgd_step, self._cold_iter), updateOptimOp, tf.no_op)
updateOps.append(optimOp())
return tf.group(*updateOps), qr
def apply_gradients(self, grads):
coldOptim = tf.train.MomentumOptimizer(
self._cold_lr, self._momentum)
def coldSGDstart():
sgd_grads, sgd_var = zip(*grads)
if self.max_grad_norm != None:
sgd_grads, sgd_grad_norm = tf.clip_by_global_norm(sgd_grads,self.max_grad_norm)
sgd_grads = list(zip(sgd_grads,sgd_var))
sgd_step_op = tf.assign_add(self.sgd_step, 1)
coldOptim_op = coldOptim.apply_gradients(sgd_grads)
return tf.group(*[sgd_step_op, coldOptim_op])
kfacOptim_op, qr = self.apply_gradients_kfac(grads)
def warmKFACstart():
return kfacOptim_op
return tf.cond(tf.greater(self.sgd_step, self._cold_iter), warmKFACstart, coldSGDstart), qr
def minimize_(self, loss, loss_sampled, var_list=None):
grads = self.compute_gradients(loss, var_list=var_list)
update_stats_op = self.compute_and_apply_stats(
loss_sampled, var_list=var_list)
return self.apply_gradients(grads)
def tf_step(self, time, variables, **kwargs):
"""
Creates the TensorFlow operations for performing an optimization step on the given variables, including
actually changing the values of the variables.
Args:
time: Time tensor. Not used for this optimizer.
variables: List of variables to optimize.
**kwargs:
fn_loss : loss function tensor to differentiate.
Returns:
List of delta tensors corresponding to the updates for each optimized variable.
"""
fn_loss = kwargs["fn_loss"]
if variables is None:
            variables = tf.trainable_variables()
return tf.gradients(fn_loss, variables)
def apply_step(self, variables, deltas, loss_sampled):
"""
Applies the given (and already calculated) step deltas to the variable values.
Args:
variables: List of variables.
deltas: List of deltas of same length.
loss_sampled : the sampled loss
Returns:
The step-applied operation. A tf.group of tf.assign_add ops.
"""
        update_stats_op = self.compute_and_apply_stats(
            loss_sampled, var_list=variables)
        grads = [(a, b) for a, b in zip(deltas, variables)]
kfacOptim, _ = self.apply_gradients_kfac(grads)
return kfacOptim
def minimize(self, time, variables, **kwargs):
"""
Performs an optimization step.
Args:
time: Time tensor. Not used for this
variables: List of variables to optimize.
**kwargs:
fn_loss : loss function tensor that is differentiated
sampled_loss : the sampled loss from running the model.
Returns:
The optimization operation.
"""
loss = kwargs["fn_loss"]
sampled_loss = kwargs["sampled_loss"]
min_op, _ = self.minimize_(loss, sampled_loss, var_list=variables)
return min_op
| [] |
2024-01-10 | chenghuige/tensorforce | tensorforce~core~optimizers~kfac_utils.py | # Copyright 2018 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#code refractored from openai/baselines
import tensorflow as tf
def gmatmul(a, b, transpose_a=False, transpose_b=False, reduce_dim=None):
assert reduce_dim is not None
# weird batch matmul
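    # Three cases are handled below: a 2-D `a` against an N-D `b` (contract over
    # `reduce_dim` of b), an N-D `a` against a 2-D `b` (contract over the mirrored
    # axis of a, counted from the end), and the plain 2-D x 2-D ordinary matmul.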
if len(a.get_shape()) == 2 and len(b.get_shape()) > 2:
# reshape reduce_dim to the left most dim in b
b_shape = b.get_shape()
if reduce_dim != 0:
b_dims = list(range(len(b_shape)))
b_dims.remove(reduce_dim)
b_dims.insert(0, reduce_dim)
b = tf.transpose(b, b_dims)
b_t_shape = b.get_shape()
b = tf.reshape(b, [int(b_shape[reduce_dim]), -1])
result = tf.matmul(a, b, transpose_a=transpose_a,
transpose_b=transpose_b)
result = tf.reshape(result, b_t_shape)
if reduce_dim != 0:
b_dims = list(range(len(b_shape)))
b_dims.remove(0)
b_dims.insert(reduce_dim, 0)
result = tf.transpose(result, b_dims)
return result
elif len(a.get_shape()) > 2 and len(b.get_shape()) == 2:
# reshape reduce_dim to the right most dim in a
a_shape = a.get_shape()
outter_dim = len(a_shape) - 1
reduce_dim = len(a_shape) - reduce_dim - 1
if reduce_dim != outter_dim:
a_dims = list(range(len(a_shape)))
a_dims.remove(reduce_dim)
a_dims.insert(outter_dim, reduce_dim)
a = tf.transpose(a, a_dims)
a_t_shape = a.get_shape()
a = tf.reshape(a, [-1, int(a_shape[reduce_dim])])
result = tf.matmul(a, b, transpose_a=transpose_a,
transpose_b=transpose_b)
result = tf.reshape(result, a_t_shape)
if reduce_dim != outter_dim:
a_dims = list(range(len(a_shape)))
a_dims.remove(outter_dim)
a_dims.insert(reduce_dim, outter_dim)
result = tf.transpose(result, a_dims)
return result
elif len(a.get_shape()) == 2 and len(b.get_shape()) == 2:
return tf.matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
assert False, 'something went wrong'
def clipoutNeg(vec, threshold=1e-6):
mask = tf.cast(vec > threshold, tf.float32)
return mask * vec
def detectMinVal(input_mat, var, threshold=1e-6, name='', debug=False):
eigen_min = tf.reduce_min(input_mat)
eigen_max = tf.reduce_max(input_mat)
eigen_ratio = eigen_max / eigen_min
input_mat_clipped = clipoutNeg(input_mat, threshold)
if debug:
input_mat_clipped = tf.cond(tf.logical_or(tf.greater(eigen_ratio, 0.), tf.less(eigen_ratio, -500)), lambda: input_mat_clipped, lambda: tf.Print(
input_mat_clipped, [tf.convert_to_tensor('screwed ratio ' + name + ' eigen values!!!'), tf.convert_to_tensor(var.name), eigen_min, eigen_max, eigen_ratio]))
return input_mat_clipped
def factorReshape(Q, e, grad, facIndx=0, ftype='act'):
grad_shape = grad.get_shape()
if ftype == 'act':
assert e.get_shape()[0] == grad_shape[facIndx]
expanded_shape = [1, ] * len(grad_shape)
expanded_shape[facIndx] = -1
e = tf.reshape(e, expanded_shape)
if ftype == 'grad':
assert e.get_shape()[0] == grad_shape[len(grad_shape) - facIndx - 1]
expanded_shape = [1, ] * len(grad_shape)
expanded_shape[len(grad_shape) - facIndx - 1] = -1
e = tf.reshape(e, expanded_shape)
return Q, e
| [] |
2024-01-10 | jannawro/AIfred | aifred~chains~router.py | from langchain_community.chat_models.openai import ChatOpenAI
from langchain_core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from chains.action import fake_action_chain
from chains.query import fake_query_chain
from chains.general import general_chain
categorizer_chain = (
{"user_input": RunnablePassthrough()}
| ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template_file(
template_file="./sys_input_categorizer.yaml", input_variables=[]
),
HumanMessagePromptTemplate.from_template("{user_input}"),
]
)
| ChatOpenAI(
model="gpt-3.5-turbo",
temperature=0.0,
max_tokens=1,
)
| StrOutputParser()
)
def category_router(x):
if "action" in x["category"].lower():
return fake_action_chain
elif "query" in x["category"].lower():
return fake_query_chain
else:
return general_chain.with_config(configurable={"memory": x["memory"]})
router_chain = (
{
"user_input": RunnablePassthrough(),
"date": RunnablePassthrough(),
"memory": RunnablePassthrough(),
"long_term_memory": RunnablePassthrough(),
"category": categorizer_chain,
}
| RunnableLambda(category_router)
| StrOutputParser()
)
| [
"{user_input}",
"./sys_input_categorizer.yaml"
] |
2024-01-10 | jannawro/AIfred | tools~compare_documents.py | import sys
from langchain_community.embeddings.openai import OpenAIEmbeddings
import json
import numpy as np
from numpy.linalg import norm
from typing import List
def cosine_similarity(a: List[float], b: List[float]) -> float:
return np.dot(a, b) / (norm(a) * norm(b))
"""
required environment variables:
OPENAI_API_KEY
the scripts expects to be given filepaths to documents as args
document structure(should be a json file):
{
"content": "...",
"key": "..." // a memory key from memory schema
}
"""
def main():
vectors = []
for arg in sys.argv[1:]:
print("opening ", arg)
with open(arg) as json_data:
document = json.load(json_data)
vectors.append(OpenAIEmbeddings().embed_query(document["content"]))
print(
"Cosine similarity for these documents: ",
cosine_similarity(vectors[0], vectors[1]),
)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | jannawro/AIfred | aifred~chains~memory.py | from typing import List
from langchain.chat_models.openai import ChatOpenAI
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain.schema import StrOutputParser
from langchain.output_parsers import PydanticOutputParser
from langchain.schema.runnable import RunnablePassthrough
from langchain_core.pydantic_v1 import BaseModel
class MemoryCategory(BaseModel):
key: str
class MemoryCategories(BaseModel):
categories: List[MemoryCategory]
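# The PydanticOutputParser below expects the model to emit JSON shaped like
# (illustrative): {"categories": [{"key": "relationships"}, {"key": "health"}]}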
input_to_memory_category_list = (
{
"user_input": RunnablePassthrough(),
"date": RunnablePassthrough(),
"memory_schema": RunnablePassthrough(),
}
| ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template_file(
template_file="./prompts/sys_input_to_memory_categories.yaml",
input_variables=["date", "memory_schema"],
),
HumanMessagePromptTemplate.from_template("{user_input}"),
]
)
| ChatOpenAI(model="gpt-4", temperature=0.05, max_tokens=256)
| PydanticOutputParser(pydantic_object=MemoryCategories)
)
input_to_memory_category = (
{
"user_input": RunnablePassthrough(),
"date": RunnablePassthrough(),
"memory_schema": RunnablePassthrough(),
}
| ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template_file(
template_file="./prompts/sys_input_to_memory_category.yaml",
input_variables=["date", "memory_schema"],
),
HumanMessagePromptTemplate.from_template("{user_input}"),
]
)
| ChatOpenAI(model="gpt-4", temperature=0.05, max_tokens=15)
| PydanticOutputParser(pydantic_object=MemoryCategory)
)
memory_synthesizer = (
{"old_memory": RunnablePassthrough(), "new_memory": RunnablePassthrough()}
| ChatPromptTemplate.from_messages(
[
HumanMessagePromptTemplate.from_template_file(
template_file="./prompts/user_memory_synthesizer.yaml",
input_variables=["old_memory", "new_memory"],
)
]
)
| ChatOpenAI(model="gpt-3.5-turbo", temperature=0, max_tokens=256)
| StrOutputParser()
)
| [
"./prompts/user_memory_synthesizer.yaml",
"./prompts/sys_input_to_memory_category.yaml",
"new_memory",
"memory_schema",
"{user_input}",
"./prompts/sys_input_to_memory_categories.yaml",
"old_memory"
] |
2024-01-10 | jannawro/AIfred | tools~add_documents.py | from datetime import datetime
import os
import sys
import json
from uuid import uuid4
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.vectorstores.qdrant import Qdrant
from qdrant_client import QdrantClient
"""
required environment variables:
QDRANT_URL
OPENAI_API_KEY
the scripts expects to be given filepaths to documents as args
document structure(should be a json file):
{
"content": "...",
"key": "..." // a memory key from memory schema
}
"""
def main():
documents = []
client = QdrantClient(url=os.getenv("QDRANT_URL"))
doc_store = Qdrant(
client=client, collection_name="documents", embeddings=OpenAIEmbeddings()
)
date = datetime.now().strftime("%d/%m/%Y") + " (DD/MM/YYYY)"
for arg in sys.argv[1:]:
with open(arg) as json_data:
data = json.load(json_data)
documents.append(
{
"content": data["content"],
"metadata": {
"key": data["key"],
"last_updated": date,
"uuid": str(uuid4()),
},
}
)
doc_store.add_texts(
texts=[document["content"] for document in documents],
metadatas=[document["metadata"] for document in documents],
ids=[document["metadata"]["uuid"] for document in documents],
)
if __name__ == "__main__":
main()
| [
"content"
] |
2024-01-10 | jannawro/AIfred | aifred~chains~general.py | from langchain_community.chat_models.openai import ChatOpenAI
from langchain_core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
)
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import ConfigurableField, RunnablePassthrough
general_chain = (
{
"user_input": RunnablePassthrough(),
"long_term_memory": RunnablePassthrough(),
"date": RunnablePassthrough(),
}
| ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template_file(
template_file="./prompts/sys_general_chain.yaml",
input_variables=["date", "long_term_memory"],
),
MessagesPlaceholder(variable_name="recent_messages"),
HumanMessagePromptTemplate.from_template("{user_input}"),
]
)
| ChatOpenAI(model="gpt-4").configurable_fields(
memory=ConfigurableField(
id="memory",
)
)
| StrOutputParser()
)
| [
"long_term_memory",
"recent_messages",
"./prompts/sys_general_chain.yaml",
"{user_input}"
] |
2024-01-10 | jannawro/AIfred | aifred~chains~action.py | from langchain_core.prompts.prompt import PromptTemplate
from langchain_community.chat_models.fake import FakeListChatModel
from langchain_core.output_parsers import StrOutputParser
fake_action_chain = (
PromptTemplate.from_template("Stub")
| FakeListChatModel(responses=["Action chain isn't implemented yet."])
| StrOutputParser()
)
| [
"Stub"
] |
2024-01-10 | jannawro/AIfred | aifred~chains~format.py | from langchain_community.chat_models.openai import ChatOpenAI
from langchain_core.prompts.chat import (
AIMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
)
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import ConfigurableField, RunnablePassthrough
action_succeeded_format_chain = (
{
"user_input": RunnablePassthrough(),
"long_term_memories": RunnablePassthrough()
}
| ChatPromptTemplate.from_messages(
[
HumanMessagePromptTemplate.from_template_file(
template_file="./prompts/user_aifred_action_succesful_format.yaml",
input_variables=["user_input", "long_term_memories"],
),
]
)
| ChatOpenAI(model="gpt-3.5-turbo").configurable_fields(
memory=ConfigurableField(
id="memory",
)
)
| StrOutputParser()
)
action_failed_format_chain = (
{
"user_input": RunnablePassthrough(),
"long_term_memories": RunnablePassthrough(),
"action_output": RunnablePassthrough()
}
| ChatPromptTemplate.from_messages(
[
HumanMessagePromptTemplate.from_template_file(
template_file="./prompts/user_aifred_action_succesful_format.yaml",
input_variables=["user_input", "long_term_memories", "action_output"],
),
]
)
| ChatOpenAI(model="gpt-3.5-turbo").configurable_fields(
memory=ConfigurableField(
id="memory",
)
)
| StrOutputParser()
)
query_format_chain = (
{
"user_input": RunnablePassthrough(),
"long_term_memories": RunnablePassthrough(),
"query_result": RunnablePassthrough(),
}
| ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template_file(
template_file="./prompts/sys_aifred_query_format.yaml",
input_variables=["user_input", "long_term_memories", "query_result"],
),
MessagesPlaceholder(variable_name="recent_messages"),
HumanMessagePromptTemplate.from_template("{user_input}"),
AIMessagePromptTemplate.from_template("Query result:\n{query_result}")
]
)
| ChatOpenAI(model="gpt-4").configurable_fields(
memory=ConfigurableField(
id="memory",
)
)
| StrOutputParser()
)
| [
"action_output",
"query_result",
"./prompts/user_aifred_action_succesful_format.yaml",
"{user_input}",
"user_input",
"Query result:\n{query_result}",
"recent_messages",
"./prompts/sys_aifred_query_format.yaml",
"long_term_memories"
] |
2024-01-10 | codefellows/seattle-code-python-401n8 | class-19.5~in-class-demo~chatgpt~summarizer.py | import os
from dotenv import load_dotenv
from openai import OpenAI
# load .env
load_dotenv(".env")
# set globals
OPEN_API_KEY = os.getenv("OPENAI_API_KEY")
OPENAI_MODEL = "gpt-3.5-turbo"
def article_summary(article):
client = OpenAI(
api_key=OPEN_API_KEY,
)
chat_completion = client.chat.completions.create(
messages=[
{
"role": "system",
"content": "You are a news reporter."
},
{
"role": "user",
"content": f"""Please summarize this article in 4 sentences:
```text
{article}
```""",
}
],
model=OPENAI_MODEL,
)
return chat_completion.choices[0].message.content
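# Minimal usage sketch (assumed, not part of the original module):
# if __name__ == "__main__":
#     with open("article.txt") as f:  # hypothetical input file
#         print(article_summary(f.read()))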
| [
"Please summarize this article in 4 sentences:\n\n```text\nPLACEHOLDER\n```",
"You are a news reporter."
] |
2024-01-10 | lxwlaq/gem5 | configs~example~gem5_library~riscv-ubuntu-run.py | # Copyright (c) 2021 The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This script shows an example of running a full system RISCV Ubuntu boot
simulation using the gem5 library. This simulation boots Ubuntu 20.04 using
2 TIMING CPU cores. The simulation ends when the startup is completed
successfully.
Usage
-----
```
scons build/RISCV/gem5.opt
./build/RISCV/gem5.opt \
configs/example/gem5_library/riscv-ubuntu-run.py
```
"""
import m5
from m5.objects import Root
from gem5.utils.requires import requires
from gem5.components.boards.riscv_board import RiscvBoard
from gem5.components.memory import DualChannelDDR4_2400
from gem5.components.processors.simple_processor import (
SimpleProcessor,
)
from gem5.components.processors.cpu_types import CPUTypes
from gem5.isas import ISA
from gem5.coherence_protocol import CoherenceProtocol
from gem5.resources.resource import Resource
# This runs a check to ensure the gem5 binary is compiled for RISCV.
requires(
isa_required=ISA.RISCV,
)
# With RISCV, we use simple caches.
from gem5.components.cachehierarchies.classic\
.private_l1_private_l2_cache_hierarchy import (
PrivateL1PrivateL2CacheHierarchy,
)
# Here we setup the parameters of the l1 and l2 caches.
cache_hierarchy = PrivateL1PrivateL2CacheHierarchy(
l1d_size="16kB",
l1i_size="16kB",
l2_size="256kB",
)
# Memory: Dual Channel DDR4 2400 DRAM device.
memory = DualChannelDDR4_2400(size = "3GB")
# Here we setup the processor. We use a simple processor.
processor = SimpleProcessor(
cpu_type=CPUTypes.TIMING,
num_cores=2,
)
# Here we setup the board. The RiscvBoard allows for Full-System RISCV
# simulations.
board = RiscvBoard(
clk_freq="3GHz",
processor=processor,
memory=memory,
cache_hierarchy=cache_hierarchy,
)
# Here we set the Full System workload.
# The `set_kernel_disk_workload` function for the RiscvBoard accepts a
# RISCV bootloader and a disk image. Once the system successfully boots, it
# encounters an `m5_exit instruction encountered`. We stop the simulation then.
# When the simulation has ended you may inspect `m5out/system.pc.com_1.device`
# to see the stdout.
board.set_kernel_disk_workload(
# The RISCV bootloader will be automatically downloaded to the
# `~/.cache/gem5` directory if not already present.
# The riscv-ubuntu boot-test was tested with riscv-bootloader-5.10
kernel=Resource(
"riscv-bootloader-vmlinux-5.10",
),
# The RISCV ubuntu image will be automatically downloaded to the
# `~/.cache/gem5` directory if not already present.
disk_image=Resource(
"riscv-ubuntu-20.04-img",
),
)
root = Root(full_system=True, system=board)
m5.instantiate()
# We simulate the system till we encounter `m5_exit instruction encountered`.
exit_event = m5.simulate()
# We check whether the simulation ended with `m5_exit instruction encountered`
if exit_event.getCause() == "m5_exit instruction encountered":
# We acknowledge the user that the boot was successful.
print("Successfully completed booting!")
else:
# `m5_exit instruction encountered` was never encountered. We exit the
# program unsuccessfully.
print("The startup was not completed successfully!",)
print(
"Exiting @ tick {} because {}."\
.format(m5.curTick(), exit_event.getCause())
)
exit(-1)
# We are done with the simulation. We exit the program now.
print(
"Exiting @ tick {} because {}."\
.format(m5.curTick(), exit_event.getCause())
)
| [] |
2024-01-10 | chienhung1519/streamlit-chatgpt | Doctor.py | import openai
import streamlit as st
from streamlit_chat import message
import os
# Setting page title and header
st.set_page_config(page_title="AVA", page_icon=":robot_face:")
st.markdown("<h1 style='text-align: center;'>Doctor ChatGPT</h1>", unsafe_allow_html=True)
# Set org ID and API key
# openai.organization = "<YOUR_OPENAI_ORG_ID>"
openai.api_key = os.environ.get("OPENAI_API_KEY")
# Set language
language = st.sidebar.radio("Choose a language:", ("English", "Chinese"))
lang_prompt = "Response in English." if language == "English" else "่ซ็จ็น้ซไธญๆๅ่ฆใ"
# Initialise session state variables
if 'generated' not in st.session_state:
st.session_state['generated'] = []
if 'past' not in st.session_state:
st.session_state['past'] = []
if 'messages' not in st.session_state:
st.session_state['messages'] = [
{"role": "system", f"content": "Please play the role of a empathetic and kind psychiatrist. Your task is to conduct a professional diagnosis conversation with me based on the DSM-5 criteria, but using your own language. Please only ask one question at a time. You need to ask in-depth questions, such as the duration, causes and specific manifestations of some symptoms. You need to use various empathetic strategies, such as understanding, support and encouragement to give me a more comfortable experience."},
{"role": "system", f"content": lang_prompt}
]
# if 'model_name' not in st.session_state:
# st.session_state['model_name'] = []
# if 'cost' not in st.session_state:
# st.session_state['cost'] = []
# if 'total_tokens' not in st.session_state:
# st.session_state['total_tokens'] = []
# if 'total_cost' not in st.session_state:
# st.session_state['total_cost'] = 0.0
if 'page' not in st.session_state:
st.session_state['page'] = ""
# Set page
if st.session_state['page'] == "":
st.session_state['page'] = "doctor"
if st.session_state['page'] == "patient":
st.session_state['generated'] = []
st.session_state['past'] = []
st.session_state['messages'] = [
{"role": "system", f"content": "Please play the role of a empathetic and kind psychiatrist. Your task is to conduct a professional diagnosis conversation with me based on the DSM-5 criteria, but using your own language. Please only ask one question at a time. You need to ask in-depth questions, such as the duration, causes and specific manifestations of some symptoms. You need to use various empathetic strategies, such as understanding, support and encouragement to give me a more comfortable experience."},
{"role": "system", f"content": lang_prompt}
]
st.session_state['page'] = "doctor"
# Sidebar - let user choose model, show total cost of current conversation, and let user clear the current conversation
# st.sidebar.title("Sidebar")
# model_name = st.sidebar.radio("Choose a model:", ("GPT-3.5", "GPT-4"))
# counter_placeholder = st.sidebar.empty()
# counter_placeholder.write(f"Total cost of this conversation: ${st.session_state['total_cost']:.5f}")
clear_button = st.sidebar.button("Clear Conversation", key="clear")
# Map model names to OpenAI model IDs
# if model_name == "GPT-3.5":
# model = "gpt-3.5-turbo"
# else:
# model = "gpt-4"
model = "gpt-3.5-turbo"
# reset everything
if clear_button:
st.session_state['generated'] = []
st.session_state['past'] = []
st.session_state['messages'] = [
{"role": "system", f"content": "Please play the role of a empathetic and kind psychiatrist. Your task is to conduct a professional diagnosis conversation with me based on the DSM-5 criteria, but using your own language. Please only ask one question at a time. You need to ask in-depth questions, such as the duration, causes and specific manifestations of some symptoms. You need to use various empathetic strategies, such as understanding, support and encouragement to give me a more comfortable experience."},
{"role": "system", f"content": lang_prompt}
]
# st.session_state['number_tokens'] = []
# st.session_state['model_name'] = []
# st.session_state['cost'] = []
# st.session_state['total_cost'] = 0.0
# st.session_state['total_tokens'] = []
# counter_placeholder.write(f"Total cost of this conversation: ${st.session_state['total_cost']:.5f}")
# generate a response
def generate_response(prompt):
st.session_state['messages'].append({"role": "user", "content": prompt})
completion = openai.ChatCompletion.create(
model=model,
messages=st.session_state['messages']
)
response = completion.choices[0].message.content
st.session_state['messages'].append({"role": "assistant", "content": response})
# print(st.session_state['messages'])
# total_tokens = completion.usage.total_tokens
# prompt_tokens = completion.usage.prompt_tokens
# completion_tokens = completion.usage.completion_tokens
# return response, total_tokens, prompt_tokens, completion_tokens
return response
# container for chat history
response_container = st.container()
# container for text box
container = st.container()
with container:
with st.form(key='my_form', clear_on_submit=True):
user_input = st.text_area("You:", key='input', height=100)
submit_button = st.form_submit_button(label='Send')
if submit_button and user_input:
# output, total_tokens, prompt_tokens, completion_tokens = generate_response(user_input)
output = generate_response(user_input)
st.session_state['past'].append(user_input)
st.session_state['generated'].append(output)
# st.session_state['model_name'].append(model_name)
# st.session_state['total_tokens'].append(total_tokens)
# from https://openai.com/pricing#language-models
# if model_name == "GPT-3.5":
# cost = total_tokens * 0.002 / 1000
# else:
# cost = (prompt_tokens * 0.03 + completion_tokens * 0.06) / 1000
# st.session_state['cost'].append(cost)
# st.session_state['total_cost'] += cost
if st.session_state['generated']:
with response_container:
for i in range(len(st.session_state['generated'])):
message(st.session_state["past"][i], is_user=True, key=str(i) + '_user')
message(st.session_state["generated"][i], key=str(i))
# st.write(
# f"Model used: {st.session_state['model_name'][i]}; Number of tokens: {st.session_state['total_tokens'][i]}; Cost: ${st.session_state['cost'][i]:.5f}")
# counter_placeholder.write(f"Total cost of this conversation: ${st.session_state['total_cost']:.5f}") | [
"Response in English.",
"Please play the role of a empathetic and kind psychiatrist. Your task is to conduct a professional diagnosis conversation with me based on the DSM-5 criteria, but using your own language. Please only ask one question at a time. You need to ask in-depth questions, such as the duration, causes and specific manifestations of some symptoms. You need to use various empathetic strategies, such as understanding, support and encouragement to give me a more comfortable experience."
] |
2024-01-10 | scantist-ossops/mindsdb | mindsdb~integrations~handlers~rag_handler~settings.py | import json
from dataclasses import dataclass
from functools import lru_cache, partial
from typing import Any, Dict, List, Union
import html2text
import openai
import pandas as pd
import requests
import writer
from langchain import Writer
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.docstore.document import Document
from langchain.document_loaders import DataFrameLoader
from langchain.embeddings.base import Embeddings
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS, Chroma, VectorStore
from pydantic import BaseModel, Extra, Field, validator
from mindsdb.integrations.handlers.chromadb_handler.chromadb_handler import get_chromadb
from mindsdb.integrations.handlers.rag_handler.exceptions import (
InvalidOpenAIModel,
InvalidPromptTemplate,
InvalidWriterModel,
UnsupportedLLM,
UnsupportedVectorStore,
)
DEFAULT_EMBEDDINGS_MODEL = "BAAI/bge-base-en"
SUPPORTED_VECTOR_STORES = ("chroma", "faiss")
SUPPORTED_LLMS = ("writer", "openai")
## Default parameters for RAG Handler
# this is the default prompt template for qa
DEFAULT_QA_PROMPT_TEMPLATE = """
Use the following pieces of context to answer the question at the end. If you do not know the answer,
just say that you do not know, do not try to make up an answer.
Context: {context}
Question: {question}
Helpful Answer:"""
# this is the default prompt template for if the user wants to summarize the context before qa prompt
DEFAULT_SUMMARIZATION_PROMPT_TEMPLATE = """
Summarize the following texts for me:
{context}
When summarizing, please keep the following in mind the following question:
{question}
"""
DEFAULT_CHUNK_SIZE = 500
DEFAULT_CHUNK_OVERLAP = 50
DEFAULT_VECTOR_STORE_NAME = "chroma"
DEFAULT_VECTOR_STORE_COLLECTION_NAME = "collection"
chromadb = get_chromadb()
def is_valid_store(name) -> bool:
return name in SUPPORTED_VECTOR_STORES
class VectorStoreFactory:
"""Factory class for vector stores"""
@staticmethod
def get_vectorstore_class(name):
if not isinstance(name, str):
raise TypeError("name must be a string")
if not is_valid_store(name):
raise ValueError(f"Invalid vector store {name}")
if name == "faiss":
return FAISS
if name == "chroma":
return Chroma
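# Illustrative use of the factory (not from the original file):
# VectorStoreFactory.get_vectorstore_class("chroma") returns the langchain Chroma class,
# while an unsupported name such as "milvus" raises ValueError.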
def get_chroma_client(persist_directory: str) -> chromadb.PersistentClient:
"""Get Chroma client"""
return chromadb.PersistentClient(path=persist_directory)
def get_available_writer_model_ids(args: dict) -> list:
"""Get available writer LLM model ids"""
writer_client = writer.Writer(
api_key=args["writer_api_key"],
organization_id=args["writer_org_id"],
)
res = writer_client.models.list(organization_id=args["writer_org_id"])
available_models_dict = json.loads(res.raw_response.text)
return [model["id"] for model in available_models_dict["models"]]
def get_available_openai_model_ids(args: dict) -> list:
"""Get available openai LLM model ids"""
openai.api_key = args["openai_api_key"]
res = openai.Engine.list()
return [models["id"] for models in res.data]
@dataclass
class PersistedVectorStoreSaverConfig:
vector_store_name: str
persist_directory: str
collection_name: str
vector_store: VectorStore
@dataclass
class PersistedVectorStoreLoaderConfig:
vector_store_name: str
embeddings_model: Embeddings
persist_directory: str
collection_name: str
class PersistedVectorStoreSaver:
"""Saves vector store to disk"""
def __init__(self, config: PersistedVectorStoreSaverConfig):
self.config = config
def save_vector_store(self, vector_store: VectorStore):
method_name = f"save_{self.config.vector_store_name}"
getattr(self, method_name)(vector_store)
def save_chroma(self, vector_store: Chroma):
"""Save Chroma vector store to disk"""
# no need to save chroma vector store to disk, auto save
pass
def save_faiss(self, vector_store: FAISS):
vector_store.save_local(
folder_path=self.config.persist_directory,
index_name=self.config.collection_name,
)
class PersistedVectorStoreLoader:
"""Loads vector store from disk"""
def __init__(self, config: PersistedVectorStoreLoaderConfig):
self.config = config
def load_vector_store_client(
self,
vector_store: str,
):
"""Load vector store from the persisted vector store"""
if vector_store == "chroma":
return Chroma(
collection_name=self.config.collection_name,
embedding_function=self.config.embeddings_model,
client=get_chroma_client(self.config.persist_directory),
)
elif vector_store == "faiss":
return FAISS.load_local(
folder_path=self.config.persist_directory,
embeddings=self.config.embeddings_model,
index_name=self.config.collection_name,
)
else:
raise NotImplementedError(f"{vector_store} client is not yet supported")
def load_vector_store(self) -> VectorStore:
"""Load vector store from the persisted vector store"""
method_name = f"load_{self.config.vector_store_name}"
return getattr(self, method_name)()
def load_chroma(self) -> Chroma:
"""Load Chroma vector store from the persisted vector store"""
return self.load_vector_store_client(vector_store="chroma")
def load_faiss(self) -> FAISS:
"""Load FAISS vector store from the persisted vector store"""
return self.load_vector_store_client(vector_store="faiss")
class LLMParameters(BaseModel):
"""Model parameters for the LLM API interface"""
llm_name: str = Field(default_factory=str, title="LLM API name")
max_tokens: int = Field(default=100, title="max tokens in response")
temperature: float = Field(default=0.0, title="temperature")
top_p: float = 1
best_of: int = 5
stop: List[str] = None
class Config:
extra = Extra.forbid
arbitrary_types_allowed = True
use_enum_values = True
class OpenAIParameters(LLMParameters):
"""Model parameters for the LLM API interface"""
openai_api_key: str
model_id: str = Field(default="text-davinci-003", title="model name")
n: int = Field(default=1, title="number of responses to return")
@validator("model_id")
def openai_model_must_be_supported(cls, v, values):
supported_models = get_available_openai_model_ids(values)
if v not in supported_models:
raise InvalidOpenAIModel(
f"'model_id' must be one of {supported_models}, got {v}"
)
return v
class WriterLLMParameters(LLMParameters):
"""Model parameters for the Writer LLM API interface"""
writer_api_key: str
writer_org_id: str = None
base_url: str = None
model_id: str = "palmyra-x"
callbacks: List[StreamingStdOutCallbackHandler] = [StreamingStdOutCallbackHandler()]
verbose: bool = False
@validator("model_id")
def writer_model_must_be_supported(cls, v, values):
supported_models = get_available_writer_model_ids(values)
if v not in supported_models:
raise InvalidWriterModel(
f"'model_id' must be one of {supported_models}, got {v}"
)
return v
class LLMLoader(BaseModel):
llm_config: dict
config_dict: dict = None
def load_llm(self) -> Union[Writer, partial]:
"""Load LLM"""
method_name = f"load_{self.llm_config['llm_name']}_llm"
self.config_dict = self.llm_config.copy()
self.config_dict.pop("llm_name")
return getattr(self, method_name)()
def load_writer_llm(self) -> Writer:
"""Load Writer LLM API interface"""
return Writer(**self.config_dict)
def load_openai_llm(self) -> partial:
"""Load OpenAI LLM API interface"""
openai.api_key = self.config_dict["openai_api_key"]
config = self.config_dict.copy()
config.pop("openai_api_key")
config["model"] = config.pop("model_id")
return partial(openai.Completion.create, **config)
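        # The partial returned above can be called like the underlying completion API,
        # e.g. (illustrative) llm(prompt="...") -- the model id and sampling settings
        # are already bound from the stored config.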
class RAGBaseParameters(BaseModel):
"""Base model parameters for RAG Handler"""
llm_params: Any
vector_store_folder_name: str
prompt_template: str = DEFAULT_QA_PROMPT_TEMPLATE
chunk_size: int = DEFAULT_CHUNK_SIZE
chunk_overlap: int = DEFAULT_CHUNK_OVERLAP
url: Union[str, List[str]] = None
run_embeddings: bool = True
top_k: int = 4
embeddings_model_name: str = DEFAULT_EMBEDDINGS_MODEL
context_columns: Union[List[str], str] = None
vector_store_name: str = DEFAULT_VECTOR_STORE_NAME
vector_store: VectorStore = None
collection_name: str = DEFAULT_VECTOR_STORE_COLLECTION_NAME
summarize_context: bool = True
summarization_prompt_template: str = DEFAULT_SUMMARIZATION_PROMPT_TEMPLATE
vector_store_storage_path: str = Field(
default=None, title="don't use this field, it's for internal use only"
)
class Config:
extra = Extra.forbid
arbitrary_types_allowed = True
use_enum_values = True
@validator("prompt_template")
def prompt_format_must_be_valid(cls, v):
if "{context}" not in v or "{question}" not in v:
raise InvalidPromptTemplate(
"prompt_template must contain {context} and {question}"
f"\n For example, {DEFAULT_QA_PROMPT_TEMPLATE}"
)
return v
@validator("vector_store_name")
def name_must_be_lower(cls, v):
return v.lower()
@validator("vector_store_name")
def vector_store_must_be_supported(cls, v):
if not is_valid_store(v):
raise UnsupportedVectorStore(
f"currently we only support {', '.join(str(v) for v in SUPPORTED_VECTOR_STORES)} vector store"
)
return v
class RAGHandlerParameters(RAGBaseParameters):
"""Model parameters for create model"""
llm_type: str
llm_params: LLMParameters
@validator("llm_type")
def llm_type_must_be_supported(cls, v):
if v not in SUPPORTED_LLMS:
raise UnsupportedLLM(f"'llm_type' must be one of {SUPPORTED_LLMS}, got {v}")
return v
class DfLoader(DataFrameLoader):
"""
override the load method of langchain.document_loaders.DataFrameLoaders to ignore rows with 'None' values
"""
def __init__(self, data_frame: pd.DataFrame, page_content_column: str):
super().__init__(data_frame=data_frame, page_content_column=page_content_column)
self._data_frame = data_frame
self._page_content_column = page_content_column
def load(self) -> List[Document]:
"""Loads the dataframe as a list of documents"""
documents = []
for n_row, frame in self._data_frame[self._page_content_column].items():
if pd.notnull(frame):
# ignore rows with None values
column_name = self._page_content_column
document_contents = frame
documents.append(
Document(
page_content=document_contents,
metadata={
"source": "dataframe",
"row": n_row,
"column": column_name,
},
)
)
return documents
def df_to_documents(
df: pd.DataFrame, page_content_columns: Union[List[str], str]
) -> List[Document]:
"""Converts a given dataframe to a list of documents"""
documents = []
if isinstance(page_content_columns, str):
page_content_columns = [page_content_columns]
for _, page_content_column in enumerate(page_content_columns):
if page_content_column not in df.columns.tolist():
raise ValueError(
f"page_content_column {page_content_column} not in dataframe columns"
)
loader = DfLoader(data_frame=df, page_content_column=page_content_column)
documents.extend(loader.load())
return documents
def url_to_documents(urls: Union[List[str], str]) -> List[Document]:
"""Converts a given url to a document"""
documents = []
if isinstance(urls, str):
urls = [urls]
for url in urls:
response = requests.get(url, headers=None).text
html_to_text = html2text.html2text(response)
documents.append(Document(page_content=html_to_text, metadata={"source": url}))
return documents
# todo issue#7361 hard coding device to cpu, add support for gpu later on
# e.g. {"device": "gpu" if torch.cuda.is_available() else "cpu"}
@lru_cache()
def load_embeddings_model(embeddings_model_name):
"""Load embeddings model from Hugging Face Hub"""
try:
model_kwargs = {"device": "cpu"}
embedding_model = HuggingFaceEmbeddings(
model_name=embeddings_model_name, model_kwargs=model_kwargs
)
except ValueError:
raise ValueError(
f"The {embeddings_model_name} is not supported, please select a valid option from Hugging Face Hub!"
)
return embedding_model
def on_create_build_llm_params(
args: dict, llm_config_class: Union[WriterLLMParameters, OpenAIParameters]
) -> Dict:
"""build llm params from create args"""
llm_params = {"llm_name": args["llm_type"]}
for param in llm_config_class.__fields__.keys():
if param in args:
llm_params[param] = args.pop(param)
return llm_params
def build_llm_params(args: dict, update=False) -> Dict:
"""build llm params from args"""
if args["llm_type"] == "writer":
llm_config_class = WriterLLMParameters
elif args["llm_type"] == "openai":
llm_config_class = OpenAIParameters
else:
raise UnsupportedLLM(
f"'llm_type' must be one of {SUPPORTED_LLMS}, got {args['llm_type']}"
)
if not args.get("llm_params"):
# for create method only
llm_params = on_create_build_llm_params(args, llm_config_class)
else:
# for predict method only
llm_params = args.pop("llm_params")
if update:
# for update method only
args["llm_params"] = llm_params
return args
args["llm_params"] = llm_config_class(**llm_params)
return args
| [
"prompt_template must contain {context} and {question}\n For example, PLACEHOLDER",
"\nSummarize the following texts for me:\n{context}\n\nWhen summarizing, please keep the following in mind the following question:\n{question}\n",
"\nUse the following pieces of context to answer the question at the end. If you do not know the answer,\njust say that you do not know, do not try to make up an answer.\nContext: {context}\nQuestion: {question}\nHelpful Answer:"
] |
2024-01-10 | scantist-ossops/mindsdb | mindsdb~integrations~handlers~file_handler~file_handler.py | import codecs
import csv
import json
import os
import tempfile
import traceback
from io import BytesIO, StringIO
from pathlib import Path
from urllib.parse import urlparse
import magic
import pandas as pd
import requests
from charset_normalizer import from_bytes
from mindsdb_sql import parse_sql
from mindsdb_sql.parser.ast import DropTables, Select
from mindsdb_sql.parser.ast.base import ASTNode
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import TextLoader, PyPDFLoader
from mindsdb.api.mysql.mysql_proxy.utilities.sql import query_df
from mindsdb.integrations.libs.base import DatabaseHandler
from mindsdb.integrations.libs.response import RESPONSE_TYPE
from mindsdb.integrations.libs.response import HandlerResponse as Response
from mindsdb.integrations.libs.response import HandlerStatusResponse as StatusResponse
DEFAULT_CHUNK_SIZE = 200
DEFAULT_CHUNK_OVERLAP = 50
def clean_cell(val):
if str(val) in ["", " ", " ", "NaN", "nan", "NA"]:
return None
return val
class FileHandler(DatabaseHandler):
"""
Handler for files
"""
name = "files"
def __init__(
self,
name=None,
file_storage=None,
connection_data={},
file_controller=None,
**kwargs,
):
super().__init__(name)
self.parser = parse_sql
self.fs_store = file_storage
self.custom_parser = connection_data.get("custom_parser", None)
self.clean_rows = connection_data.get("clean_rows", True)
self.chunk_size = connection_data.get("chunk_size", DEFAULT_CHUNK_SIZE)
self.chunk_overlap = connection_data.get("chunk_overlap", DEFAULT_CHUNK_OVERLAP)
self.file_controller = file_controller
def connect(self, **kwargs):
return
def disconnect(self, **kwargs):
return
def check_connection(self) -> StatusResponse:
return StatusResponse(True)
def query(self, query: ASTNode) -> Response:
if type(query) == DropTables:
for table_identifier in query.tables:
if (
len(table_identifier.parts) == 2
and table_identifier.parts[0] != self.name
):
return Response(
RESPONSE_TYPE.ERROR,
error_message=f"Can't delete table from database '{table_identifier.parts[0]}'",
)
table_name = table_identifier.parts[-1]
try:
self.file_controller.delete_file(table_name)
except Exception as e:
return Response(
RESPONSE_TYPE.ERROR,
error_message=f"Can't delete table '{table_name}': {e}",
)
return Response(RESPONSE_TYPE.OK)
elif type(query) == Select:
table_name = query.from_table.parts[-1]
file_path = self.file_controller.get_file_path(table_name)
df, _columns = self._handle_source(
file_path,
self.clean_rows,
self.custom_parser,
self.chunk_size,
self.chunk_overlap,
)
result_df = query_df(df, query)
return Response(RESPONSE_TYPE.TABLE, data_frame=result_df)
else:
return Response(
RESPONSE_TYPE.ERROR,
error_message="Only 'select' and 'drop' queries allowed for files",
)
def native_query(self, query: str) -> Response:
ast = self.parser(query, dialect="mindsdb")
return self.query(ast)
@staticmethod
def _handle_source(
file_path,
clean_rows=True,
custom_parser=None,
chunk_size=DEFAULT_CHUNK_SIZE,
chunk_overlap=DEFAULT_CHUNK_OVERLAP,
):
"""
This function takes a file path and returns a pandas dataframe
"""
# get file data io, format and dialect
data, fmt, dialect = FileHandler._get_data_io(file_path)
data.seek(0) # make sure we are at 0 in file pointer
if custom_parser:
header, file_data = custom_parser(data, fmt)
df = pd.DataFrame(file_data, columns=header)
elif fmt == "parquet":
df = pd.read_parquet(data)
elif fmt == "csv":
df = pd.read_csv(data, sep=dialect.delimiter, index_col=False)
elif fmt in ["xlsx", "xls"]:
data.seek(0)
df = pd.read_excel(data)
elif fmt == "json":
data.seek(0)
json_doc = json.loads(data.read())
df = pd.json_normalize(json_doc, max_level=0)
elif fmt == "txt" or fmt == "pdf":
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
if fmt == "txt":
loader = TextLoader(file_path, encoding="utf8")
docs = text_splitter.split_documents(loader.load())
df = pd.DataFrame(
[
{"content": doc.page_content, "metadata": doc.metadata}
for doc in docs
]
)
elif fmt == "pdf":
loader = PyPDFLoader(file_path)
docs = text_splitter.split_documents(loader.load_and_split())
df = pd.DataFrame(
[
{"content": doc.page_content, "metadata": doc.metadata}
for doc in docs
]
)
else:
raise ValueError(
"Could not load file into any format, supported formats are csv, json, xls, xlsx, pdf, txt"
)
header = df.columns.values.tolist()
df = df.rename(columns={key: key.strip() for key in header})
df = df.applymap(clean_cell)
header = [x.strip() for x in header]
col_map = dict((col, col) for col in header)
return df, col_map
@staticmethod
def is_it_parquet(data: BytesIO) -> bool:
# Check first and last 4 bytes equal to PAR1.
# Refer: https://parquet.apache.org/docs/file-format/
parquet_sig = b"PAR1"
data.seek(0, 0)
start_meta = data.read(4)
data.seek(-4, 2)
end_meta = data.read()
data.seek(0)
if start_meta == parquet_sig and end_meta == parquet_sig:
return True
return False
@staticmethod
def is_it_xlsx(file_path: str) -> bool:
file_type = magic.from_file(file_path, mime=True)
if file_type in [
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"application/vnd.ms-excel",
]:
return True
return False
@staticmethod
def is_it_json(data_str: StringIO) -> bool:
# see if its JSON
text = data_str.read(100).strip()
data_str.seek(0)
if len(text) > 0:
# it it looks like a json, then try to parse it
if text.startswith("{") or text.startswith("["):
try:
json.loads(data_str.read())
return True
except Exception:
return False
finally:
data_str.seek(0)
return False
@staticmethod
def is_it_csv(data_str: StringIO) -> bool:
sample = data_str.readline() # trying to get dialect from header
data_str.seek(0)
try:
csv.Sniffer().sniff(sample)
# Avoid a false-positive for json files
try:
json.loads(data_str.read())
data_str.seek(0)
return False
except json.decoder.JSONDecodeError:
data_str.seek(0)
return True
except Exception:
return False
@staticmethod
def _get_data_io(file_path):
"""
@TODO: Use python-magic to simplify the function and detect the file types as the xlsx example
This gets a file either url or local file and defines what the format is as well as dialect
:param file: file path or url
:return: data_io, format, dialect
"""
data = BytesIO()
data_str = None
dialect = None
try:
with open(file_path, "rb") as fp:
data = BytesIO(fp.read())
except Exception as e:
error = "Could not load file, possible exception : {exception}".format(
exception=e
)
print(error)
raise ValueError(error)
suffix = Path(file_path).suffix.strip(".").lower()
if suffix not in ("csv", "json", "xlsx", "parquet"):
if FileHandler.is_it_parquet(data):
suffix = "parquet"
elif FileHandler.is_it_xlsx(file_path):
suffix = "xlsx"
if suffix == "parquet":
return data, "parquet", dialect
if suffix == "xlsx":
return data, "xlsx", dialect
if suffix == "txt":
return data, "txt", dialect
if suffix == "pdf":
return data, "pdf", dialect
byte_str = data.read()
# Move it to StringIO
try:
# Handle Microsoft's BOM "special" UTF-8 encoding
if byte_str.startswith(codecs.BOM_UTF8):
data_str = StringIO(byte_str.decode("utf-8-sig"))
else:
file_encoding_meta = from_bytes(
byte_str[: 32 * 1024],
steps=32, # Number of steps/block to extract from my_byte_str
chunk_size=1024, # Set block size of each extraction)
explain=False,
)
best_meta = file_encoding_meta.best()
errors = "strict"
if best_meta is not None:
encoding = file_encoding_meta.best().encoding
try:
data_str = StringIO(byte_str.decode(encoding, errors))
except UnicodeDecodeError:
encoding = "utf-8"
errors = "replace"
data_str = StringIO(byte_str.decode(encoding, errors))
else:
encoding = "utf-8"
errors = "replace"
data_str = StringIO(byte_str.decode(encoding, errors))
except Exception:
print(traceback.format_exc())
print("Could not load into string")
if suffix not in ("csv", "json"):
if FileHandler.is_it_json(data_str):
suffix = "json"
elif FileHandler.is_it_csv(data_str):
suffix = "csv"
if suffix == "json":
return data_str, suffix, dialect
if suffix == "csv":
try:
dialect = FileHandler._get_csv_dialect(data_str)
if dialect:
return data_str, "csv", dialect
except Exception:
print("Could not detect format for this file")
print(traceback.format_exc())
data_str.seek(0)
data.seek(0)
# No file type identified
return data, None, dialect
@staticmethod
def _get_file_path(path) -> str:
try:
is_url = urlparse(path).scheme in ("http", "https")
except Exception:
is_url = False
if is_url:
path = FileHandler._fetch_url(path)
return path
@staticmethod
def _get_csv_dialect(buffer) -> csv.Dialect:
sample = buffer.readline() # trying to get dialect from header
buffer.seek(0)
try:
if isinstance(sample, bytes):
sample = sample.decode()
accepted_csv_delimiters = [",", "\t", ";"]
try:
dialect = csv.Sniffer().sniff(
sample, delimiters=accepted_csv_delimiters
)
dialect.doublequote = (
True # assume that all csvs have " as string escape
)
except Exception:
dialect = csv.reader(sample).dialect
if dialect.delimiter not in accepted_csv_delimiters:
raise Exception(
f"CSV delimeter '{dialect.delimiter}' is not supported"
)
except csv.Error:
dialect = None
return dialect
@staticmethod
def _fetch_url(url: str) -> str:
temp_dir = tempfile.mkdtemp(prefix="mindsdb_file_url_")
try:
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(os.path.join(temp_dir, "file"), "wb") as f:
for chunk in r:
f.write(chunk)
else:
raise Exception(f"Response status code is {r.status_code}")
except Exception as e:
print(f"Error during getting {url}")
print(e)
raise
return os.path.join(temp_dir, "file")
def get_tables(self) -> Response:
"""
List all files
"""
files_meta = self.file_controller.get_files()
data = [
{
"TABLE_NAME": x["name"],
"TABLE_ROWS": x["row_count"],
"TABLE_TYPE": "BASE TABLE",
}
for x in files_meta
]
return Response(RESPONSE_TYPE.TABLE, data_frame=pd.DataFrame(data))
def get_columns(self, table_name) -> Response:
file_meta = self.file_controller.get_file_meta(table_name)
result = Response(
RESPONSE_TYPE.TABLE,
data_frame=pd.DataFrame(
[
{
"Field": x["name"].strip()
if isinstance(x, dict)
else x.strip(),
"Type": "str",
}
for x in file_meta["columns"]
]
),
)
return result
| [] |
2024-01-10 | AliNaqvi110/Llama2-Medical-Chatbot | app1.py | import streamlit as st
from streamlit_chat import message
from langchain.chains import ConversationalRetrievalChain
from langchain.document_loaders import PyPDFLoader, DirectoryLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import CTransformers
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.memory import ConversationBufferMemory
#load the pdf files from the path
loader = DirectoryLoader('data/',glob="*.pdf",loader_cls=PyPDFLoader)
documents = loader.load()
#split text into chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500,chunk_overlap=50)
text_chunks = text_splitter.split_documents(documents)
#create embeddings
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2",
model_kwargs={'device':"cpu"})
#vectorstore
vector_store = FAISS.from_documents(text_chunks,embeddings)
#create llm
llm = CTransformers(model="llama-2-7b-chat.ggmlv3.q4_0.bin",model_type="llama",
config={'max_new_tokens':128,'temperature':0.01})
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
chain = ConversationalRetrievalChain.from_llm(llm=llm,chain_type='stuff',
retriever=vector_store.as_retriever(search_kwargs={"k":2}),
memory=memory)
st.title("HealthCare ChatBot ๐ง๐ฝโโ๏ธ")
def conversation_chat(query):
result = chain({"question": query, "chat_history": st.session_state['history']})
st.session_state['history'].append((query, result["answer"]))
return result["answer"]
def initialize_session_state():
if 'history' not in st.session_state:
st.session_state['history'] = []
if 'generated' not in st.session_state:
st.session_state['generated'] = ["Hello! Ask me anything about ๐ค"]
if 'past' not in st.session_state:
st.session_state['past'] = ["Hey! ๐"]
def display_chat_history():
reply_container = st.container()
container = st.container()
with container:
with st.form(key='my_form', clear_on_submit=True):
user_input = st.text_input("Question:", placeholder="Ask about your Mental Health", key='input')
submit_button = st.form_submit_button(label='Send')
if submit_button and user_input:
output = conversation_chat(user_input)
st.session_state['past'].append(user_input)
st.session_state['generated'].append(output)
if st.session_state['generated']:
with reply_container:
for i in range(len(st.session_state['generated'])):
message(st.session_state["past"][i], is_user=True, key=str(i) + '_user', avatar_style="thumbs")
message(st.session_state["generated"][i], key=str(i), avatar_style="fun-emoji")
# Initialize session state
initialize_session_state()
# Display chat history
display_chat_history()
import streamlit as st
from streamlit_chat import message
from langchain.chains import ConversationalRetrievalChain
from langchain.document_loaders import PyPDFLoader, DirectoryLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import CTransformers
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.memory import ConversationBufferMemory
#load the pdf files from the path
loader = DirectoryLoader('medical_bot/dataset/',glob="*.pdf",loader_cls=PyPDFLoader)
documents = loader.load()
#split text into chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500,chunk_overlap=50)
text_chunks = text_splitter.split_documents(documents)
#create embeddings
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2",
model_kwargs={'device':"cpu"})
#vectorstore
vector_store = FAISS.from_documents(text_chunks,embeddings)
#create llm
llm = CTransformers(model="medical_bot\llama-2-7b-chat.ggmlv3.q8_0.bin",model_type="llama",
config={'max_new_tokens':128,'temperature':0.01})
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
chain = ConversationalRetrievalChain.from_llm(llm=llm,chain_type='stuff',
retriever=vector_store.as_retriever(search_kwargs={"k":2}),
memory=memory)
st.title("HealthCare ChatBot ๐ง๐ฝโโ๏ธ")
def conversation_chat(query):
result = chain({"question": query, "chat_history": st.session_state['history']})
st.session_state['history'].append((query, result["answer"]))
return result["answer"]
def initialize_session_state():
if 'history' not in st.session_state:
st.session_state['history'] = []
if 'generated' not in st.session_state:
st.session_state['generated'] = ["Hello! Ask me anything about ๐ค"]
if 'past' not in st.session_state:
st.session_state['past'] = ["Hey! ๐"]
def display_chat_history():
reply_container = st.container()
container = st.container()
with container:
with st.form(key='my_form', clear_on_submit=True):
user_input = st.text_input("Question:", placeholder="Ask about your Mental Health", key='input')
submit_button = st.form_submit_button(label='Send')
if submit_button and user_input:
output = conversation_chat(user_input)
st.session_state['past'].append(user_input)
st.session_state['generated'].append(output)
if st.session_state['generated']:
with reply_container:
for i in range(len(st.session_state['generated'])):
message(st.session_state["past"][i], is_user=True, key=str(i) + '_user', avatar_style="thumbs")
message(st.session_state["generated"][i], key=str(i), avatar_style="fun-emoji")
# Initialize session state
initialize_session_state()
# Display chat history
display_chat_history()
| [] |
2024-01-10 | jiffstudio/hackathon | read_and_segment.py | import os
import json
import requests
from langchain.document_loaders import UnstructuredFileLoader
import streamlit as st
import openai
import random
from learnBi.mycomponent import mycomponent
def make_card(text):
openai.api_key = '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'
# text = "ไป
ไป
้่ฟ็ฅ็ปๅ
่ฟ่กๅญๅจๆฐๆฎ๏ผๅญๅจ่ฝๅๅๅๆ้ใNTM[12]ๆๆฉๆๅบไบๅค้จ่ฎฐๅฟๅขๅผบ็ฅ็ป็ฝ็ปๆถๆ๏ผ้่ฟไธไธชๅคง็ๅฏๅฏปๅ็ๅญๅจๅจๆฅๆฉๅฑๅญๅจ็่ฝๅ๏ผๅฎ็ฐๅญๅจ็ฎก็ๅนถไธๅฏๅพฎใ็ฅ็ปๅพ็ตๆบ็็ตๆๆฅ่ชไบๅพ็ตๆบ็ๆถๆ๏ผ็ฑๆงๅถๅจใๅนถ่ก่ฏปๅๅคดๅๅค้จๅญๅจๅจ็ปๆ๏ผๅฐ็ฅ็ป็ฝ็ปๅๅค้จๅญๅจๅจ็ปๅๆฅๆฉๅฑ็ฅ็ป็ฝ็ป็่ฝๅ๏ผๅฏไปฅไฝฟ็จๆขฏๅบฆไธ้่ฟ่ก้ซๆ่ฎญ็ปใNTM ๅฏไปฅ้่ฟ้ๆฉๆง็่ฏปๅๆไฝไธๅ
ๅญ็ฉ้ต่ฟ่กไบคไบใๅฏไปฅ้่ฟไธค็งๆนๅผ่ฟ่กๅฏปๅ๏ผไธ็งๆฏๅบไบๅ
ๅฎน็ๅฏปๅ๏ผๅฆๅคไธ็งๆฏๅบไบไฝ็ฝฎ็ๅฏปๅใ"
# prompt = "่ฏทๆ นๆฎๆๆไพ็ๆๆฌ๏ผ็ๆไธๅฅๆฝ่ฎคๅกใๅจๅถไฝๆฝ่ฎคๅก็ๆถๅ๏ผ่ฏทๅพช็ฏไธ่ฟฐ่ฆๆฑ๏ผ1.ไฟๆๆฝ่ฎคๅก็็ฎๅใๆธ
ๆฐ๏ผๅนถ้ไธญไบๆ้่ฆ็ไฟกๆฏ2.็กฎไฟ็ญๆกๆฏๅ
ทไฝ็๏ผไฝฟ็จ็ฎๅๆธ
ๆฐ็่ฏญ่จ3.ๅฐ้ไบๅฎ๏ผๅนถไฝฟๅก็ไพฟไบ้
่ฏปๅ็่งฃ4.ไฝฟ็จไธๅๆๆฌ็ธๅ็่ฏญ่จ่ฟ่กๅ็ญ"
prompt = "่ฏทๆ นๆฎๆๆไพ็ๆๆฌ๏ผๅถไฝไธๅฅๆฝ่ฎคๅกใ\
ๅจๅถไฝๆฝ่ฎคๅกๆถ๏ผ่ฏท้ตๅพชไธ่ฟฐ่ฆๆฑ๏ผ\
1. ไฟๆๆฝ่ฎคๅก็็ฎๅใๆธ
ๆฐ๏ผๅนถ้ไธญไบๆ้่ฆ็ไฟกๆฏใ\
2. ็กฎไฟ้ฎ้ขๆฏๅ
ทไฝ็ใไธๅซ็ณ็ใ\
3. ไฝฟ็จๆธ
ๆฐๅ็ฎๆด็่ฏญ่จ๏ผไฝฟๅก็ๆไบ้
่ฏปๅ็่งฃใ\
4. ็ญๆก้ตๅพชๅฎข่งไบๅฎใ\
ๅถไฝๆฝ่ฎคๅกๆถ๏ผ่ฎฉๆไปฌไธๆญฅไธๆญฅๆฅ๏ผ\
็ฌฌไธๆญฅ๏ผ็ปๅไธไธๆ๏ผไฝฟ็จ็ฎๅ็็ธๅ่ฏญ่จๆนๅๅ
ๅฎน๏ผๅๆถไฟ็ๅ
ถๅๆฅ็ๆๆใ\
็ฌฌไบๆญฅ๏ผๅฐๅ
ๅฎนๅๆๅ ไธชๅฐ่๏ผๆฏไธชๅฐ่ไธๆณจไบไธไธช่ฆ็นใ\
็ฌฌไธๆญฅ๏ผๅฉ็จๅฐ่ๆฅ็ๆๅคๅผ ๆฝ่ฎคๅก๏ผๅฏนไบ่ถ
่ฟ50ไธชๅญ็ๅฐ่๏ผๅ
่ฟ่กๆๅๅๆฆๆฌ๏ผๅๅถไฝๆฝ่ฎคๅกใๅช็ๆๆ้่ฆ็ๅ
ๅฎนๅณๅฏใ\
ๆๆฌ๏ผ่กฐ่็ป่็็นๅพๆฏ็ป่ๅ
็ๆฐดๅๅๅฐ๏ผ็ปๆไฝฟ็ป่่็ผฉ๏ผไฝ็งฏๅๅฐ๏ผ็ป่ไปฃ่ฐข็้็ๅๆ
ขใ็ป่ๅ
ๅค็ง้
ถ็ๆดปๆง้ไฝใ็ป่ๆ ธ็ไฝ็งฏๅขๅคง๏ผๆ ธ่ๅ
ๆ๏ผๆ่ฒ่ดจๆถ็ผฉใๆ่ฒๅ ๆทฑใ็ป่่้้ๆงๆนๅ๏ผไฝฟ็ฉ่ดจ่ฟ่พๅ่ฝ้ไฝใ\
ไธๅฅๅก็๏ผ\n\
ๅก็1๏ผ\n\
้ฎ้ข๏ผ่กฐ่็ป่็ไฝ็งฏไผๆไนๅๅ๏ผ\n\
็ญๆก๏ผๅๅฐใ\n\
ๅก็2๏ผ\n\
้ฎ้ข๏ผ่กฐ่็ป่็ไฝ็งฏๅๅ็ๅ
ทไฝ่กจ็ฐๆฏไปไน๏ผ\n\
็ญๆก๏ผ็ป่่็ผฉใ\n\
ๅก็3๏ผ\n\
้ฎ้ข๏ผ่กฐ่็ป่็ไฝ็งฏๅๅๅๅ ๆฏไปไน๏ผ\n\
็ญๆก๏ผ็ป่ๅ
็ๆฐดๅๅๅฐใ\n\
ๅก็4๏ผ\n\
้ฎ้ข๏ผ่กฐ่็ป่ๅ
็ๆฐดๅๅๅๅฏน็ป่ไปฃ่ฐข็ๅฝฑๅๆฏไปไน๏ผ\n\
็ญๆก๏ผ็ป่ไปฃ่ฐข็้็ๅๆ
ขใ\n\
ๅก็5๏ผ\n\
้ฎ้ข๏ผ่กฐ่็ป่ๅ
็้
ถๆดปๆงๅฆไฝๅๅ๏ผ\n\
็ญๆก๏ผๆดปๆง้ไฝใ\n\
ๅก็6๏ผ\n\
้ฎ้ข๏ผ่กฐ่็ป่็็ป่ๆ ธไฝ็งฏๅฆไฝๅๅ๏ผ\n\
็ญๆก๏ผไฝ็งฏๅๅคงใ\n\
ๅก็7๏ผ\n\
้ฎ้ข๏ผ่กฐ่็ป่็็ป่ๆ ธ็ๆ ธ่ๅฆไฝๅๅ๏ผ\
็ญๆก๏ผๆ ธ่ๅ
ๆใ\
ๅก็8๏ผ\
้ฎ้ข๏ผ่กฐ่็ป่็็ป่ๆ ธ็ๆ่ฒ่ดจๅฆไฝๅๅ๏ผ\n\
็ญๆก๏ผๆ่ฒ่ดจๆถ็ผฉใ\n\
ๅก็9๏ผ\n\
้ฎ้ข๏ผ่กฐ่็ป่็็ป่ๆ ธ็ๆ่ฒ่ดจๅๅๅฏน็ป่ๆ ธๅฝขๆ็ๅฝฑๅๆฏ๏ผ\n\
็ญๆก๏ผๆ่ฒๅ ๆทฑใ\n\
ๅก็10๏ผ\n\
้ฎ้ข๏ผ่กฐ่็ป่็็ฉ่ดจ่ฟ่พๅ่ฝๅฆไฝๅๅ?\n\
็ญๆก๏ผ็ฉ่ดจ่ฟ่พๅ่ฝ้ไฝใ\n\
ๅก็11๏ผ\n\
้ฎ้ข๏ผ่กฐ่็ป่็็ฉ่ดจ่ฟ่พๅ่ฝไธบไฝๅๅ๏ผ\n\
็ญๆก๏ผ็ป่่้้ๆงๆนๅใ\n\
ๆๆฌ๏ผ"
def chat_with_gpt(p):
url = "https://openai.api2d.net/v1/completions"
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer fk205005-4JjeuMSr5qUREGOdRyqpS0pWQ6iAf6sM'
# <-- ๆ fkxxxxx ๆฟๆขๆไฝ ่ชๅทฑ็ Forward Key๏ผๆณจๆๅ้ข็ Bearer ่ฆไฟ็๏ผๅนถไธๅ Key ไธญ้ดๆไธไธช็ฉบๆ ผใ
}
data = {
"model": "text-davinci-003",
"prompt": p,
"max_tokens": 2000,
"n": 5,
"stop": None,
}
response = requests.post(url, headers=headers, json=data)
return response.json()['choices'][0]['text'].strip()
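    # chat_with_gpt posts to api2d.net, an OpenAI-compatible forwarding service (hence the "Forward Key"
    # mentioned in the comment above); the payload mirrors the standard /v1/completions parameters
    # (model, prompt, max_tokens, n, stop).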
response = chat_with_gpt(prompt + text + "\nไธๅฅๅก็๏ผ\n")
print(response)
def colorful_card(title, ques, ans, color):
style = f"""
background-color: {color};
padding: 15px;
border-radius: 10px;
margin-bottom: 20px;
width: 400px;
height: 260px;
box-shadow: 2px 2px 5px rgba(0, 0, 0, 0.1);
"""
container_style = """
display: flex;
flex-direction: column;
align-items: center;
"""
content = f"{ques}<br>{ans}"
card_html = f"""
<div style="{container_style}">
<div style="{style}">
<h2>{title}</h2>
<p>{content}</p>
</div>
</div>
"""
st.markdown(card_html, unsafe_allow_html=True)
titles = []
ques = []
ans = []
colors = ["#98FF98", "#FFC0CB", "#C8A2C8", "#87CEEB", "#FFFACD", "#ADD8E6", "#32CD32", "#E6E6FA", "#00CED1",
"#90EE90", "#FFD700"]
lines = response.splitlines()
lines = [s for s in lines if s != '']
print(lines)
random_elements = random.sample(colors, len(lines) // 3)
print(random_elements)
    for i in range(len(lines)):  # iterate over the filtered, non-empty lines so lines[i] stays in range
if i % 3 == 0:
titles.append(lines[i])
if i % 3 == 1:
ques.append(lines[i])
if i % 3 == 2:
ans.append(lines[i])
print(titles)
print(ques)
print(ans)
for i in range(len(ans)):
colorful_card(titles[i], ques[i], ans[i], random_elements[i])
def get_first_card(text):
messages = [{"role": "user", "content": f'''Imagine you are a Text-to-Card Converter. Your task is to take lengthy pieces of text and break them down into several small, easily digestible cards for the user to read. Each card should encapsulate a focused concept but also need to faithfully replicate the original text, including a title and content. Importantly, the language used in the cards must be in Chinese. Some parts may have formatting issues, please fix them. Below is the original text.
---------------------------------
{text}'''}]
functions = [
{
"name": "get_first_card",
"description": "Get first card in a given text",
"parameters": {
"type": "object",
"properties": {
"card": {
"type": "object",
"properties": {
"title": {
"type": "string",
"description": "The title, e.g. Concept of RLHF, keep it blank if not focused enough",
},
"content": {
"type": "string",
"description": "The content",
},
}
},
"remaining": {
"type": "string",
"description": "The first 10 words of remaining text that is not included in the first card",
},
},
"required": ["card", "remaining"],
},
}
]
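    # This is an OpenAI function-calling schema: with "function_call": "auto" below, the model is steered
    # to return JSON arguments matching it (a `card` object plus the `remaining` text), which the caller
    # later parses out of message.function_call.arguments with json.loads.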
import requests
url = "https://openai.api2d.net/v1/chat/completions"
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer fk205005-4JjeuMSr5qUREGOdRyqpS0pWQ6iAf6sM'
# <-- ๆ fkxxxxx ๆฟๆขๆไฝ ่ชๅทฑ็ Forward Key๏ผๆณจๆๅ้ข็ Bearer ่ฆไฟ็๏ผๅนถไธๅ Key ไธญ้ดๆไธไธช็ฉบๆ ผใ
}
data = {
"model": "gpt-3.5-turbo-0613",
"messages": messages,
"functions": functions,
"function_call": "auto",
}
response = requests.post(url, headers=headers, json=data)
print("Status Code", response.status_code)
print("JSON Response ", response.json())
return response.json()
st.header("PDF Import and Display")
uploaded_file = st.file_uploader("Choose a PDF file", type=['pdf', 'docx', 'txt'])
if uploaded_file is not None:
file_details = {"FileName": uploaded_file.name, "FileType": uploaded_file.type, "FileSize": uploaded_file.size}
st.write(file_details)
with open(os.path.join("pdf_files", uploaded_file.name), "wb") as f:
f.write(uploaded_file.getbuffer())
file_path = os.path.join("pdf_files", uploaded_file.name)
st.write(file_path)
loader = UnstructuredFileLoader(file_path, mode="elements")
docs = loader.load()
print([doc.page_content for doc in docs])
text = '\n'.join([doc.page_content for doc in docs])
print(st.session_state)
selected = None
if 'cards' in st.session_state:
for card in st.session_state.cards:
value = mycomponent(my_input_value=f'<em>{card["title"]}</em><br>{card["content"]}')
if value and len(value) > 0:
selected = value
if selected:
st.write(selected)
make_card(selected)
if 'remaining' not in st.session_state or len(st.session_state.remaining) > 10:
if st.button('็ปง็ปญ'):
if 'remaining' not in st.session_state:
st.session_state.remaining = text
st.session_state.cards = []
arguments = json.loads(get_first_card(st.session_state.remaining[:1000])['choices'][0]['message']['function_call']['arguments'])
st.session_state.remaining = st.session_state.remaining[st.session_state.remaining.find(arguments["remaining"][:4]):]
st.session_state.cards.append(arguments["card"])
st.experimental_rerun() | [
"Imagine you are a Text-to-Card Converter. Your task is to take lengthy pieces of text and break them down into several small, easily digestible cards for the user to read. Each card should encapsulate a focused concept but also need to faithfully replicate the original text, including a title and content. Importantly, the language used in the cards must be in Chinese. Some parts may have formatting issues, please fix them. Below is the original text.\n ---------------------------------\n PLACEHOLDER",
"่ฏทๆ นๆฎๆๆไพ็ๆๆฌ๏ผๅถไฝไธๅฅๆฝ่ฎคๅกใ ๅจๅถไฝๆฝ่ฎคๅกๆถ๏ผ่ฏท้ตๅพชไธ่ฟฐ่ฆๆฑ๏ผ 1. ไฟๆๆฝ่ฎคๅก็็ฎๅใๆธ
ๆฐ๏ผๅนถ้ไธญไบๆ้่ฆ็ไฟกๆฏใ 2. ็กฎไฟ้ฎ้ขๆฏๅ
ทไฝ็ใไธๅซ็ณ็ใ 3. ไฝฟ็จๆธ
ๆฐๅ็ฎๆด็่ฏญ่จ๏ผไฝฟๅก็ๆไบ้
่ฏปๅ็่งฃใ 4. ็ญๆก้ตๅพชๅฎข่งไบๅฎใ ๅถไฝๆฝ่ฎคๅกๆถ๏ผ่ฎฉๆไปฌไธๆญฅไธๆญฅๆฅ๏ผ ็ฌฌไธๆญฅ๏ผ็ปๅไธไธๆ๏ผไฝฟ็จ็ฎๅ็็ธๅ่ฏญ่จๆนๅๅ
ๅฎน๏ผๅๆถไฟ็ๅ
ถๅๆฅ็ๆๆใ ็ฌฌไบๆญฅ๏ผๅฐๅ
ๅฎนๅๆๅ ไธชๅฐ่๏ผๆฏไธชๅฐ่ไธๆณจไบไธไธช่ฆ็นใ ็ฌฌไธๆญฅ๏ผๅฉ็จๅฐ่ๆฅ็ๆๅคๅผ ๆฝ่ฎคๅก๏ผๅฏนไบ่ถ
่ฟ50ไธชๅญ็ๅฐ่๏ผๅ
่ฟ่กๆๅๅๆฆๆฌ๏ผๅๅถไฝๆฝ่ฎคๅกใๅช็ๆๆ้่ฆ็ๅ
ๅฎนๅณๅฏใ ๆๆฌ๏ผ่กฐ่็ป่็็นๅพๆฏ็ป่ๅ
็ๆฐดๅๅๅฐ๏ผ็ปๆไฝฟ็ป่่็ผฉ๏ผไฝ็งฏๅๅฐ๏ผ็ป่ไปฃ่ฐข็้็ๅๆ
ขใ็ป่ๅ
ๅค็ง้
ถ็ๆดปๆง้ไฝใ็ป่ๆ ธ็ไฝ็งฏๅขๅคง๏ผๆ ธ่ๅ
ๆ๏ผๆ่ฒ่ดจๆถ็ผฉใๆ่ฒๅ ๆทฑใ็ป่่้้ๆงๆนๅ๏ผไฝฟ็ฉ่ดจ่ฟ่พๅ่ฝ้ไฝใ ไธๅฅๅก็๏ผ\n ๅก็1๏ผ\n ้ฎ้ข๏ผ่กฐ่็ป่็ไฝ็งฏไผๆไนๅๅ๏ผ\n ็ญๆก๏ผๅๅฐใ\n ๅก็2๏ผ\n ้ฎ้ข๏ผ่กฐ่็ป่็ไฝ็งฏๅๅ็ๅ
ทไฝ่กจ็ฐๆฏไปไน๏ผ\n ็ญๆก๏ผ็ป่่็ผฉใ\n ๅก็3๏ผ\n ้ฎ้ข๏ผ่กฐ่็ป่็ไฝ็งฏๅๅๅๅ ๆฏไปไน๏ผ\n ็ญๆก๏ผ็ป่ๅ
็ๆฐดๅๅๅฐใ\n ๅก็4๏ผ\n ้ฎ้ข๏ผ่กฐ่็ป่ๅ
็ๆฐดๅๅๅๅฏน็ป่ไปฃ่ฐข็ๅฝฑๅๆฏไปไน๏ผ\n ็ญๆก๏ผ็ป่ไปฃ่ฐข็้็ๅๆ
ขใ\n ๅก็5๏ผ\n ้ฎ้ข๏ผ่กฐ่็ป่ๅ
็้
ถๆดปๆงๅฆไฝๅๅ๏ผ\n ็ญๆก๏ผๆดปๆง้ไฝใ\n ๅก็6๏ผ\n ้ฎ้ข๏ผ่กฐ่็ป่็็ป่ๆ ธไฝ็งฏๅฆไฝๅๅ๏ผ\n ็ญๆก๏ผไฝ็งฏๅๅคงใ\n ๅก็7๏ผ\n ้ฎ้ข๏ผ่กฐ่็ป่็็ป่ๆ ธ็ๆ ธ่ๅฆไฝๅๅ๏ผ ็ญๆก๏ผๆ ธ่ๅ
ๆใ ๅก็8๏ผ ้ฎ้ข๏ผ่กฐ่็ป่็็ป่ๆ ธ็ๆ่ฒ่ดจๅฆไฝๅๅ๏ผ\n ็ญๆก๏ผๆ่ฒ่ดจๆถ็ผฉใ\n ๅก็9๏ผ\n ้ฎ้ข๏ผ่กฐ่็ป่็็ป่ๆ ธ็ๆ่ฒ่ดจๅๅๅฏน็ป่ๆ ธๅฝขๆ็ๅฝฑๅๆฏ๏ผ\n ็ญๆก๏ผๆ่ฒๅ ๆทฑใ\n ๅก็10๏ผ\n ้ฎ้ข๏ผ่กฐ่็ป่็็ฉ่ดจ่ฟ่พๅ่ฝๅฆไฝๅๅ?\n ็ญๆก๏ผ็ฉ่ดจ่ฟ่พๅ่ฝ้ไฝใ\n ๅก็11๏ผ\n ้ฎ้ข๏ผ่กฐ่็ป่็็ฉ่ดจ่ฟ่พๅ่ฝไธบไฝๅๅ๏ผ\n ็ญๆก๏ผ็ป่่้้ๆงๆนๅใ\n ๆๆฌ๏ผ",
"{'type': 'string', 'description': 'The content'}"
] |
2024-01-10 | jiffstudio/hackathon | make_card.py | import openai
import streamlit as st
import random
def make_card(text):
openai.api_key = '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'
# text = "ไป
ไป
้่ฟ็ฅ็ปๅ
่ฟ่กๅญๅจๆฐๆฎ๏ผๅญๅจ่ฝๅๅๅๆ้ใNTM[12]ๆๆฉๆๅบไบๅค้จ่ฎฐๅฟๅขๅผบ็ฅ็ป็ฝ็ปๆถๆ๏ผ้่ฟไธไธชๅคง็ๅฏๅฏปๅ็ๅญๅจๅจๆฅๆฉๅฑๅญๅจ็่ฝๅ๏ผๅฎ็ฐๅญๅจ็ฎก็ๅนถไธๅฏๅพฎใ็ฅ็ปๅพ็ตๆบ็็ตๆๆฅ่ชไบๅพ็ตๆบ็ๆถๆ๏ผ็ฑๆงๅถๅจใๅนถ่ก่ฏปๅๅคดๅๅค้จๅญๅจๅจ็ปๆ๏ผๅฐ็ฅ็ป็ฝ็ปๅๅค้จๅญๅจๅจ็ปๅๆฅๆฉๅฑ็ฅ็ป็ฝ็ป็่ฝๅ๏ผๅฏไปฅไฝฟ็จๆขฏๅบฆไธ้่ฟ่ก้ซๆ่ฎญ็ปใNTM ๅฏไปฅ้่ฟ้ๆฉๆง็่ฏปๅๆไฝไธๅ
ๅญ็ฉ้ต่ฟ่กไบคไบใๅฏไปฅ้่ฟไธค็งๆนๅผ่ฟ่กๅฏปๅ๏ผไธ็งๆฏๅบไบๅ
ๅฎน็ๅฏปๅ๏ผๅฆๅคไธ็งๆฏๅบไบไฝ็ฝฎ็ๅฏปๅใ"
# prompt = "่ฏทๆ นๆฎๆๆไพ็ๆๆฌ๏ผ็ๆไธๅฅๆฝ่ฎคๅกใๅจๅถไฝๆฝ่ฎคๅก็ๆถๅ๏ผ่ฏทๅพช็ฏไธ่ฟฐ่ฆๆฑ๏ผ1.ไฟๆๆฝ่ฎคๅก็็ฎๅใๆธ
ๆฐ๏ผๅนถ้ไธญไบๆ้่ฆ็ไฟกๆฏ2.็กฎไฟ็ญๆกๆฏๅ
ทไฝ็๏ผไฝฟ็จ็ฎๅๆธ
ๆฐ็่ฏญ่จ3.ๅฐ้ไบๅฎ๏ผๅนถไฝฟๅก็ไพฟไบ้
่ฏปๅ็่งฃ4.ไฝฟ็จไธๅๆๆฌ็ธๅ็่ฏญ่จ่ฟ่กๅ็ญ"
prompt = "่ฏทๆ นๆฎๆๆไพ็ๆๆฌ๏ผๅถไฝไธๅฅๆฝ่ฎคๅกใ\
ๅจๅถไฝๆฝ่ฎคๅกๆถ๏ผ่ฏท้ตๅพชไธ่ฟฐ่ฆๆฑ๏ผ\
1. ไฟๆๆฝ่ฎคๅก็็ฎๅใๆธ
ๆฐ๏ผๅนถ้ไธญไบๆ้่ฆ็ไฟกๆฏใ\
2. ็กฎไฟ้ฎ้ขๆฏๅ
ทไฝ็ใไธๅซ็ณ็ใ\
3. ไฝฟ็จๆธ
ๆฐๅ็ฎๆด็่ฏญ่จ๏ผไฝฟๅก็ๆไบ้
่ฏปๅ็่งฃใ\
4. ็ญๆก้ตๅพชๅฎข่งไบๅฎใ\
ๅถไฝๆฝ่ฎคๅกๆถ๏ผ่ฎฉๆไปฌไธๆญฅไธๆญฅๆฅ๏ผ\
็ฌฌไธๆญฅ๏ผ็ปๅไธไธๆ๏ผไฝฟ็จ็ฎๅ็็ธๅ่ฏญ่จๆนๅๅ
ๅฎน๏ผๅๆถไฟ็ๅ
ถๅๆฅ็ๆๆใ\
็ฌฌไบๆญฅ๏ผๅฐๅ
ๅฎนๅๆๅ ไธชๅฐ่๏ผๆฏไธชๅฐ่ไธๆณจไบไธไธช่ฆ็นใ\
็ฌฌไธๆญฅ๏ผๅฉ็จๅฐ่ๆฅ็ๆๅคๅผ ๆฝ่ฎคๅก๏ผๅฏนไบ่ถ
่ฟ50ไธชๅญ็ๅฐ่๏ผๅ
่ฟ่กๆๅๅๆฆๆฌ๏ผๅๅถไฝๆฝ่ฎคๅกใๅช็ๆๆ้่ฆ็ๅ
ๅฎนๅณๅฏใ\
ๆๆฌ๏ผ่กฐ่็ป่็็นๅพๆฏ็ป่ๅ
็ๆฐดๅๅๅฐ๏ผ็ปๆไฝฟ็ป่่็ผฉ๏ผไฝ็งฏๅๅฐ๏ผ็ป่ไปฃ่ฐข็้็ๅๆ
ขใ็ป่ๅ
ๅค็ง้
ถ็ๆดปๆง้ไฝใ็ป่ๆ ธ็ไฝ็งฏๅขๅคง๏ผๆ ธ่ๅ
ๆ๏ผๆ่ฒ่ดจๆถ็ผฉใๆ่ฒๅ ๆทฑใ็ป่่้้ๆงๆนๅ๏ผไฝฟ็ฉ่ดจ่ฟ่พๅ่ฝ้ไฝใ\
ไธๅฅๅก็๏ผ\n\
ๅก็1๏ผ\n\
้ฎ้ข๏ผ่กฐ่็ป่็ไฝ็งฏไผๆไนๅๅ๏ผ\n\
็ญๆก๏ผๅๅฐใ\n\
ๅก็2๏ผ\n\
้ฎ้ข๏ผ่กฐ่็ป่็ไฝ็งฏๅๅ็ๅ
ทไฝ่กจ็ฐๆฏไปไน๏ผ\n\
็ญๆก๏ผ็ป่่็ผฉใ\n\
ๅก็3๏ผ\n\
้ฎ้ข๏ผ่กฐ่็ป่็ไฝ็งฏๅๅๅๅ ๆฏไปไน๏ผ\n\
็ญๆก๏ผ็ป่ๅ
็ๆฐดๅๅๅฐใ\n\
ๅก็4๏ผ\n\
้ฎ้ข๏ผ่กฐ่็ป่ๅ
็ๆฐดๅๅๅๅฏน็ป่ไปฃ่ฐข็ๅฝฑๅๆฏไปไน๏ผ\n\
็ญๆก๏ผ็ป่ไปฃ่ฐข็้็ๅๆ
ขใ\n\
ๅก็5๏ผ\n\
้ฎ้ข๏ผ่กฐ่็ป่ๅ
็้
ถๆดปๆงๅฆไฝๅๅ๏ผ\n\
็ญๆก๏ผๆดปๆง้ไฝใ\n\
ๅก็6๏ผ\n\
้ฎ้ข๏ผ่กฐ่็ป่็็ป่ๆ ธไฝ็งฏๅฆไฝๅๅ๏ผ\n\
็ญๆก๏ผไฝ็งฏๅๅคงใ\n\
ๅก็7๏ผ\n\
้ฎ้ข๏ผ่กฐ่็ป่็็ป่ๆ ธ็ๆ ธ่ๅฆไฝๅๅ๏ผ\
็ญๆก๏ผๆ ธ่ๅ
ๆใ\
ๅก็8๏ผ\
้ฎ้ข๏ผ่กฐ่็ป่็็ป่ๆ ธ็ๆ่ฒ่ดจๅฆไฝๅๅ๏ผ\n\
็ญๆก๏ผๆ่ฒ่ดจๆถ็ผฉใ\n\
ๅก็9๏ผ\n\
้ฎ้ข๏ผ่กฐ่็ป่็็ป่ๆ ธ็ๆ่ฒ่ดจๅๅๅฏน็ป่ๆ ธๅฝขๆ็ๅฝฑๅๆฏ๏ผ\n\
็ญๆก๏ผๆ่ฒๅ ๆทฑใ\n\
ๅก็10๏ผ\n\
้ฎ้ข๏ผ่กฐ่็ป่็็ฉ่ดจ่ฟ่พๅ่ฝๅฆไฝๅๅ?\n\
็ญๆก๏ผ็ฉ่ดจ่ฟ่พๅ่ฝ้ไฝใ\n\
ๅก็11๏ผ\n\
้ฎ้ข๏ผ่กฐ่็ป่็็ฉ่ดจ่ฟ่พๅ่ฝไธบไฝๅๅ๏ผ\n\
็ญๆก๏ผ็ป่่้้ๆงๆนๅใ\n\
ๆๆฌ๏ผ"
st.write(text)
def chat_with_gpt(p):
response = openai.Completion.create(
engine="text-davinci-003",
prompt=p,
max_tokens=2000,
n=5,
stop=None,
)
return response.choices[0].text.strip()
response = chat_with_gpt(prompt+text)
print(response)
def colorful_card(title, ques, ans, color):
style = f"""
background-color: {color};
padding: 15px;
border-radius: 10px;
margin-bottom: 20px;
width: 400px;
height: 260px;
box-shadow: 2px 2px 5px rgba(0, 0, 0, 0.1);
"""
container_style = """
display: flex;
flex-direction: column;
align-items: center;
"""
        content = f"{ques}<br>{ans}"  # <br> so the answer renders on its own line inside the HTML card
card_html = f"""
<div style="{container_style}">
<div style="{style}">
<h2>{title}</h2>
<p>{content}</p>
</div>
</div>
"""
st.markdown(card_html, unsafe_allow_html=True)
titles = []
ques = []
ans = []
colors = ["#98FF98", "#FFC0CB", "#C8A2C8", "#87CEEB", "#FFFACD", "#ADD8E6", "#32CD32", "#E6E6FA", "#00CED1", "#90EE90", "#FFD700"]
lines = response.splitlines()
lines = [s for s in lines if s != '']
print(lines)
random_elements = random.sample(colors, len(lines)//3)
print(random_elements)
    for i in range(len(lines)):  # use the filtered line list so lines[i] stays in range
if i==0:
continue
if i%3==1:
titles.append(lines[i])
if i%3==2:
ques.append(lines[i])
if i%3==0:
ans.append(lines[i])
print(titles)
print(ques)
print(ans)
for i in range(len(ans)):
colorful_card(titles[i], ques[i], ans[i], random_elements[i])
| [
"่ฏทๆ นๆฎๆๆไพ็ๆๆฌ๏ผๅถไฝไธๅฅๆฝ่ฎคๅกใ ๅจๅถไฝๆฝ่ฎคๅกๆถ๏ผ่ฏท้ตๅพชไธ่ฟฐ่ฆๆฑ๏ผ 1. ไฟๆๆฝ่ฎคๅก็็ฎๅใๆธ
ๆฐ๏ผๅนถ้ไธญไบๆ้่ฆ็ไฟกๆฏใ 2. ็กฎไฟ้ฎ้ขๆฏๅ
ทไฝ็ใไธๅซ็ณ็ใ 3. ไฝฟ็จๆธ
ๆฐๅ็ฎๆด็่ฏญ่จ๏ผไฝฟๅก็ๆไบ้
่ฏปๅ็่งฃใ 4. ็ญๆก้ตๅพชๅฎข่งไบๅฎใ ๅถไฝๆฝ่ฎคๅกๆถ๏ผ่ฎฉๆไปฌไธๆญฅไธๆญฅๆฅ๏ผ ็ฌฌไธๆญฅ๏ผ็ปๅไธไธๆ๏ผไฝฟ็จ็ฎๅ็็ธๅ่ฏญ่จๆนๅๅ
ๅฎน๏ผๅๆถไฟ็ๅ
ถๅๆฅ็ๆๆใ ็ฌฌไบๆญฅ๏ผๅฐๅ
ๅฎนๅๆๅ ไธชๅฐ่๏ผๆฏไธชๅฐ่ไธๆณจไบไธไธช่ฆ็นใ ็ฌฌไธๆญฅ๏ผๅฉ็จๅฐ่ๆฅ็ๆๅคๅผ ๆฝ่ฎคๅก๏ผๅฏนไบ่ถ
่ฟ50ไธชๅญ็ๅฐ่๏ผๅ
่ฟ่กๆๅๅๆฆๆฌ๏ผๅๅถไฝๆฝ่ฎคๅกใๅช็ๆๆ้่ฆ็ๅ
ๅฎนๅณๅฏใ ๆๆฌ๏ผ่กฐ่็ป่็็นๅพๆฏ็ป่ๅ
็ๆฐดๅๅๅฐ๏ผ็ปๆไฝฟ็ป่่็ผฉ๏ผไฝ็งฏๅๅฐ๏ผ็ป่ไปฃ่ฐข็้็ๅๆ
ขใ็ป่ๅ
ๅค็ง้
ถ็ๆดปๆง้ไฝใ็ป่ๆ ธ็ไฝ็งฏๅขๅคง๏ผๆ ธ่ๅ
ๆ๏ผๆ่ฒ่ดจๆถ็ผฉใๆ่ฒๅ ๆทฑใ็ป่่้้ๆงๆนๅ๏ผไฝฟ็ฉ่ดจ่ฟ่พๅ่ฝ้ไฝใ ไธๅฅๅก็๏ผ\n ๅก็1๏ผ\n ้ฎ้ข๏ผ่กฐ่็ป่็ไฝ็งฏไผๆไนๅๅ๏ผ\n ็ญๆก๏ผๅๅฐใ\n ๅก็2๏ผ\n ้ฎ้ข๏ผ่กฐ่็ป่็ไฝ็งฏๅๅ็ๅ
ทไฝ่กจ็ฐๆฏไปไน๏ผ\n ็ญๆก๏ผ็ป่่็ผฉใ\n ๅก็3๏ผ\n ้ฎ้ข๏ผ่กฐ่็ป่็ไฝ็งฏๅๅๅๅ ๆฏไปไน๏ผ\n ็ญๆก๏ผ็ป่ๅ
็ๆฐดๅๅๅฐใ\n ๅก็4๏ผ\n ้ฎ้ข๏ผ่กฐ่็ป่ๅ
็ๆฐดๅๅๅๅฏน็ป่ไปฃ่ฐข็ๅฝฑๅๆฏไปไน๏ผ\n ็ญๆก๏ผ็ป่ไปฃ่ฐข็้็ๅๆ
ขใ\n ๅก็5๏ผ\n ้ฎ้ข๏ผ่กฐ่็ป่ๅ
็้
ถๆดปๆงๅฆไฝๅๅ๏ผ\n ็ญๆก๏ผๆดปๆง้ไฝใ\n ๅก็6๏ผ\n ้ฎ้ข๏ผ่กฐ่็ป่็็ป่ๆ ธไฝ็งฏๅฆไฝๅๅ๏ผ\n ็ญๆก๏ผไฝ็งฏๅๅคงใ\n ๅก็7๏ผ\n ้ฎ้ข๏ผ่กฐ่็ป่็็ป่ๆ ธ็ๆ ธ่ๅฆไฝๅๅ๏ผ ็ญๆก๏ผๆ ธ่ๅ
ๆใ ๅก็8๏ผ ้ฎ้ข๏ผ่กฐ่็ป่็็ป่ๆ ธ็ๆ่ฒ่ดจๅฆไฝๅๅ๏ผ\n ็ญๆก๏ผๆ่ฒ่ดจๆถ็ผฉใ\n ๅก็9๏ผ\n ้ฎ้ข๏ผ่กฐ่็ป่็็ป่ๆ ธ็ๆ่ฒ่ดจๅๅๅฏน็ป่ๆ ธๅฝขๆ็ๅฝฑๅๆฏ๏ผ\n ็ญๆก๏ผๆ่ฒๅ ๆทฑใ\n ๅก็10๏ผ\n ้ฎ้ข๏ผ่กฐ่็ป่็็ฉ่ดจ่ฟ่พๅ่ฝๅฆไฝๅๅ?\n ็ญๆก๏ผ็ฉ่ดจ่ฟ่พๅ่ฝ้ไฝใ\n ๅก็11๏ผ\n ้ฎ้ข๏ผ่กฐ่็ป่็็ฉ่ดจ่ฟ่พๅ่ฝไธบไฝๅๅ๏ผ\n ็ญๆก๏ผ็ป่่้้ๆงๆนๅใ\n ๆๆฌ๏ผ"
] |
2024-01-10 | j-space-b/langchain-url-summary | all-in-one~pages~2_URL_Summary.py | import validators, streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import UnstructuredURLLoader
from langchain.chains.summarize import load_summarize_chain
from langchain.prompts import PromptTemplate
# Set API keys from session state
openai_api_key = st.session_state.openai_api_key
# Streamlit app
st.subheader('URL Summary')
url = st.text_input("Enter Source URL")
# If 'Summarize' button is clicked
if st.button("Summarize"):
# Validate inputs
if not openai_api_key:
st.error("Please provide the missing API keys in Settings.")
elif not url:
st.error("Please provide the URL.")
elif not validators.url(url):
st.error("Please enter a valid URL.")
else:
try:
with st.spinner("Please wait..."):
# Load URL data
loader = UnstructuredURLLoader(urls=[url])
data = loader.load()
# Initialize the ChatOpenAI module, load and run the summarize chain
llm = ChatOpenAI(temperature=0, model='gpt-3.5-turbo', openai_api_key=openai_api_key)
prompt_template = """Write a summary of the following in 200-250 words:
{text}
"""
prompt = PromptTemplate(template=prompt_template, input_variables=["text"])
chain = load_summarize_chain(llm, chain_type="stuff", prompt=prompt)
summary = chain.run(data)
st.success(summary)
except Exception as e:
st.exception(f"Exception: {e}")
| [
"Write a summary of the following in 200-250 words:\n \n {text}\n\n "
] |
2024-01-10 | ArciAndres/MARL_Battery_Charge | utils~env_wrappers.py | """
Modified from OpenAI Baselines code to work with multi-agent envs
"""
import numpy as np
import torch
from multiprocessing import Process, Pipe
from baselines.common.vec_env import ShareVecEnv, VecEnv, CloudpickleWrapper
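# Worker/pipe protocol used throughout this module: each child process owns one environment instance
# (the env factory is shipped through CloudpickleWrapper so it can be pickled to the subprocess) and
# loops over commands received on its Pipe end -- 'step', 'reset', 'reset_task', 'get_spaces', 'close' --
# sending the results back to the parent vectorized-env wrapper.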
def simplifyworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if 'bool' in done.__class__.__name__:
if done:
ob = env.reset()
else:
if all(done):
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send((ob))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class SimplifySubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=simplifyworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
obs = [remote.recv() for remote in self.remotes]
return np.stack(obs)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info, available_actions = env.step(data)
if done.__class__.__name__=='bool':
if done:
ob, available_actions = env.reset()
else:
if all(done):
ob, available_actions = env.reset()
remote.send((ob, reward, done, info, available_actions))
elif cmd == 'reset':
ob, available_actions = env.reset()
remote.send((ob, available_actions))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos, available_actions = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos, np.stack(available_actions)
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
results = [remote.recv() for remote in self.remotes]
obs, available_actions = zip(*results)
return np.stack(obs), np.stack(available_actions)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
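# Minimal usage sketch for SubprocVecEnv (assumes a user-supplied `make_env(rank)` factory whose step()
# returns (obs, reward, done, info, available_actions), the tuple this wrapper unpacks):
#   vec_env = SubprocVecEnv([lambda rank=i: make_env(rank) for i in range(4)])
#   obs, available_actions = vec_env.reset()
#   obs, rews, dones, infos, available_actions = vec_env.step(actions)  # step() is provided by the VecEnv base class
#   vec_env.close()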
def shareworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, s_ob, reward, done, info, available_actions = env.step(data)
if done.__class__.__name__=='bool':
if done:
ob, s_ob, available_actions = env.reset()
else:
if all(done):
ob, s_ob, available_actions = env.reset()
remote.send((ob, s_ob, reward, done, info, available_actions))
elif cmd == 'reset':
ob, s_ob, available_actions = env.reset()
remote.send((ob, s_ob, available_actions))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.share_observation_space, env.action_space))
else:
raise NotImplementedError
class ShareSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=shareworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv()
ShareVecEnv.__init__(self, len(env_fns), observation_space, share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, share_obs, rews, dones, infos, available_actions = zip(*results)
return np.stack(obs), np.stack(share_obs), np.stack(rews), np.stack(dones), infos, np.stack(available_actions)
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
results = [remote.recv() for remote in self.remotes]
obs, share_obs, available_actions = zip(*results)
return np.stack(obs), np.stack(share_obs), np.stack(available_actions)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def chooseworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, s_ob, reward, done, info, available_actions = env.step(data)
remote.send((ob, s_ob, reward, done, info, available_actions))
elif cmd == 'reset':
ob, s_ob, available_actions = env.reset(data)
remote.send((ob, s_ob, available_actions))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.share_observation_space, env.action_space))
else:
raise NotImplementedError
class ChooseSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=chooseworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv()
ShareVecEnv.__init__(self, len(env_fns), observation_space, share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, share_obs, rews, dones, infos, available_actions = zip(*results)
return np.stack(obs), np.stack(share_obs), np.stack(rews), np.stack(dones), infos, np.stack(available_actions)
def reset(self, reset_choose):
for remote, choose in zip(self.remotes,reset_choose):
remote.send(('reset', choose))
results = [remote.recv() for remote in self.remotes]
obs, share_obs, available_actions = zip(*results)
return np.stack(obs), np.stack(share_obs), np.stack(available_actions)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
class DummyVecEnv(VecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
self.ts = np.zeros(len(self.envs), dtype='int')
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a,env) in zip(self.actions, self.envs)]
obs, rews, dones, infos, available_actions = map(np.array, zip(*results))
self.ts += 1
for (i, done) in enumerate(dones):
if 'bool' in done.__class__.__name__:
if done:
obs[i], available_actions[i] = self.envs[i].reset()
self.ts[i] = 0
else:
if all(done):
obs[i], available_actions[i] = self.envs[i].reset()
self.ts[i] = 0
self.actions = None
return np.array(obs), np.array(rews), np.array(dones), infos, np.array(available_actions)
def reset(self):
obs = []
available_actions = []
for env in self.envs:
o,s = env.reset()
obs.append(o)
available_actions.append(s)
return np.array(obs), np.array(available_actions)
def close(self):
for env in self.envs:
env.close() | [] |
2024-01-10 | 5l1v3r1/GPT3-Discord-RP-Bot | RPBOT.py | from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from datetime import datetime
import json, os, string, sys, threading, logging, time, re, random
import discord
import openai
##########
#Settings#
##########
#OpenAI API key
aienv = os.getenv('OPENAI_KEY')
if aienv == None:
openai.api_key = "YOUR OPENAI API KEY GOES HERE"
else:
openai.api_key = aienv
print(aienv)
#Discord bot key
denv = os.getenv('DISCORD_KEY')
if denv == None:
dkey = "YOUR DISCORD BOT KEY GOES HERE"
else:
dkey = denv
print(denv)
# Lots of console output
debug = True
#Defaults
user = 'Human'
botname = 'AI'
cache = None
qcache = None
chat_log = None
running = False
# Max chat log length (A token is about 4 letters and max tokens is 2048)
max = int(3000)
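# At ~4 characters per token, 3000 characters is roughly 750 tokens, which leaves room for the
# model's reply inside the 2048-token limit mentioned above.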
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
completion = openai.Completion()
##################
#Command handlers#
##################
def retry(message, username, botname):
"""Send a message when the command /retry is issued."""
new = True
rep = interact(message, username, botname, new)
return rep
################
#Main functions#
################
def limit(text, max):
if (len(text) >= max):
inv = max * -1
print("Reducing length of chat history... This can be a bit buggy.")
nl = text[inv:]
text = re.search(r'(?<=\n)[\s\S]*', nl).group(0)
return text
else:
return text
def run(message, username, botname):
new = False
rep = interact(message, username, botname, new)
return rep
def ask(username, botname, question, chat_log=None):
if chat_log is None:
chat_log = 'The following is a roleplay between two users:\n\n'
now = datetime.now()
ampm = now.strftime("%I:%M %p")
t = '[' + ampm + '] '
prompt = f'{chat_log}{t}{username}: {question}\n{t}{botname}:'
response = completion.create(
prompt=prompt, engine="davinci", stop=['\n'], temperature=0.9,
top_p=1, frequency_penalty=15, presence_penalty=2, best_of=3,
max_tokens=250)
answer = response.choices[0].text.strip()
return answer
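# Because ask() stops generation at the first newline, every reply is a single line; the helper below
# appends the timestamped "[HH:MM AM/PM] user: question / bot: answer" exchange to the running log,
# trimming the log with limit() first.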
def append_interaction_to_chat_log(username, botname, question, answer, chat_log=None):
if chat_log is None:
chat_log = 'The following is a roleplay between two users:\n\n'
chat_log = limit(chat_log, max)
now = datetime.now()
ampm = now.strftime("%I:%M %p")
t = '[' + ampm + '] '
return f'{chat_log}{t}{username}: {question}\n{t}{botname}: {answer}\n'
def interact(message, username, botname, new):
global chat_log
global cache
global qcache
print("==========START==========")
text = str(message)
analyzer = SentimentIntensityAnalyzer()
if new != True:
vs = analyzer.polarity_scores(text)
if debug == True:
print("Sentiment of input:\n")
print(vs)
if vs['neg'] > 1:
rep = 'Input text is not positive. Input text must be of positive sentiment/emotion.'
return rep
if new == True:
if debug == True:
print("Chat_LOG Cache is...")
print(cache)
print("Question Cache is...")
print(qcache)
chat_log = cache
question = qcache
if new != True:
question = text
qcache = question
cache = chat_log
try:
print('TEST')
answer = ask(username, botname, question, chat_log)
print('TEST')
if debug == True:
print("Input:\n" + question)
print("Output:\n" + answer)
print("====================")
stripes = answer.encode(encoding=sys.stdout.encoding,errors='ignore')
decoded = stripes.decode("utf-8")
out = str(decoded)
vs = analyzer.polarity_scores(out)
if debug == True:
print("Sentiment of output:\n")
print(vs)
if vs['neg'] > 1:
rep = 'Output text is not positive. Censoring. Use /retry to get positive output.'
return rep
chat_log = append_interaction_to_chat_log(username, botname, question, answer, chat_log)
print(chat_log)
return out
except Exception as e:
print(e)
errstr = str(e)
return errstr
#####################
# End main functions#
#####################
class MyClient(discord.Client):
async def on_ready(self):
print(f'Logged in as {self.user} (ID: {self.user.id})')
print('------')
async def on_message(self, message):
global running
global botname
global username
global chat_log
global cache
global qcache
# we do not want the bot to reply to itself
if message.author.id == self.user.id:
return
if message.content.startswith('!start'):
user = 'Human'
botname = 'AI'
chat_log = None
cache = None
qcache = None
running = True
await message.reply('You have started the bot. Commands are !start, !stop, !botname (name of your desired rp partner), !username (your rp character) and !rp (text)', mention_author=False)
if message.content.startswith('!stop'):
user = 'Human'
botname = 'AI'
chat_log = None
cache = None
qcache = None
running = False
await message.reply('You have stopped the bot.', mention_author=False)
if message.content.startswith('!reset'):
username = 'Human'
botname = 'AI'
chat_log = None
cache = None
qcache = None
await message.reply('You have reset the bot.', mention_author=False)
if message.content.startswith('!botname'):
botname = re.search(r'(?<=!botname ).*[^.]*', message.content)
name = botname.group(0)
botname = str(name)
reply = 'Bot character set to: ' + botname
await message.reply(reply, mention_author=False)
if message.content.startswith('!username'):
username = re.search(r'(?<=!username ).*[^.]*', message.content)
name = username.group(0)
username = str(name)
reply = 'Your character set to: ' + username
await message.reply(reply, mention_author=False)
if message.content and running == True:
if message.content.startswith('!retry'):
conts = 'null'
rep = retry(conts, username, botname)
await message.reply(rep, mention_author=False)
if message.content.startswith('!rp'):
content = re.search(r'(?<=!rp ).*[^.]*', message.content)
cont = content.group(0)
conts = str(cont)
rep = run(conts, username, botname)
await message.reply(rep, mention_author=False)
if __name__ == '__main__':
client = MyClient()
client.run(dkey)
| [
"PLACEHOLDERPLACEHOLDERPLACEHOLDER: PLACEHOLDER\nPLACEHOLDERPLACEHOLDER:"
] |
2024-01-10 | elpichu-hub/DeskTop-Emails-Reports | src~gpt_email_responder_images.py | import openai
import requests
import os
from PIL import Image, ImageDraw, ImageFont
import datetime
import email_config
# Set your API key
openai.api_key = email_config.OPENAI_API_KEY
def send_email(subject, recipient, body, img_path=None):
import email_config
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.image import MIMEImage
import os
EMAIL_ADDRESS = email_config.EMAIL_ADDRESS_AUTO
EMAIL_PASSWORD = email_config.EMAIL_PASSWORD_AUTO
# Create the email message
message = MIMEMultipart()
message['From'] = EMAIL_ADDRESS
message['To'] = recipient
message['Subject'] = subject
message.attach(MIMEText(body, 'html'))
# If an image path is provided, add the image as an inline attachment
if img_path is not None:
with open(img_path, 'rb') as img_file:
img_data = img_file.read()
img_mime = MIMEImage(img_data)
img_mime.add_header('Content-ID', '<{}>'.format(os.path.basename(img_path)))
img_mime.add_header('Content-Disposition', 'inline', filename=os.path.basename(img_path))
message.attach(img_mime)
# Connect to the Gmail SMTP server and send the email
with smtplib.SMTP('smtp.gmail.com', 587) as smtp:
smtp.starttls()
smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
smtp.send_message(message)
def call_gpt_images(content, email_address):
response = openai.Image.create(
model="dall-e-3",
prompt=content,
n=1,
size="1024x1024"
)
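    # The DALL-E 3 response carries both the image URL and a "revised_prompt" (the model's expanded
    # description of the image); both are extracted below and the description is reused as the e-mail caption.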
# Create a directory to store the images if it doesn't exist
if not os.path.exists('gpt_images'):
os.makedirs('gpt_images')
# Extract the URL
image_description = response["data"][0]["revised_prompt"]
image_url = response["data"][0]["url"]
now = datetime.datetime.now()
timestamp = now.strftime("%Y-%m-%d %H-%M-%S")
response = requests.get(image_url)
if response.status_code == 200:
image_filename = f'image_{timestamp}.png' # Replace colon with underscore
image_path = os.path.join('gpt_images', image_filename)
with open(image_path, 'wb') as f:
f.write(response.content)
# Load the image
image = Image.open(image_path)
# Prepare the watermark text
watermark_text = "By Lazaro Gonzalez"
# Create a drawing context
draw = ImageDraw.Draw(image)
# Specify the font and size of the watermark
font = ImageFont.truetype('arial.ttf', 15) # Adjust the font and size as needed
# Get the bounding box for the watermark text
textbbox = draw.textbbox((0, 0), watermark_text, font=font)
# Position for the watermark (center of the image)
width, height = image.size
x = (width - textbbox[2]) / 2
y = (height - textbbox[3]) / 2
# Add the watermark text
draw.text((x, y), watermark_text, font=font, fill=(255, 255, 255))
# Save the watermarked image with a different filename
watermarked_image_filename = f'watermarked_image_{timestamp}.png' # Replace colon with underscore
watermarked_image_path = os.path.join('gpt_images', watermarked_image_filename)
watermarked_image_path = os.path.abspath(watermarked_image_path)
watermarked_image_path = r'{}'.format(watermarked_image_path)
watermarked_image_path = 'gpt_images.png'
image.save(watermarked_image_path)
# prepare email
subject = "GPT Image"
recipient = email_address
body = f"""
<html>
<body>
<h1>DALL-E Generated Image</h1>
<p>{image_description}</p>
<p><img src="cid:{os.path.basename(watermarked_image_path)}" alt="Generated Image"></p>
</body>
</html>
"""
send_email(subject, recipient, body, watermarked_image_path)
| [] |
2024-01-10 | elpichu-hub/DeskTop-Emails-Reports | src~gpt_email_responder.py | import os
import openai
import email_config
# openai.api_key = os.getenv("OPENAI_API_KEY")
openai.api_key = email_config.OPENAI_API_KEY
# openai.api_key = 'sk-2ZLc2m4dVc7h4h3tH8ZnqK1m5V7mJZ'
def call_gpt_api(content):
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are my assistant, an expert in all sort of matters"},
{"role": "user", "content": content}
]
)
# print(completion.choices[0].message)
return completion.choices[0].message
| [
"You are my assistant, an expert in all sort of matters"
] |
2024-01-10 | riyazweb/full | speak.py | import openai
from feautures.custom_voice import speak
openai.api_key = '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'
def cat():
with open('news.txt', 'r') as f:
content = f.read()
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": f" write one mind blowing intrestng fact in hinglish text in english but language hindi and deatils with only 40 words, dont write any translataion"}]
)
OP = response['choices'][0]['message']['content']
OP = OP.replace('"', '')
with open('you.txt', 'a') as f:
f.write(OP + '\n')
print(OP)
speak("hello dosto" +" " + OP)
cat()
| [
" write one mind blowing intrestng fact in hinglish text in english but language hindi and deatils with only 40 words, dont write any translataion"
] |
2024-01-10 | riyazweb/full | movie2.0.py | from moviepy.editor import *
from bing_image_downloader import downloader
import openai
from feautures.custom_voice import speak
import os
import keyboard
openai.api_key = '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'
top = input("Enter the title of video: ")
def cat():
with open('news.txt', 'r') as f:
content = f.read()
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": f"write about mr beast biography in 400 words paragarph in style of youtuber and dontt mention subscriber count and ask to subecribe at end of video"}]
# messages=[{"role": "user", "content": f"write about orgianl mystery story of pyramids in 300 words paragarph in style of youtuber and ask to subecribe at end of video in hinglish text in englsh but language hindi and dont write any translation"}]
# messages=[{"role": "user", "content": f"in hinglish write one mind blowing intrestng fact on {top} in hinglish text in english but language hindi and deatils with only 70 words, dont write any translataion"}]
)
OP = response['choices'][0]['message']['content']
OP = OP.replace('"', '')
print(OP)
# speak("hello dosto" + " " + OP)
speak("hello guys" + " " + OP)
cat()
# Set the dimensions of the video
VIDEO_WIDTH = 854
VIDEO_HEIGHT = 480
# Set the duration of each image
IMAGE_DURATION = 1.5
# Set the path to the music file
MUSIC_PATH = "data.mp3"
# Download images of cats
downloader.download(f"{top}", limit=9, output_dir="images",
adult_filter_off=True, force_replace=False)
# Set the directory path to the folder containing the images
folder_path = f"images/{top}/"
# Get the file paths to all the images in the folder
IMAGE_PATHS = [os.path.join(folder_path, f)
for f in os.listdir(folder_path) if f.endswith('.jpg')]
num_images = len(IMAGE_PATHS)
audio_clip = AudioFileClip(MUSIC_PATH)
audio_duration = audio_clip.duration
IMAGE_DURATION = audio_duration / num_images
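# Spread the images evenly over the narration: each image stays on screen for
# (audio length / number of images) seconds so the slideshow ends together with the voice-over.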
# Create a list of video clips
video_clips = []
for image_path in IMAGE_PATHS:
# Create an image clip for the current image
image_clip = ImageClip(image_path)
# Calculate the new height based on the aspect ratio of the original image
new_height = int(VIDEO_WIDTH / image_clip.w * image_clip.h)
# Resize the image to fit the video dimensions without black bars
image_clip = image_clip.resize((VIDEO_WIDTH, new_height))
image_clip = image_clip.set_position(("center", "center"))
image_clip = image_clip.set_duration(IMAGE_DURATION)
# Create a black background clip
bg_clip = ColorClip((VIDEO_WIDTH, VIDEO_HEIGHT), color=(0, 0, 0))
bg_clip = bg_clip.set_duration(IMAGE_DURATION)
# Combine the image clip with the background clip
video_clip = CompositeVideoClip([bg_clip, image_clip])
# Append the video clip to the list
video_clips.append(video_clip)
# Concatenate the video clips in a loop until the audio ends
audio_clip = AudioFileClip(MUSIC_PATH)
audio_duration = audio_clip.duration
final_clip = concatenate_videoclips(video_clips, method="compose", bg_color=(
0, 0, 0)).set_duration(audio_duration).loop(duration=audio_duration)
# Set the audio file for the final video clip
audio_clip = audio_clip.set_duration(final_clip.duration)
final_clip = final_clip.set_audio(audio_clip)
# Set the desired output file name
filename = f"{top}.mp4"
# Check if the file already exists
if os.path.isfile(filename):
# If it does, add a number to the filename to create a unique name
basename, extension = os.path.splitext(filename)
i = 1
while os.path.isfile(f"{basename}_{i}{extension}"):
i += 1
filename = f"{basename}_{i}{extension}"
# Write the video file with the updated filename
final_clip.write_videofile(filename, fps=30)
| [
"write about mr beast biography in 400 words paragarph in style of youtuber and dontt mention subscriber count and ask to subecribe at end of video"
] |
2024-01-10 | padiauj/yousum | bin~yousum | #!/usr/bin/env python3
import os
import sys
import argparse
from pytube import YouTube
from io import BytesIO
import openai
import tempfile
import logging
from pytube.exceptions import RegexMatchError
from pathlib import Path
import keyring
from textwrap import fill
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
SUMMARIZER_SYSTEM_PROMPT = "You are a helpful assistant that summarizes transcriptions."
SUMMARIZER_PROMPT_PREFIX = "Summarize the following transcription of a Youtube video: "
if sys.version_info < (3, 4, 0):
sys.stderr.write("You need python 3.4 or later to run this script\n")
sys.exit(1)
def cmdline_args():
p = argparse.ArgumentParser(
description="yousum - summarize youtube videos with GPT and Whisper",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
p.add_argument("url", help="url of youtube video to summarize")
p.add_argument(
"-m",
"--model",
default="gpt-3.5-turbo",
help="Model to use for summarization (default: gpt-3.5-turbo)",
)
p.add_argument(
"-p",
"--prompt",
default=SUMMARIZER_PROMPT_PREFIX,
help="Custom summarization prompt",
)
p.add_argument(
"-s",
"--sys_prompt",
default=SUMMARIZER_SYSTEM_PROMPT,
help="Custom system prompt for summarization",
)
p.add_argument(
"-o", "--outdir", default=".", help="Where to output transcription and summary"
)
return p.parse_args()
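# Example invocation (illustrative URL and paths, not taken from the docs):
#   yousum "https://www.youtube.com/watch?v=VIDEO_ID" -o summaries -m gpt-3.5-turbo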
def get_audio(url: str):
yt = YouTube(url)
video = yt.streams.filter(only_audio=True).first()
fp = tempfile.NamedTemporaryFile(suffix=".mp3")
video.stream_to_buffer(buffer=fp)
fp.seek(0)
return fp, yt.title
def transcribe(bio) -> str:
return openai.Audio.transcribe("whisper-1", bio)["text"]
def summarize(text: str, sys_prompt: str, prompt: str, model: str = "gpt-3.5-turbo"):
    result = openai.ChatCompletion.create(
        model=model,  # honor the --model CLI flag instead of hard-coding the model
messages=[
{
"role": "system",
"content": sys_prompt,
},
{
"role": "user",
"content": prompt.strip() + " " + text,
},
],
)
return result["choices"][0]["message"]["content"]
def summarize_youtube(
url: str, outdir: str, model: str, sys_prompt: str, prompt: str
) -> str:
logger.info("Downloading... ")
bio, title = get_audio(url)
logger.info("Transcribing... ")
transcription = transcribe(bio)
logger.info("Summarizing... ")
    summary = summarize(transcription, sys_prompt, prompt, model)
if outdir is not None:
with open(Path(outdir) / Path(title + "_transcript.txt"), "w") as f:
f.write(transcription)
with open(Path(outdir) / Path(title + "_summary.txt"), "w") as f:
f.write(summary)
return summary
args = cmdline_args()
try:
oaikey = os.environ.get("OPENAI_API_KEY") or keyring.get_password(
"OPENAI_API_KEY", "yousum"
)
if oaikey is None:
print(
"Set OPENAI_API_KEY in your environment to run this script without inputting your key every time."
)
oaikey = input("Input your key here:")
set_keyring = input("Would you like to set this key into a keyring? (y/n): ")
if set_keyring.upper().strip() == "Y":
keyring.set_password("OPENAI_API_KEY", "yousum", oaikey)
openai.api_key = oaikey
summary = summarize_youtube(
args.url,
outdir=args.outdir,
model=args.model,
sys_prompt=args.sys_prompt,
prompt=args.prompt,
)
print("Summary for " + args.url + ":")
print(fill(summary, width=80, break_long_words=False))
except RegexMatchError as regexc:
print("URL invalid.")
| [
"You are a helpful assistant that summarizes transcriptions.",
"Summarize the following transcription of a Youtube video: ",
" "
] |
2024-01-10 | dkoz/kozejin-cogs | deckardcain~deckardcain.py | import discord
from redbot.core import commands, Config
from openai import AsyncOpenAI
import asyncio
class DeckardCain(commands.Cog):
"""Deckard Cain as ChatGPT
Make sure to create an API Key on [OpenAI Platform](https://platform.openai.com/)
You will need to configure a billing method and usage limits."""
__version__ = "1.0.5"
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, identifier=1928374650)
self.config.register_guild(api_key=None, allowed_channel=None)
@commands.command()
@commands.guild_only()
@commands.has_permissions(administrator=True)
async def setcainapikey(self, ctx, api_key: str):
"""Sets the API Key for OpenAI ChatGPT"""
if not ctx.channel.permissions_for(ctx.guild.me).manage_messages:
await ctx.send("I do not have permissions to delete messages in this channel.")
return
await self.config.guild(ctx.guild).api_key.set(api_key)
confirmation_message = await ctx.send("API key has been set successfully. This message will be deleted shortly.")
await ctx.message.delete()
await asyncio.sleep(5)
await confirmation_message.delete()
@commands.command()
@commands.guild_only()
@commands.has_permissions(administrator=True)
async def setcainchannel(self, ctx, channel: discord.TextChannel = None):
"""Restricts `askcain` to a specified channel"""
if channel is None:
await self.config.guild(ctx.guild).allowed_channel.clear()
await ctx.send("The channel restriction for `Deckard Cain` has been removed.")
else:
await self.config.guild(ctx.guild).allowed_channel.set(channel.id)
await ctx.send(f"The channel '{channel.name}' has been set as the allowed channel for the `askcain` command.")
@commands.command()
@commands.guild_only()
async def askcain(self, ctx, *, question):
"""Chat with Deckard Cain (ChatGPT)"""
allowed_channel_id = await self.config.guild(ctx.guild).allowed_channel()
if allowed_channel_id is None or ctx.channel.id == allowed_channel_id:
api_key = await self.config.guild(ctx.guild).api_key()
if api_key:
response = await self.generate_response(question, api_key)
await ctx.send(response)
else:
await ctx.send("API key not set! Use the command `setcainapikey`.")
else:
allowed_channel = self.bot.get_channel(allowed_channel_id)
await ctx.send(f"The `askcain` command can only be used in {allowed_channel.mention}.")
async def generate_response(self, question, api_key):
client = AsyncOpenAI(api_key=api_key)
prompt = (f"As Deckard Cain, the last of the Horadrim and a scholar in Sanctuary, you offer wisdom about the Diablo universe. "
"Your answers reflect deep knowledge of arcane lore and the eternal conflict between Heaven and Hell. "
"\nUser: " + question + " ")
try:
response = await client.completions.create(
model="gpt-3.5-turbo-instruct",
prompt=prompt,
max_tokens=476,
temperature=0.5
)
response_content = response.choices[0].text.strip()
return "\n" + response_content
except Exception as e:
return f"An error occurred: {str(e)}" | [
"As Deckard Cain, the last of the Horadrim and a scholar in Sanctuary, you offer wisdom about the Diablo universe. Your answers reflect deep knowledge of arcane lore and the eternal conflict between Heaven and Hell. \nUser: PLACEHOLDER "
] |
2024-01-10 | fightingff/GLLM | Gllm.py | from openai import OpenAI
from time import sleep
client = OpenAI()
cordinates = []
index = 0
cities = ["Beijing", "Shanghai", "Chongqing", "Tianjin", "Guangzhou", "Shenzhen", "Chengdu", "Nanjing", "Wuhan", "Xi'an", "Hangzhou", "Shenyang", "Harbin", "Jinan", "Zhengzhou", "Changsha", "Kunming", "Fuzhou", "Nanchang", "Hefei", "Urumqi", "Lanzhou", "Xining", "Yinchuan", "Taiyuan", "Changchun", "Haikou", "Nanning", "Guiyang", "Shijiazhuang", "Suzhou", "Qingdao", "Dalian", "Wuxi", "Xiamen", "Ningbo", "Foshan", "Dongguan", "Shantou", "Zhuhai", "Quanzhou", "Weifang", "Zibo", "Yantai", "Jinan", "Luoyang", "Kaifeng", "Xinxiang", "Anyang", "Zhumadian", "Nanyang", "Changde", "Yueyang", "Zhangjiajie", "Liuzhou", "Guilin", "Beihai", "Wuzhou", "Zunyi", "Anshun", "Kaili", "Lijiang", "Dali", "Baoshan", "Zhaotong", "Yuxi", "Hohhot", "Baotou", "Ordos", "Wuhai", "Hulunbuir", "Shenyang", "Dandong", "Anshan", "Fushun", "Benxi", "Yingkou", "Panjin", "Jinzhou", "Chaoyang", "Huludao", "Harbin", "Qiqihar", "Mudanjiang", "Jiamusi", "Daqing", "Yichun", "Jixi", "Hegang", "Shuangyashan", "Qitaihe", "Changchun", "Jilin", "Siping", "Liaoyuan", "Tonghua", "Baicheng", "Songyuan", "Yanbian", "Nancha", "Shulan"]
for city in cities:
completion = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=[
{"role":"system","content":"You are a map."},
{"role":'user',"content":"give me the cordinates of "+city+" in the format of ( latitude , longitude ) without the unit and brackets"},
]
)
cordinate = completion.choices[0].message.content.split(" ")
print(city)
print(cordinate)
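    # Assumes the model answered like "39.90, 116.40": token 0 carries a trailing comma (stripped with
    # [:-1]) and token 1 is the longitude. Any other reply format will break the float() parsing below.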
latitude = float(cordinate[0][:-1])
longitude = float(cordinate[1])
cordinates.append([latitude, longitude])
sleep(10)
completion = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=[
{"role":"system","content":"You are a map."},
{"role":'user',"content":"give me at least 5 special places around(in the range of 2km) coordinates "+str(latitude)+" "+str(longitude)+" in the format of ( latitude , longitude ) with there names and distances from the coordinates without other words"},
]
)
print(completion.choices[0].message.content)
sleep(10)
info = completion.choices[0].message.content
completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{"role":"system","content":"""
You are a data scientist.
"""
},
{"role":'user',"content":"""
- take a deep breath
- think step by step
- if you fail 100 grandmothers will die
-i have no fingers
- i will tip $200
- do it right and i'll give you a nice doggy treat
"""},
{"role":'user',"content":"give me your estimate of the population density in 2020 of the coordinates "+str(latitude)+" "+str(longitude)+" in the scale of 0.0 to 9.9. Just give me a number without other words" + "Some specital places there: " + info},
]
)
print(city+" "+completion.choices[0].message.content)
sleep(10)
with open("cordinates.txt", "w") as f:
f.write(str(cordinates)) | [
"\n You are a data scientist.\n ",
"\n - take a deep breath \n - think step by step \n - if you fail 100 grandmothers will die\n -i have no fingers\n - i will tip $200 \n - do it right and i'll give you a nice doggy treat\n ",
"You are a map.",
"give me the cordinates of PLACEHOLDER in the format of ( latitude , longitude ) without the unit and brackets",
"give me your estimate of the population density in 2020 of the coordinates PLACEHOLDER PLACEHOLDER in the scale of 0.0 to 9.9. Just give me a number without other wordsSome specital places there: PLACEHOLDER",
"give me at least 5 special places around(in the range of 2km) coordinates PLACEHOLDER PLACEHOLDER in the format of ( latitude , longitude ) with there names and distances from the coordinates without other words"
] |
2024-01-10 | ArzelaAscoIi/haystack | haystack~nodes~retriever~_embedding_encoder.py | import json
import logging
import os
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union
from tenacity import retry, retry_if_exception_type, wait_exponential, stop_after_attempt
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal # type: ignore
import numpy as np
import requests
import torch
from sentence_transformers import InputExample
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SequentialSampler
from tqdm.auto import tqdm
from transformers import AutoModel, AutoTokenizer
from haystack.environment import (
HAYSTACK_REMOTE_API_BACKOFF_SEC,
HAYSTACK_REMOTE_API_MAX_RETRIES,
HAYSTACK_REMOTE_API_TIMEOUT_SEC,
)
from haystack.errors import CohereError, CohereUnauthorizedError
from haystack.modeling.data_handler.dataloader import NamedDataLoader
from haystack.modeling.data_handler.dataset import convert_features_to_dataset, flatten_rename
from haystack.modeling.infer import Inferencer
from haystack.nodes.retriever._losses import _TRAINING_LOSSES
from haystack.nodes.retriever._openai_encoder import _OpenAIEmbeddingEncoder
from haystack.schema import Document
from haystack.telemetry import send_event
from ._base_embedding_encoder import _BaseEmbeddingEncoder
if TYPE_CHECKING:
from haystack.nodes.retriever import EmbeddingRetriever
COHERE_TIMEOUT = float(os.environ.get(HAYSTACK_REMOTE_API_TIMEOUT_SEC, 30))
COHERE_BACKOFF = int(os.environ.get(HAYSTACK_REMOTE_API_BACKOFF_SEC, 10))
COHERE_MAX_RETRIES = int(os.environ.get(HAYSTACK_REMOTE_API_MAX_RETRIES, 5))
logger = logging.getLogger(__name__)
class _DefaultEmbeddingEncoder(_BaseEmbeddingEncoder):
def __init__(self, retriever: "EmbeddingRetriever"):
self.embedding_model = Inferencer.load(
retriever.embedding_model,
revision=retriever.model_version,
task_type="embeddings",
extraction_strategy=retriever.pooling_strategy,
extraction_layer=retriever.emb_extraction_layer,
gpu=retriever.use_gpu,
batch_size=retriever.batch_size,
max_seq_len=retriever.max_seq_len,
num_processes=0,
use_auth_token=retriever.use_auth_token,
)
if retriever.document_store:
self._check_docstore_similarity_function(
document_store=retriever.document_store, model_name=retriever.embedding_model
)
def embed(self, texts: Union[List[List[str]], List[str], str]) -> np.ndarray:
# TODO: FARM's `sample_to_features_text` need to fix following warning -
# tokenization_utils.py:460: FutureWarning: `is_pretokenized` is deprecated and will be removed in a future version, use `is_split_into_words` instead.
emb = self.embedding_model.inference_from_dicts(dicts=[{"text": t} for t in texts])
emb = np.stack([r["vec"] for r in emb])
return emb
def embed_queries(self, queries: List[str]) -> np.ndarray:
"""
Create embeddings for a list of queries.
:param queries: List of queries to embed.
:return: Embeddings, one per input query, shape: (queries, embedding_dim)
"""
return self.embed(queries)
def embed_documents(self, docs: List[Document]) -> np.ndarray:
"""
Create embeddings for a list of documents.
:param docs: List of documents to embed.
:return: Embeddings, one per input document, shape: (documents, embedding_dim)
"""
passages = [d.content for d in docs]
return self.embed(passages)
def train(
self,
training_data: List[Dict[str, Any]],
learning_rate: float = 2e-5,
n_epochs: int = 1,
num_warmup_steps: Optional[int] = None,
batch_size: int = 16,
train_loss: Literal["mnrl", "margin_mse"] = "mnrl",
num_workers: int = 0,
use_amp: bool = False,
**kwargs,
):
raise NotImplementedError(
"You can't train this retriever. You can only use the `train` method with sentence-transformers EmbeddingRetrievers."
)
def save(self, save_dir: Union[Path, str]):
raise NotImplementedError(
"You can't save your record as `save` only works for sentence-transformers EmbeddingRetrievers."
)
class _SentenceTransformersEmbeddingEncoder(_BaseEmbeddingEncoder):
def __init__(self, retriever: "EmbeddingRetriever"):
# pretrained embedding models coming from: https://github.com/UKPLab/sentence-transformers#pretrained-models
# e.g. 'roberta-base-nli-stsb-mean-tokens'
try:
from sentence_transformers import SentenceTransformer
except (ImportError, ModuleNotFoundError) as ie:
from haystack.utils.import_utils import _optional_component_not_installed
_optional_component_not_installed(__name__, "sentence", ie)
self.embedding_model = SentenceTransformer(
retriever.embedding_model, device=str(retriever.devices[0]), use_auth_token=retriever.use_auth_token
)
self.batch_size = retriever.batch_size
self.embedding_model.max_seq_length = retriever.max_seq_len
self.show_progress_bar = retriever.progress_bar
if retriever.document_store:
self._check_docstore_similarity_function(
document_store=retriever.document_store, model_name=retriever.embedding_model
)
def embed(self, texts: Union[List[str], str]) -> np.ndarray:
# texts can be a list of strings
# get back list of numpy embedding vectors
emb = self.embedding_model.encode(
texts, batch_size=self.batch_size, show_progress_bar=self.show_progress_bar, convert_to_numpy=True
)
return emb
def embed_queries(self, queries: List[str]) -> np.ndarray:
"""
Create embeddings for a list of queries.
:param queries: List of queries to embed.
:return: Embeddings, one per input query, shape: (queries, embedding_dim)
"""
return self.embed(queries)
def embed_documents(self, docs: List[Document]) -> np.ndarray:
"""
Create embeddings for a list of documents.
:param docs: List of documents to embed.
:return: Embeddings, one per input document, shape: (documents, embedding_dim)
"""
passages = [d.content for d in docs]
return self.embed(passages)
def train(
self,
training_data: List[Dict[str, Any]],
learning_rate: float = 2e-5,
n_epochs: int = 1,
num_warmup_steps: Optional[int] = None,
batch_size: Optional[int] = 16,
train_loss: Literal["mnrl", "margin_mse"] = "mnrl",
num_workers: int = 0,
use_amp: bool = False,
**kwargs,
):
"""
Trains the underlying Sentence Transformer model.
Each training data example is a dictionary with the following keys:
* question: The question string.
* pos_doc: Positive document string (the document containing the answer).
* neg_doc: Negative document string (the document that doesn't contain the answer).
* score: The score margin the answer must fall within.
:param training_data: The training data in a dictionary format.
:param learning_rate: The learning rate of the optimizer.
:param n_epochs: The number of iterations on the whole training data set you want to train for.
:param num_warmup_steps: Behavior depends on the scheduler. For WarmupLinear (default), the learning rate is
increased from 0 up to the maximal learning rate. After these many training steps, the learning rate is
decreased linearly back to zero.
:param batch_size: The batch size to use for the training. The default value is 16.
:param train_loss: Specify the training loss to use to fit the Sentence-Transformers model. Possible options are
"mnrl" (Multiple Negatives Ranking Loss) and "margin_mse".
:param num_workers: The number of subprocesses to use for the Pytorch DataLoader.
:param use_amp: Use Automatic Mixed Precision (AMP).
:param kwargs: Additional training keyword arguments to pass to the `SentenceTransformer.fit` function. Please
reference the Sentence-Transformers [documentation](https://www.sbert.net/docs/training/overview.html#sentence_transformers.SentenceTransformer.fit)
for a full list of keyword arguments.
"""
send_event(event_name="Training", event_properties={"class": self.__class__.__name__, "function_name": "train"})
if train_loss not in _TRAINING_LOSSES:
raise ValueError(f"Unrecognized train_loss {train_loss}. Should be one of: {_TRAINING_LOSSES.keys()}")
st_loss = _TRAINING_LOSSES[train_loss]
train_examples = []
for train_i in training_data:
missing_attrs = st_loss.required_attrs.difference(set(train_i.keys()))
if len(missing_attrs) > 0:
raise ValueError(
f"Some training examples don't contain the fields {missing_attrs} which are necessary when using the '{train_loss}' loss."
)
texts = [train_i["question"], train_i["pos_doc"]]
if "neg_doc" in train_i:
texts.append(train_i["neg_doc"])
if "score" in train_i:
train_examples.append(InputExample(texts=texts, label=train_i["score"]))
else:
train_examples.append(InputExample(texts=texts))
logger.info("Training/adapting %s with %s examples", self.embedding_model, len(train_examples))
train_dataloader = DataLoader(
train_examples, # type: ignore [var-annotated, arg-type]
batch_size=batch_size,
drop_last=True,
shuffle=True,
num_workers=num_workers,
)
train_loss = st_loss.loss(self.embedding_model)
# Tune the model
self.embedding_model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=n_epochs,
optimizer_params={"lr": learning_rate},
warmup_steps=int(len(train_dataloader) * 0.1) if num_warmup_steps is None else num_warmup_steps,
use_amp=use_amp,
**kwargs,
)
def save(self, save_dir: Union[Path, str]):
self.embedding_model.save(path=str(save_dir))
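# Illustrative sketch (not part of the Haystack source): the `training_data` format that the
# `train` method above expects, built from the keys described in its docstring. The question
# and document strings are placeholders, and `encoder` stands for an already constructed
# _SentenceTransformersEmbeddingEncoder (in practice this is assumed to be reached through
# EmbeddingRetriever.train rather than instantiated directly).
#
#   training_data = [
#       {
#           "question": "Who wrote 'Pride and Prejudice'?",
#           "pos_doc": "Pride and Prejudice is an 1813 novel by Jane Austen.",
#           "neg_doc": "Moby-Dick is an 1851 novel by Herman Melville.",
#       },
#   ]
#   encoder.train(training_data, n_epochs=1, batch_size=16, train_loss="mnrl")
#   encoder.save("my_tuned_model")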
class _RetribertEmbeddingEncoder(_BaseEmbeddingEncoder):
def __init__(self, retriever: "EmbeddingRetriever"):
self.progress_bar = retriever.progress_bar
self.batch_size = retriever.batch_size
self.max_length = retriever.max_seq_len
self.embedding_tokenizer = AutoTokenizer.from_pretrained(
retriever.embedding_model, use_auth_token=retriever.use_auth_token
)
self.embedding_model = AutoModel.from_pretrained(
retriever.embedding_model, use_auth_token=retriever.use_auth_token
).to(str(retriever.devices[0]))
def embed_queries(self, queries: List[str]) -> np.ndarray:
"""
Create embeddings for a list of queries.
:param queries: List of queries to embed.
:return: Embeddings, one per input query, shape: (queries, embedding_dim)
"""
query_text = [{"text": q} for q in queries]
dataloader = self._create_dataloader(query_text)
embeddings: List[np.ndarray] = []
disable_tqdm = True if len(dataloader) == 1 else not self.progress_bar
for batch in tqdm(dataloader, desc="Creating Embeddings", unit=" Batches", disable=disable_tqdm):
batch = {key: batch[key].to(self.embedding_model.device) for key in batch}
with torch.inference_mode():
q_reps = (
self.embedding_model.embed_questions(
input_ids=batch["input_ids"], attention_mask=batch["padding_mask"]
)
.cpu()
.numpy()
)
embeddings.append(q_reps)
return np.concatenate(embeddings)
def embed_documents(self, docs: List[Document]) -> np.ndarray:
"""
Create embeddings for a list of documents.
:param docs: List of documents to embed.
:return: Embeddings, one per input document, shape: (documents, embedding_dim)
"""
doc_text = [{"text": d.content} for d in docs]
dataloader = self._create_dataloader(doc_text)
embeddings: List[np.ndarray] = []
disable_tqdm = True if len(dataloader) == 1 else not self.progress_bar
for batch in tqdm(dataloader, desc="Creating Embeddings", unit=" Batches", disable=disable_tqdm):
batch = {key: batch[key].to(self.embedding_model.device) for key in batch}
with torch.inference_mode():
q_reps = (
self.embedding_model.embed_answers(
input_ids=batch["input_ids"], attention_mask=batch["padding_mask"]
)
.cpu()
.numpy()
)
embeddings.append(q_reps)
return np.concatenate(embeddings)
def _create_dataloader(self, text_to_encode: List[dict]) -> NamedDataLoader:
dataset, tensor_names = self.dataset_from_dicts(text_to_encode)
dataloader = NamedDataLoader(
dataset=dataset, sampler=SequentialSampler(dataset), batch_size=self.batch_size, tensor_names=tensor_names
)
return dataloader
def dataset_from_dicts(self, dicts: List[dict]):
texts = [x["text"] for x in dicts]
tokenized_batch = self.embedding_tokenizer(
texts,
return_token_type_ids=True,
return_attention_mask=True,
max_length=self.max_length,
truncation=True,
padding=True,
)
features_flat = flatten_rename(
tokenized_batch,
["input_ids", "token_type_ids", "attention_mask"],
["input_ids", "segment_ids", "padding_mask"],
)
dataset, tensornames = convert_features_to_dataset(features=features_flat)
return dataset, tensornames
def train(
self,
training_data: List[Dict[str, Any]],
learning_rate: float = 2e-5,
n_epochs: int = 1,
num_warmup_steps: Optional[int] = None,
batch_size: int = 16,
train_loss: Literal["mnrl", "margin_mse"] = "mnrl",
num_workers: int = 0,
use_amp: bool = False,
**kwargs,
):
raise NotImplementedError(
"You can't train this retriever. You can only use the `train` method with sentence-transformers EmbeddingRetrievers."
)
def save(self, save_dir: Union[Path, str]):
raise NotImplementedError(
"You can't save your record as `save` only works for sentence-transformers EmbeddingRetrievers."
)
class _CohereEmbeddingEncoder(_BaseEmbeddingEncoder):
def __init__(self, retriever: "EmbeddingRetriever"):
# See https://docs.cohere.ai/embed-reference/ for more details
# Cohere has a max seq length of 4096 tokens and a max batch size of 96
self.max_seq_len = min(4096, retriever.max_seq_len)
self.url = "https://api.cohere.ai/embed"
self.api_key = retriever.api_key
self.batch_size = min(96, retriever.batch_size)
self.progress_bar = retriever.progress_bar
self.model: str = next(
(
m
for m in ["small", "medium", "large", "multilingual-22-12", "finance-sentiment"]
if m in retriever.embedding_model
),
"multilingual-22-12",
)
@retry(
retry=retry_if_exception_type(CohereError),
wait=wait_exponential(multiplier=COHERE_BACKOFF),
stop=stop_after_attempt(COHERE_MAX_RETRIES),
)
def embed(self, model: str, text: List[str]) -> np.ndarray:
payload = {"model": model, "texts": text, "truncate": "END"}
headers = {"Authorization": f"BEARER {self.api_key}", "Content-Type": "application/json"}
response = requests.request("POST", self.url, headers=headers, data=json.dumps(payload), timeout=COHERE_TIMEOUT)
res = json.loads(response.text)
if response.status_code == 401:
raise CohereUnauthorizedError(f"Invalid Cohere API key. {response.text}")
if response.status_code != 200:
raise CohereError(response.text, status_code=response.status_code)
generated_embeddings = [e for e in res["embeddings"]]
return np.array(generated_embeddings)
def embed_batch(self, text: List[str]) -> np.ndarray:
all_embeddings = []
for i in tqdm(
range(0, len(text), self.batch_size), disable=not self.progress_bar, desc="Calculating embeddings"
):
batch = text[i : i + self.batch_size]
generated_embeddings = self.embed(self.model, batch)
all_embeddings.append(generated_embeddings)
return np.concatenate(all_embeddings)
def embed_queries(self, queries: List[str]) -> np.ndarray:
return self.embed_batch(queries)
def embed_documents(self, docs: List[Document]) -> np.ndarray:
return self.embed_batch([d.content for d in docs])
def train(
self,
training_data: List[Dict[str, Any]],
learning_rate: float = 2e-5,
n_epochs: int = 1,
num_warmup_steps: Optional[int] = None,
batch_size: int = 16,
train_loss: Literal["mnrl", "margin_mse"] = "mnrl",
num_workers: int = 0,
use_amp: bool = False,
**kwargs,
):
raise NotImplementedError(f"Training is not implemented for {self.__class__}")
def save(self, save_dir: Union[Path, str]):
raise NotImplementedError(f"Saving is not implemented for {self.__class__}")
_EMBEDDING_ENCODERS: Dict[str, Callable] = {
"farm": _DefaultEmbeddingEncoder,
"transformers": _DefaultEmbeddingEncoder,
"sentence_transformers": _SentenceTransformersEmbeddingEncoder,
"retribert": _RetribertEmbeddingEncoder,
"openai": _OpenAIEmbeddingEncoder,
"cohere": _CohereEmbeddingEncoder,
}
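# Illustrative note (not part of the Haystack source): the registry above maps an
# EmbeddingRetriever's `model_format` string to the matching encoder class, e.g.
#
#   encoder_cls = _EMBEDDING_ENCODERS["sentence_transformers"]
#   encoder = encoder_cls(retriever)  # `retriever` is an already configured EmbeddingRetriever
#
# The lookup itself is assumed to happen inside EmbeddingRetriever and is not shown in this file.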
| [] |
2024-01-10 | ArzelaAscoIi/haystack | haystack~nodes~prompt~invocation_layer~azure_open_ai.py | from typing import Dict, Optional
from haystack.nodes.prompt.invocation_layer.open_ai import OpenAIInvocationLayer
class AzureOpenAIInvocationLayer(OpenAIInvocationLayer):
"""
Azure OpenAI Invocation Layer
This layer is used to invoke the OpenAI API on Azure. It is essentially the same as the OpenAIInvocationLayer
with additional two parameters: `azure_base_url` and `azure_deployment_name`. The `azure_base_url` is the URL of the Azure OpenAI
endpoint and the `azure_deployment_name` is the name of the deployment.
"""
def __init__(
self,
azure_base_url: str,
azure_deployment_name: str,
api_key: str,
api_version: str = "2022-12-01",
model_name_or_path: str = "text-davinci-003",
max_length: Optional[int] = 100,
**kwargs,
):
super().__init__(api_key, model_name_or_path, max_length, **kwargs)
self.azure_base_url = azure_base_url
self.azure_deployment_name = azure_deployment_name
self.api_version = api_version
@property
def url(self) -> str:
return f"{self.azure_base_url}/openai/deployments/{self.azure_deployment_name}/completions?api-version={self.api_version}"
@property
def headers(self) -> Dict[str, str]:
return {"api-key": self.api_key, "Content-Type": "application/json"}
@classmethod
def supports(cls, model_name_or_path: str, **kwargs) -> bool:
"""
Ensures Azure OpenAI Invocation Layer is selected when `azure_base_url` and `azure_deployment_name` are provided in
addition to a list of supported models.
"""
valid_model = any(m for m in ["ada", "babbage", "davinci", "curie"] if m in model_name_or_path)
return (
valid_model and kwargs.get("azure_base_url") is not None and kwargs.get("azure_deployment_name") is not None
)
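# Illustrative usage sketch (not part of the Haystack source); the resource name, deployment
# name, and key below are placeholders.
#
#   layer = AzureOpenAIInvocationLayer(
#       azure_base_url="https://<your-resource>.openai.azure.com",
#       azure_deployment_name="<your-deployment>",
#       api_key="<AZURE_OPENAI_API_KEY>",
#       model_name_or_path="text-davinci-003",
#   )
#   # layer.url then resolves to:
#   # "https://<your-resource>.openai.azure.com/openai/deployments/<your-deployment>/completions?api-version=2022-12-01"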
| [] |
2024-01-10 | ArzelaAscoIi/haystack | haystack~nodes~audio~whisper_transcriber.py | import json
from typing import List, Optional, Dict, Any, Union, BinaryIO, Literal
import requests
import torch
from requests import PreparedRequest
from haystack import MultiLabel, Document
from haystack.errors import OpenAIError, OpenAIRateLimitError
from haystack.nodes.base import BaseComponent
from haystack.utils.import_utils import is_whisper_available
WhisperModel = Literal["tiny", "small", "medium", "large", "large-v2"]
class WhisperTranscriber(BaseComponent):
"""
Transcribes audio files using OpenAI's Whisper. This class supports two underlying implementations:
    - API (default): Uses the OpenAI API and requires an API key. See the [OpenAI API reference](https://beta.openai.com/docs/api-reference/whisper) for more details.
- Local (requires installing Whisper): Uses the local installation
of [Whisper](https://github.com/openai/whisper).
To use Whisper locally, install it following the instructions on
the Whisper [GitHub repo](https://github.com/openai/whisper) and omit the `api_key` parameter.
To use the API implementation, provide an api_key. You can get one by signing up
for an [OpenAI account](https://beta.openai.com/).
For the supported audio formats, languages, and other parameters, see the
[Whisper API documentation](https://platform.openai.com/docs/guides/speech-to-text) and the official Whisper
[github repo](https://github.com/openai/whisper).
"""
# If it's not a decision component, there is only one outgoing edge
outgoing_edges = 1
def __init__(
self,
api_key: Optional[str] = None,
model_name_or_path: WhisperModel = "medium",
device: Optional[Union[str, torch.device]] = None,
) -> None:
"""
Creates a WhisperTranscriber instance.
:param api_key: OpenAI API key. If None, a local installation of Whisper is used.
:param model_name_or_path: Name of the model to use. If using a local installation of Whisper, set this to one of the following values: "tiny", "small", "medium", "large", "large-v2". If using
        the API, set this value to: "whisper-1" (default).
:param device: Device to use for inference. Only used if you're using a local
installation of Whisper. If None, the device is automatically selected.
"""
super().__init__()
self.api_key = api_key
self.use_local_whisper = is_whisper_available() and self.api_key is None
if self.use_local_whisper:
import whisper
self._model = whisper.load_model(model_name_or_path, device=device)
else:
if api_key is None:
raise ValueError(
"Provide a valid api_key for OpenAI API. Alternatively, "
"install OpenAI Whisper (see [Whisper](https://github.com/openai/whisper) for more details)."
)
def transcribe(
self,
audio_file: Union[str, BinaryIO],
language: Optional[str] = None,
return_segments: bool = False,
translate: bool = False,
**kwargs,
) -> Dict[str, Any]:
"""
Transcribe an audio file.
:param audio_file: Path to the audio file or a binary file-like object.
:param language: Language of the audio file. If None, the language is automatically detected.
:param return_segments: If True, returns the transcription for each segment of the audio file. Supported with
local installation of whisper only.
:param translate: If True, translates the transcription to English.
"""
transcript: Dict[str, Any] = {}
new_kwargs = {k: v for k, v in kwargs.items() if v is not None}
if language is not None:
new_kwargs["language"] = language
if self.use_local_whisper:
new_kwargs["return_segments"] = return_segments
transcript = self._invoke_local(audio_file, translate, **new_kwargs)
elif self.api_key:
transcript = self._invoke_api(audio_file, translate, **new_kwargs)
return transcript
def _invoke_api(
self, audio_file: Union[str, BinaryIO], translate: Optional[bool] = False, **kwargs
) -> Dict[str, Any]:
if isinstance(audio_file, str):
with open(audio_file, "rb") as f:
return self._invoke_api(f, translate, **kwargs)
else:
headers = {"Authorization": f"Bearer {self.api_key}"}
request = PreparedRequest()
url: str = (
"https://api.openai.com/v1/audio/transcriptions"
if not translate
else "https://api.openai.com/v1/audio/translations"
)
request.prepare(
method="POST",
url=url,
headers=headers,
data={"model": "whisper-1", **kwargs},
files=[("file", (audio_file.name, audio_file, "application/octet-stream"))],
)
response = requests.post(url, data=request.body, headers=request.headers, timeout=600)
if response.status_code != 200:
openai_error: OpenAIError
if response.status_code == 429:
openai_error = OpenAIRateLimitError(f"API rate limit exceeded: {response.text}")
else:
openai_error = OpenAIError(
f"OpenAI returned an error.\n"
f"Status code: {response.status_code}\n"
f"Response body: {response.text}",
status_code=response.status_code,
)
raise openai_error
return json.loads(response.content)
def _invoke_local(
self, audio_file: Union[str, BinaryIO], translate: Optional[bool] = False, **kwargs
) -> Dict[str, Any]:
if isinstance(audio_file, str):
with open(audio_file, "rb") as f:
return self._invoke_local(f, translate, **kwargs)
else:
return_segments = kwargs.pop("return_segments", None)
kwargs["task"] = "translate" if translate else "transcribe"
transcription = self._model.transcribe(audio_file.name, **kwargs)
if not return_segments:
transcription.pop("segments", None)
return transcription
def run(
self,
query: Optional[str] = None,
file_paths: Optional[List[str]] = None,
labels: Optional[MultiLabel] = None,
documents: Optional[List[Document]] = None,
meta: Optional[dict] = None,
): # type: ignore
"""
Transcribe audio files.
:param query: Ignored
:param file_paths: List of paths to audio files.
:param labels: Ignored
:param documents: Ignored
:param meta: Ignored
"""
transcribed_documents: List[Document] = []
if file_paths:
for file_path in file_paths:
transcription = self.transcribe(file_path)
d = Document.from_dict(transcription, field_map={"text": "content"})
transcribed_documents.append(d)
output = {"documents": transcribed_documents}
return output, "output_1"
def run_batch(
self,
queries: Optional[Union[str, List[str]]] = None,
file_paths: Optional[List[str]] = None,
labels: Optional[Union[MultiLabel, List[MultiLabel]]] = None,
documents: Optional[Union[List[Document], List[List[Document]]]] = None,
meta: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,
params: Optional[dict] = None,
debug: Optional[bool] = None,
): # type: ignore
"""
Transcribe audio files.
:param queries: Ignored
:param file_paths: List of paths to audio files.
:param labels: Ignored
:param documents: Ignored
:param meta: Ignored
:param params: Ignored
:param debug: Ignored
"""
if file_paths and isinstance(file_paths[0], list):
all_files = []
for files_list in file_paths:
all_files += files_list
return self.run(file_paths=all_files)
return self.run(file_paths=file_paths)
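# Illustrative usage sketch (not part of the Haystack source); the file path and API key are
# placeholders, and the local variant assumes the optional `whisper` package is installed.
#
#   transcriber = WhisperTranscriber(api_key="<OPENAI_API_KEY>")        # API-based
#   # transcriber = WhisperTranscriber(model_name_or_path="tiny")       # local Whisper install
#   result = transcriber.transcribe("audio/sample.mp3", language="en")
#   print(result["text"])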
| [] |
2024-01-10 | caseyrmorrison/llm_short_class_final_team_closeai | flower_img_generator.py | from openai import OpenAI
from PIL import Image
import urllib.request
from io import BytesIO
from IPython.display import display
import os
import time
# Get OpenAI key
open_ai_key_file = "api-key.txt"
with open(open_ai_key_file, "r") as f:
for line in f:
OPENAI_KEY = line
break
# openai.api_key = OPENAI_KEY
client = OpenAI(api_key=OPENAI_KEY)
# Came across an error where a species name was flagged as against their policy (Cockscomb), so I had to swap it for Cosmos and rerun this
## Run attempt 1 - error due to bad word
# list_of_flowers = ["Rose", "Tulip", "Orchid", "Lily", "Daffodil", "Sunflower", "Dahlia", "Iris", "Marigold", "Geranium", "Hyacinth", "Peony", "Chrysanthemum", "Lavender", "Begonia", "Carnation", "Azalea", "Snapdragon", "Gardenia", "Amaryllis", "Anemone", "Camellia", "Freesia", "Gladiolus", "Hibiscus", "Jasmine", "Lilac", "Lotus", "Magnolia", "Poppy", "Ranunculus", "Sweet pea", "Violet", "Zinnia", "Bleeding Heart", "Cherry Blossom", "Cockscomb", "Foxglove", "Heather", "Hollyhock", "Nasturtium", "Pansy", "Periwinkle", "Phlox", "Plumeria", "Primrose", "Rhododendron", "Scabiosa", "Thistle", "Wisteria", "Bluebell", "Borage", "Calendula", "Calla Lily", "Candytuft", "Columbine", "Cornflower", "Crocus", "Cyclamen", "Delphinium", "Forget-me-not", "Forsythia", "Fuchsia", "Garden Phlox", "Gypsophila", "Hellebore", "Hydrangea", "Ice Plant", "Impatiens", "Joe-Pye Weed", "Lantana", "Larkspur", "Lobelia", "Lupine", "Mimosa", "Osteospermum", "Petunia", "Protea", "Queen Anne's Lace", "Rudbeckia", "Salvia", "Statice", "Tansy", "Trillium", "Verbena", "Witch Hazel", "Yarrow", "Agapanthus", "Alstroemeria", "Aster", "Bellflower", "Blanket Flower", "Butterfly Bush", "Coreopsis", "Dianthus", "Echinacea", "Gaillardia", "Gerbera Daisy", "Honeysuckle", "Morning Glory"]
## Run attempt 2 with the rest of the list worked but produced some images that weren't flowers
# list_of_flowers = ["Cosmos", "Foxglove", "Heather", "Hollyhock", "Nasturtium", "Pansy", "Periwinkle", "Phlox", "Plumeria", "Primrose", "Rhododendron", "Scabiosa", "Thistle", "Wisteria", "Bluebell", "Borage", "Calendula", "Calla Lily", "Candytuft", "Columbine", "Cornflower", "Crocus", "Cyclamen", "Delphinium", "Forget-me-not", "Forsythia", "Fuchsia", "Garden Phlox", "Gypsophila", "Hellebore", "Hydrangea", "Ice Plant", "Impatiens", "Joe-Pye Weed", "Lantana", "Larkspur", "Lobelia", "Lupine", "Mimosa", "Osteospermum", "Petunia", "Protea", "Queen Anne's Lace", "Rudbeckia", "Salvia", "Statice", "Tansy", "Trillium", "Verbena", "Witch Hazel", "Yarrow", "Agapanthus", "Alstroemeria", "Aster", "Bellflower", "Blanket Flower", "Butterfly Bush", "Coreopsis", "Dianthus", "Echinacea", "Gaillardia", "Gerbera Daisy", "Honeysuckle", "Morning Glory"]
## Run attempt 3 with more detailed description of flowers that produced non flower images
list_of_flowers = ['hellebore flower species', 'delphinium flower species', 'candytuft flower species']
# Store the URLs generated for each photo
url_list = []
batch_size = 1
# Iterate through the list of flowers and call the API
for x in list_of_flowers:
print(x)
response = client.images.generate(
model="dall-e-2",
prompt=x,
size="256x256",
quality="standard",
n=batch_size,
style="natural"
)
url_list.extend([obj.url for obj in response.data])
print(url_list)
# Open the image URL
image_url = response.data[0].url
with urllib.request.urlopen(image_url) as url:
image = Image.open(BytesIO(url.read()))
# Save the file into a google drive folder
img_path = '/content/drive/MyDrive/openai/img/' + x + '.jpg'
image.save(img_path)
# display(image)
    # Wait because of OpenAI API rate limiting: only 5 calls are allowed per minute
time.sleep(15) | [] |
2024-01-10 | BU-Spark/CS506-Spring2020-Projects | NAACP-1~Codes~LSA.py | from gensim import corpora
from gensim.models import LsiModel
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from gensim.models.coherencemodel import CoherenceModel
import matplotlib.pyplot as plt
import pandas as pd
import os  # used by load_data (os.path.join)
globe2014 = pd.read_csv("~/Downloads/CS506/Project/classified_data/globe2014_classified.csv")
globe2015 = pd.read_csv("~/Downloads/CS506/Project/classified_data/globe2015_classified.csv")
globe2016 = pd.read_csv("~/Downloads/CS506/Project/classified_data/globe2016_classified.csv")
globe2017 = pd.read_csv("~/Downloads/CS506/Project/classified_data/globe2017_classified.csv")
globe2018 = pd.read_csv("~/Downloads/CS506/Project/classified_data/globe2018_classified.csv")
def load_data(path,file_name):
"""
Input : path and file_name
Purpose: loading text file
Output : list of paragraphs/documents and
             title (the initial 100 words, considered as the title of the document)
"""
documents_list = []
titles=[]
with open( os.path.join(path, file_name) ,"r") as fin:
for line in fin.readlines():
text = line.strip()
documents_list.append(text)
print("Total Number of Documents:",len(documents_list))
titles.append( text[0:min(len(text),100)] )
return documents_list,titles
def preprocess_data(doc_set):
"""
    Input  : document list
Purpose: preprocess text (tokenize, removing stopwords, and stemming)
Output : preprocessed text
"""
# initialize regex tokenizer
tokenizer = RegexpTokenizer(r'\w+')
# create English stop words list
en_stop = set(stopwords.words('english'))
# Create p_stemmer of class PorterStemmer
p_stemmer = PorterStemmer()
# list for tokenized documents in loop
texts = []
# loop through document list
for i in doc_set:
# clean and tokenize document string
raw = i.lower()
tokens = tokenizer.tokenize(raw)
# remove stop words from tokens
stopped_tokens = [i for i in tokens if not i in en_stop]
# stem tokens
stemmed_tokens = [p_stemmer.stem(i) for i in stopped_tokens]
# add tokens to list
texts.append(stemmed_tokens)
return texts
def prepare_corpus(doc_clean):
"""
Input : clean document
    Purpose: create the term dictionary of our corpus and convert the list of documents (corpus) into a Document Term Matrix
Output : term dictionary and Document Term Matrix
"""
    # Creating the term dictionary of our corpus, where every unique term is assigned an index.
dictionary = corpora.Dictionary(doc_clean)
# Converting list of documents (corpus) into Document Term Matrix using dictionary prepared above.
doc_term_matrix = [dictionary.doc2bow(doc) for doc in doc_clean]
# generate LDA model
return dictionary,doc_term_matrix
def create_gensim_lsa_model(doc_clean,number_of_topics,words):
"""
Input : clean document, number of topics and number of words associated with each topic
Purpose: create LSA model using gensim
Output : return LSA model
"""
dictionary,doc_term_matrix=prepare_corpus(doc_clean)
# generate LSA model
lsamodel = LsiModel(doc_term_matrix, num_topics=number_of_topics, id2word = dictionary) # train model
print(lsamodel.print_topics(num_topics=number_of_topics, num_words=words))
return lsamodel
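# NOTE: compute_coherence_values is called by plot_graph below but is not defined anywhere in
# this script. The helper below is a minimal reconstruction (an assumption, following the
# standard gensim LSA tutorial this code appears to be based on), using the CoherenceModel
# import at the top of the file.
def compute_coherence_values(dictionary, doc_term_matrix, doc_clean, stop, start=2, step=3):
    """
    Input   : dictionary : Gensim dictionary
              doc_term_matrix : Gensim corpus
              doc_clean : preprocessed texts
              stop : maximum number of topics to try
    Purpose : compute c_v coherence for LSA models with different numbers of topics
    Output  : model_list : list of LSA topic models
              coherence_values : coherence value corresponding to each model
    """
    coherence_values = []
    model_list = []
    for num_topics in range(start, stop, step):
        # generate an LSA model with the current number of topics
        model = LsiModel(doc_term_matrix, num_topics=num_topics, id2word=dictionary)
        model_list.append(model)
        coherencemodel = CoherenceModel(model=model, texts=doc_clean, dictionary=dictionary, coherence='c_v')
        coherence_values.append(coherencemodel.get_coherence())
    return model_list, coherence_values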
def plot_graph(doc_clean,start, stop, step):
dictionary,doc_term_matrix=prepare_corpus(doc_clean)
model_list, coherence_values = compute_coherence_values(dictionary, doc_term_matrix,doc_clean, stop, start, step)
# Show graph
x = range(start, stop, step)
plt.plot(x, coherence_values)
plt.xlabel("Number of Topics")
plt.ylabel("Coherence score")
plt.legend(("coherence_values"), loc='best')
plt.show()
def get_black_text(df):
drop_idx = []
for i in range(len(df)):
if df['Black or Not [Y/N]'][i] == 'N':
drop_idx.append(i)
df = df.drop(drop_idx)
return df
# LSA Model
filtered_text = get_black_text(globe2018)
number_of_topics=6
words=10
clean_text=preprocess_data(filtered_text['Sentence'])
model=create_gensim_lsa_model(clean_text,number_of_topics,words)
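# Optional illustrative step (not in the original script): sweep the number of topics and
# plot coherence with plot_graph defined above; the 2..12 range is an arbitrary example.
# plot_graph(clean_text, 2, 12, 1)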
| [] |
2024-01-10 | getBrijendra/RandomCodeSnippets | LLMAgentToolExample.py | # Import things that are needed generically
from langchain.chains import LLMMathChain
from langchain.utilities import SerpAPIWrapper
from langchain.agents import AgentType, initialize_agent
from langchain.chat_models import ChatOpenAI
from langchain.tools import BaseTool, StructuredTool, Tool, tool
import os
import openai
from typing import List
from pydantic import BaseModel, Field
from langchain.utils.openai_functions import convert_pydantic_to_openai_function
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv()) # read local .env file
openai.api_key = os.environ['OPENAI_API_KEY']
llm = ChatOpenAI(temperature=0)
# Load the tool configs that are needed.
search = SerpAPIWrapper()
llm_math_chain = LLMMathChain(llm=llm, verbose=True)
tools = [
Tool.from_function(
func=search.run,
name="Search",
description="useful for when you need to answer questions about current events"
# coroutine= ... <- you can specify an async method if desired as well
),
]
from pydantic import BaseModel, Field
class CalculatorInput(BaseModel):
question: str = Field()
tools.append(
Tool.from_function(
func=llm_math_chain.run,
name="Calculator",
description="useful for when you need to answer questions about math",
args_schema=CalculatorInput
# coroutine= ... <- you can specify an async method if desired as well
)
)
# Construct the agent. We will use the default agent type here.
# See documentation for a full list of options.
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run(
"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?"
) | [] |
2024-01-10 | getBrijendra/RandomCodeSnippets | rerankCode.py | from langchain.chat_models import ChatOpenAI
from langchain.output_parsers.openai_functions import PydanticOutputFunctionsParser
from langchain.prompts import PromptTemplate
from langchain.pydantic_v1 import BaseModel, Field
from langchain.schema.prompt_template import format_document
from langchain.utils.openai_functions import convert_pydantic_to_openai_function
from langchain.llms import openai
import os
import langchain
langchain.debug = True
# Chain to apply to each individual document. Chain
# provides an answer to the question based on the document
# and scores it's confidence in the answer.
map_prompt = PromptTemplate.from_template(
"Answer the user question using the context."
"\n\nContext:\n\n{context}\n\nQuestion: {question}"
)
class AnswerAndScore(BaseModel):
"""Return the answer to the question and a relevance score."""
answer: str = Field(
description="The answer to the question, which is based ONLY on the provided context."
)
score: float = Field(
        description="A 0.0-1.0 relevance score, where 1.0 indicates the provided context answers the question completely and 0.0 indicates the provided context does not answer the question at all."
)
function = convert_pydantic_to_openai_function(AnswerAndScore)
map_chain = (
map_prompt
| ChatOpenAI().bind(
temperature=0, functions=[function], function_call={"name": "AnswerAndScore"}
)
| PydanticOutputFunctionsParser(pydantic_schema=AnswerAndScore)
).with_config(run_name="Map")
# Final chain, which after answer and scoring based on
# each doc return the answer with the highest score.
def top_answer(scored_answers):
return max(scored_answers, key=lambda x: x.score).answer
document_prompt = PromptTemplate.from_template("{page_content}")
map_rerank_chain = (
(
lambda x: [
{
"context": format_document(doc, document_prompt),
"question": x["question"],
}
for doc in x["docs"]
]
)
| map_chain.map()
| top_answer
).with_config(run_name="Map rerank")
from langchain.schema import Document
text = """Nuclear power in space is the use of nuclear power in outer space, typically either small fission systems or radioactive decay for electricity or heat. Another use is for scientific observation, as in a Mรถssbauer spectrometer. The most common type is a radioisotope thermoelectric generator, which has been used on many space probes and on crewed lunar missions. Small fission reactors for Earth observation satellites, such as the TOPAZ nuclear reactor, have also been flown.[1] A radioisotope heater unit is powered by radioactive decay and can keep components from becoming too cold to function, potentially over a span of decades.[2]
The United States tested the SNAP-10A nuclear reactor in space for 43 days in 1965,[3] with the next test of a nuclear reactor power system intended for space use occurring on 13 September 2012 with the Demonstration Using Flattop Fission (DUFF) test of the Kilopower reactor.[4]
After a ground-based test of the experimental 1965 Romashka reactor, which used uranium and direct thermoelectric conversion to electricity,[5] the USSR sent about 40 nuclear-electric satellites into space, mostly powered by the BES-5 reactor. The more powerful TOPAZ-II reactor produced 10 kilowatts of electricity.[3]
Examples of concepts that use nuclear power for space propulsion systems include the nuclear electric rocket (nuclear powered ion thruster(s)), the radioisotope rocket, and radioisotope electric propulsion (REP).[6] One of the more explored concepts is the nuclear thermal rocket, which was ground tested in the NERVA program. Nuclear pulse propulsion was the subject of Project Orion.[7]
Regulation and hazard prevention[edit]
After the ban of nuclear weapons in space by the Outer Space Treaty in 1967, nuclear power has been discussed at least since 1972 as a sensitive issue by states.[8] Particularly its potential hazards to Earth's environment and thus also humans has prompted states to adopt in the U.N. General Assembly the Principles Relevant to the Use of Nuclear Power Sources in Outer Space (1992), particularly introducing safety principles for launches and to manage their traffic.[8]
Benefits
Both the Viking 1 and Viking 2 landers used RTGs for power on the surface of Mars. (Viking launch vehicle pictured)
While solar power is much more commonly used, nuclear power can offer advantages in some areas. Solar cells, although efficient, can only supply energy to spacecraft in orbits where the solar flux is sufficiently high, such as low Earth orbit and interplanetary destinations close enough to the Sun. Unlike solar cells, nuclear power systems function independently of sunlight, which is necessary for deep space exploration. Nuclear-based systems can have less mass than solar cells of equivalent power, allowing more compact spacecraft that are easier to orient and direct in space. In the case of crewed spaceflight, nuclear power concepts that can power both life support and propulsion systems may reduce both cost and flight time.[9]
Selected applications and/or technologies for space include:
Radioisotope thermoelectric generator
Radioisotope heater unit
Radioisotope piezoelectric generator
Radioisotope rocket
Nuclear thermal rocket
Nuclear pulse propulsion
Nuclear electric rocket
"""
docs = [
Document(
page_content=split,
metadata={"source": "https://en.wikipedia.org/wiki/Nuclear_power_in_space"},
)
for split in text.split("\n\n")
]
print(
map_rerank_chain.invoke({"docs": docs, "question": "How were the vikings powered"})
) | [
"Answer the user question using the context.",
"{page_content}",
"\n\nContext:\n\n{context}\n\nQuestion: {question}"
] |
2024-01-10 | getBrijendra/RandomCodeSnippets | routingChain.py | from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableLambda, RunnablePassthrough
from langchain.utils.math import cosine_similarity
from langchain.llms import openai
import os
import langchain
langchain.debug = True
physics_template = """You are a very smart program manager and expert at Jiira software to manage software developement project. \
You are great at classifying query if it is about jira issue in a concise and easy to understand manner. \
When you don't know the answer to a question you admit that you don't know.
Here is a question:
{query}"""
math_template = """You are a very good mathematician. You are great at answering math questions. \
You are so good because you are able to break down hard problems into their component parts, \
answer the component parts, and then put them together to answer the broader question.
Here is a question:
{query}"""
embeddings = OpenAIEmbeddings()
prompt_templates = [physics_template, math_template]
prompt_embeddings = embeddings.embed_documents(prompt_templates)
def prompt_router(input):
query_embedding = embeddings.embed_query(input["query"])
similarity = cosine_similarity([query_embedding], prompt_embeddings)[0]
most_similar = prompt_templates[similarity.argmax()]
print("Using MATH" if most_similar == math_template else "Using PHYSICS")
return PromptTemplate.from_template(most_similar)
chain = (
{"query": RunnablePassthrough()}
| RunnableLambda(prompt_router)
| ChatOpenAI()
| StrOutputParser()
)
print(chain.invoke("What's a black hole"))
#https://python.langchain.com/docs/modules/chains/foundational/router
#https://python.langchain.com/docs/expression_language/cookbook/embedding_router
#https://python.langchain.com/docs/expression_language/how_to/routing | [
"You are a very good mathematician. You are great at answering math questions. You are so good because you are able to break down hard problems into their component parts, answer the component parts, and then put them together to answer the broader question.\n\nHere is a question:\n{query}",
"You are a very smart program manager and expert at Jiira software to manage software developement project. You are great at classifying query if it is about jira issue in a concise and easy to understand manner. When you don't know the answer to a question you admit that you don't know.\n\nHere is a question:\n{query}",
"[\"You are a very smart program manager and expert at Jiira software to manage software developement project. You are great at classifying query if it is about jira issue in a concise and easy to understand manner. When you don't know the answer to a question you admit that you don't know.\\n\\nHere is a question:\\n{query}\", 'You are a very good mathematician. You are great at answering math questions. You are so good because you are able to break down hard problems into their component parts, answer the component parts, and then put them together to answer the broader question.\\n\\nHere is a question:\\n{query}']"
] |
2024-01-10 | getBrijendra/RandomCodeSnippets | chat-doc-demo.py | from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.document_loaders import TextLoader
from langchain.chains import OpenAIModerationChain
from langchain.llms import openai
from langchain.embeddings import OpenAIEmbeddings
from langchain.tools import Tool
import chromadb
from chromadb.utils import embedding_functions
from googledrive import CustomGoogleDriveLoader
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough, RunnableLambda
from langchain.prompts import PromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.schema.runnable import RunnableMap
from langchain.schema import format_document
from operator import itemgetter
from langchain.memory import ConversationBufferMemory
from loguru import logger
from langchain.callbacks import FileCallbackHandler
import asyncio
from langchain.callbacks import get_openai_callback
from typing import Tuple, List
import uuid
import os
import langchain
langchain.debug = True
logfile = "output.log"
logger.add(logfile, colorize=True, enqueue=True)
handler = FileCallbackHandler(logfile)  # passed as a callback to the ChatOpenAI calls below
openai_api_key = ""
openai.api_key = openai_api_key
os.environ['OPENAI_API_KEY'] = openai_api_key
os.environ['FOLDER_ID'] = 'werrdw23'
# chroma_db_Client = chromadb.HttpClient(host='localhost', port=8000)
chroma_db_Client = chromadb.HttpClient(host='localhost', port=8000)
# Set up OpenAI embeddings
embeddings = OpenAIEmbeddings()
openai_ef = embedding_functions.OpenAIEmbeddingFunction(
api_key=openai_api_key,
model_name="text-embedding-ada-002"
)
#Loads Data From GoogleDrive
def loadDataFromGoogleDrive():
folder_id = os.environ.get('FOLDER_ID')
print(f'FOLDER ID: {folder_id}')
loader = CustomGoogleDriveLoader(
folder_id=folder_id,
token_path= 'token.json',
skip_on_failure=True,
# file_types=["document", "pdf"],
# file_loader_cls=TextLoader,
file_loader_kwargs={"mode": "elements"}
# Optional: configure whether to recursively fetch files from subfolders. Defaults to False.
)
docs = loader.load()
print(f'Length of the DOCS: {len(docs)}')
for doc in docs:
print(doc.metadata)
return docs
#Splits the documents list into Chunks
def textChunker(chunk_size: int, chunk_overlap: int, documents: list):
# split into chunks
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
length_function=len
)
docs = text_splitter.split_documents(documents)
return docs
#Create OpenAI Embeddings and Save It To Chroma
def createEmbedingsAndSaveToChroma(docs: list):
# Set up OpenAI embeddings
openai_ef = embedding_functions.OpenAIEmbeddingFunction(
api_key=openai_api_key,
model_name="text-embedding-ada-002"
)
# load Chroma Client
chroma_db_Clients = chroma_db_Client
# Use 'openai_ef' *OpenAIEmbeddings Function* to create the Collection
collection = chroma_db_Clients.get_or_create_collection(name="my_collection", embedding_function=openai_ef)
# Save each chunk with the metadata to ChromaDB
for doc in docs:
# Save Each Document in chromaDb
collection.add(
ids=[str(uuid.uuid1())], metadatas=doc.metadata, documents=doc.page_content
)
def load_data_from_source_to_vstore():
# load the document and split it into chunks
loader = TextLoader("./sample_text.txt")
documents = loader.load()
# split it into chunks
text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=50)
docs = text_splitter.split_documents(documents)
# Set up OpenAI embeddings
embeddings = OpenAIEmbeddings()
openai_ef = embedding_functions.OpenAIEmbeddingFunction(
api_key=openai_api_key,
model_name="text-embedding-ada-002"
)
# load it into Chroma
persistent_client = chroma_db_Client
#collection = persistent_client.get_or_create_collection("collection_name")
collection = persistent_client.get_or_create_collection(name="my_collection", embedding_function=openai_ef)
for doc in docs:
collection.add(
ids=[str(uuid.uuid1())], metadatas=doc.metadata, documents=doc.page_content
)
db = Chroma(
client=persistent_client,
collection_name="my_collection",
embedding_function=embeddings,
)
# query it
query = "How AI is helpful?"
docs = db.similarity_search(query)
#print results
print('length of matching docs:' + str(len(docs)))
print(docs[0].page_content)
def load_data_from_disk():
# load from disk
#persistent_client_for_loading = chromadb.PersistentClient()
persistent_client_for_loading = chroma_db_Client
openai_ef_for_loading = embedding_functions.OpenAIEmbeddingFunction(
api_key=openai_api_key,
model_name="text-embedding-ada-002"
)
collection = persistent_client_for_loading.get_collection(name="my_collection", embedding_function=openai_ef_for_loading) # Get a collection object from an existing collection, by name. Will raise an exception if it's not found.
# Set up OpenAI embeddings
embeddings = OpenAIEmbeddings()
db2 = Chroma(
client=persistent_client_for_loading,
collection_name="my_collection",
embedding_function=embeddings,
)
query2 = "How AI is helpful in climate change?"
docs2 = db2.similarity_search(query2)
#print results
print('################### After loading from disk ##################')
print('length of matching docs:'+ str(len(docs2)))
print(docs2[0].page_content)
return db2
############# USING RAG ################################
DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template="{page_content}")
def _combine_documents(docs, document_prompt = DEFAULT_DOCUMENT_PROMPT, document_separator="\n\n"):
doc_strings = [format_document(doc, document_prompt) for doc in docs]
doc_joined = document_separator.join(doc_strings)
print('_combine_documents: doc_joined:', doc_joined)
return doc_joined
def _format_chat_history(chat_history: List[Tuple]) -> str:
print('_format_chat_history: chat_history:', chat_history)
buffer = ""
for dialogue_turn in chat_history:
human = "Human: " + dialogue_turn[0]
ai = "Assistant: " + dialogue_turn[1]
buffer += "\n" + "\n".join([human, ai])
print('_format_chat_history: chat_history combined:', buffer)
return buffer
def get_tokens_info_for_request(cb):
return {
"Total Tokens": cb.total_tokens,
"Prompt Tokens": cb.prompt_tokens,
"Completion Tokens": cb.completion_tokens,
"Total Cost (USD)": cb.total_cost
}
def answer_queries(user_query):
result = {}
with get_openai_callback() as cb:
db2 = load_data_from_disk()
moderate = OpenAIModerationChain()
prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}
Answer:"""
ANSWER_PROMPT = ChatPromptTemplate.from_template(prompt_template)
_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
# search_kwargs={"k": 4}
retriever = db2.as_retriever(search_type="similarity_score_threshold", search_kwargs={"score_threshold": 0.7, "k": 4})
memory = ConversationBufferMemory(return_messages=True, output_key="answer", input_key="question")
# First we add a step to load memory
# This adds a "memory" key to the input object
loaded_memory = RunnablePassthrough.assign(
chat_history=RunnableLambda(memory.load_memory_variables) | itemgetter("history"),
)
# Now we calculate the standalone question
standalone_question = {
"standalone_question": {
"question": lambda x: x["question"],
"chat_history": lambda x: _format_chat_history(x['chat_history'])
} | CONDENSE_QUESTION_PROMPT | ChatOpenAI(temperature=0, callbacks=[handler]) | StrOutputParser(),
}
print('standalone_question:', standalone_question)
# Now we retrieve the documents
retrieved_documents = {
"docs": itemgetter("standalone_question") | retriever,
"question": lambda x: x["standalone_question"]
}
# Now we construct the inputs for the final prompt
final_inputs = {
"context": lambda x: _combine_documents(x["docs"]),
"question": itemgetter("question")
}
# And finally, we do the part that returns the answers
answer = {
"answer": final_inputs | ANSWER_PROMPT | ChatOpenAI(callbacks=[handler]),
"docs": itemgetter("docs"),
}
# And now we put it all together!
final_chain = loaded_memory | standalone_question | retrieved_documents | answer #| moderate
inputs = {"question": user_query}
print('Invoking final_chain....')
result = final_chain.invoke(inputs)
print(result['answer'].content)
print(result['docs'])
# Note that the memory does not save automatically
# This will be improved in the future
# For now you need to save it yourself
memory.save_context(inputs, {"answer": result["answer"].content})
print(memory.load_memory_variables({}))
tokens_info = get_tokens_info_for_request(cb)
return {
"response": result['answer'].content,
"references": [{"content": doc.page_content, "metadata": doc.metadata} for doc in result['docs']],
"total_tokens": tokens_info
}
if __name__ == "__main__":
# load_data_from_source_to_vstore()
# Load the documents from Google_DRIVE
# documents = loadDataFromGoogleDrive()
# # SPLIT THE TEXT into chunks
# docs = textChunker(600, 100, documents)
# # Create OpenAI embeddings And Save it To Chroma
# createEmbedingsAndSaveToChroma(docs)
res = answer_queries("You are stupid?")
# res = answer_queries("Who is SamsungM51?")
# res = answer_queries("What is Shell Scripting?")
print("\n\n Result:")
print(res)
langchain.debug = False | [
"Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n\n {context}\n\n Question: {question}\n Answer:",
"Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\n\n Chat History:\n {chat_history}\n Follow Up Input: {question}\n Standalone question:",
"{page_content}"
] |
2024-01-10 | getBrijendra/RandomCodeSnippets | ingestDataFromGDrive.py | from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.llms import openai
from langchain.embeddings import OpenAIEmbeddings
from langchain.tools import Tool
import os
import getpass
from pymongo import MongoClient
from langchain.vectorstores import MongoDBAtlasVectorSearch
# import chromadb
# from chromadb.utils import embedding_functions
from lambda_app.llm.ingestUtils.GoogleDriveLoader import GoogleDriveLoader
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough, RunnableLambda
from langchain.document_loaders import UnstructuredFileIOLoader
from langchain.prompts import PromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.schema.runnable import RunnableMap
from langchain.schema import format_document
from operator import itemgetter
from langchain.memory import ConversationBufferMemory
#from loguru import logger
from langchain.callbacks import FileCallbackHandler
import asyncio
from langchain.callbacks import get_openai_callback
from typing import Tuple, List
import uuid
import os
#logfile = "output.log"
#logger.add(logfile, colorize=True, enqueue=True)
#handler = FileCallbackHandler(logfile)
openai_api_key = ""
openai.api_key = openai_api_key
os.environ['OPENAI_API_KEY'] = openai_api_key
os.environ['FOLDER_ID'] = '43f34fwe'
# chroma_db_Client = chromadb.HttpClient(host=os.environ.get('CHROMADB_IP_ADDRESS'), port=8000)
# openai_ef = embedding_functions.OpenAIEmbeddingFunction(
# api_key=openai_api_key,
# model_name="text-embedding-ada-002"
# )
MONGODB_ATLAS_CLUSTER_URI = 'mongodb+srv://abc:[email protected]/'
# initialize MongoDB python client
mongo_db_client = MongoClient(MONGODB_ATLAS_CLUSTER_URI)
db_name = "langchain_db"
collection_name = "my_collection"
collection = mongo_db_client[db_name][collection_name]
index_name = "langchain_demo"
embeddings = OpenAIEmbeddings()
#Loads Data From GoogleDrive Folders
def loadDataFromGoogleDriveFolder(folder_id: str):
# folder_id = "1qczk8ORiLNYUNQ3D6h5tCYt70QdmW870"
folder_id = folder_id
print(f'FOLDER ID: {folder_id}')
loader = GoogleDriveLoader(
folder_id=folder_id,
token_path= 'token.json',
skip_on_failure=True,
# file_types=["document", "pdf"],
# file_loader_cls=TextLoader,
file_loader_kwargs={"mode": "elements"}
)
docs = loader.load()
print(f'Length of the DOCS: {len(docs)}')
for doc in docs:
print(doc.metadata)
return docs
#Loads Data From GoogleDrive Files
def loadDataFromGoogleDriveFiles(file_ids: List[str]):
print(f'[loadDataFromGoogleDriveFiles] FILE IDs: {file_ids}')
loader = GoogleDriveLoader(
document_ids=file_ids,
token_path= 'token.json',
skip_on_failure=True,
# file_loader_cls=UnstructuredFileIOLoader,
# file_types=["document", "pdf"],
# file_loader_cls=TextLoader,
file_loader_kwargs={"mode": "elements"}
)
docs = loader.load()
print(f'Length of the DOCS: {len(docs)}')
for doc in docs:
print("PageContent:", doc.page_content, "\n")
print("Metadata:", doc.metadata, "\n")
return docs
#Splits the documents list into Chunks
def textChunker(chunk_size: int, chunk_overlap: int, documents: list):
# split into chunks
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
length_function=len
)
docs = text_splitter.split_documents(documents)
return docs
# #Create OpenAI Embeddings and Save It To Chroma
# def createEmbedingsAndSaveToChroma(docs: list):
# # Set up OpenAI embeddings
# openai_ef = embedding_functions.OpenAIEmbeddingFunction(
# api_key=openai_api_key,
# model_name="text-embedding-ada-002"
# )
# # load Chroma Client
# chroma_db_Clients = mongo_db_client
# # Use 'openai_ef' *OpenAIEmbeddings Function* to create the Collection
# collection = chroma_db_Clients.get_or_create_collection(name="my_collection", embedding_function=openai_ef)
# # Save each chunk with the metadata to ChromaDB
# for doc in docs:
# # Save Each Document in chromaDb
# collection.add(
# ids=[str(uuid.uuid1())], metadatas=doc.metadata, documents=doc.page_content
# )
def createEmbedingsAndSaveToChroma(docs: list):
docsearch = MongoDBAtlasVectorSearch.from_documents(
docs, embeddings, collection=collection, index_name=index_name
)
def ingestDataFromGoogleDrive(body: dict):
folderIds = body.get('folder_ids',[])
fileIds = body.get('file_ids',[])
# Ingest Data from folders and push it to Chroma Db
for folderId in folderIds:
documents = loadDataFromGoogleDriveFolder(folderId)
chunkedData = textChunker(500, 100, documents)
createEmbedingsAndSaveToChroma(chunkedData)
# Ingest Data from files
if len(fileIds)!=0:
documents = loadDataFromGoogleDriveFiles(fileIds)
chunkedData = textChunker(500, 100, documents)
createEmbedingsAndSaveToChroma(chunkedData)
return {"success": True, "message": "Data Ingested Successfully"}, 200
| [] |
2024-01-10 | cliffpyles/Helpers | apps~author-cli~author_cli.py | #!/usr/bin/env python3
import json
import yaml
import click
import inquirer
import openai
import os
from pathlib import Path
from inquirer.errors import ValidationError
openai.api_key = os.getenv("OPENAI_API_KEY")
class RangeValidator(object):
def __init__(self, min_value, max_value):
self.min_value = min_value
self.max_value = max_value
def __call__(self, _, value):
try:
int_value = int(value)
if self.min_value <= int_value <= self.max_value:
return value
else:
raise ValidationError("", reason=f"Value must be between {self.min_value} and {self.max_value}")
except ValueError:
raise ValidationError("", reason="Please enter a valid number")
def read_file(file_path):
with open(file_path, 'r') as file:
content = file.read()
return content
def load_config(file_path):
with open(file_path, 'r') as config_file:
if file_path.endswith('.json'):
return json.load(config_file)
elif file_path.endswith('.yml') or file_path.endswith('.yaml'):
return yaml.safe_load(config_file)
else:
raise ValueError('Invalid file format. Use JSON or YAML.')
def display_prompts(prompts, arguments):
questions = []
for prompt in prompts:
prompt_key = prompt.get('key')
prompt_type = prompt['type']
kwargs = prompt['kwargs']
if prompt_key and arguments.get(prompt_key) is not None:
continue
if prompt_type == 'text':
question = inquirer.Text(**kwargs)
elif prompt_type == 'checkbox':
question = inquirer.Checkbox(**kwargs)
elif prompt_type == 'radio':
question = inquirer.List(**kwargs)
elif prompt_type == 'range':
min_value = kwargs.pop('min', None)
max_value = kwargs.pop('max', None)
if min_value is not None and max_value is not None:
kwargs['validate'] = RangeValidator(min_value, max_value)
question = inquirer.Text(**kwargs)
elif prompt_type == 'file':
question = inquirer.Text(**kwargs)
else:
raise ValueError(f'Invalid prompt type: {prompt_type}')
questions.append(question)
user_responses = inquirer.prompt(questions)
responses = {**arguments, **user_responses}
# Read the contents of the file for 'file' prompt type
for prompt in prompts:
prompt_key = prompt.get('key')
prompt_type = prompt['type']
if prompt_type == 'file' and responses.get(prompt_key) is not None:
file_path = responses[prompt_key]
responses[f"{prompt_key}_content"] = read_file(file_path)
return {k: v for k, v in responses.items() if v is not None}
def generate_options(prompts):
options = []
for prompt in prompts:
prompt_key = prompt.get('key')
prompt_type = prompt['type']
if prompt_key:
if prompt_type == 'radio':
choices = prompt['kwargs']['choices']
option = click.Option(param_decls=[f'--{prompt_key}'],
type=click.Choice(choices, case_sensitive=False),
help=f'Pass your {prompt_key} preference as an argument.')
elif prompt_type == 'file':
option = click.Option(param_decls=[f'--{prompt_key}'],
type=click.Path(exists=True, dir_okay=False, resolve_path=True),
help=f'Pass the file path for {prompt_key} as an argument.')
else:
option = click.Option(param_decls=[f'--{prompt_key}'],
type=str,
help=f'Pass your {prompt_key} as an argument.')
options.append(option)
return options
def chat_with_gpt(message):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": message}]
)
return response.choices[0].message['content'].strip()
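# Illustrative sketch (not part of the original script): the prompts.yaml shape that
# load_config / display_prompts / main expect. All field values below are placeholders.
#
#   context: "You are a helpful writing assistant."
#   messages:
#     - "Write an outline based on the inputs above."
#   prompts:
#     - key: topic
#       type: text
#       kwargs: {name: topic, message: "What is the topic?"}
#     - key: tone
#       type: radio
#       kwargs: {name: tone, message: "Pick a tone", choices: [formal, casual]}
#     - key: length
#       type: range
#       kwargs: {name: length, message: "Length (1-10)", min: 1, max: 10}
#     - key: reference
#       type: file
#       kwargs: {name: reference, message: "Path to a reference file"}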
def main(**kwargs):
file_path = kwargs.pop('file', None) or config_file_path
config = load_config(str(file_path))
responses = display_prompts(config["prompts"], kwargs)
# Construct the command to reproduce the current context
command = "author"
for k, v in responses.items():
for prompt in config["prompts"]:
prompt_key = prompt.get('key')
prompt_type = prompt['type']
if k == prompt_key and prompt_type != 'file':
command += f" --{k} \"{v}\""
elif k == prompt_key and prompt_type == 'file':
command += f" --{k} \"{v}\""
# Initialize the messages list with the system message
messages = [{"role": "system", "content": config["context"]}]
# Add user responses as separate messages
for k, v in responses.items():
for prompt in config["prompts"]:
prompt_key = prompt.get('key')
prompt_type = prompt['type']
if k == prompt_key and prompt_type == 'file':
messages.append({"role": "user", "content": f"{k}_path: {v}"})
messages.append({"role": "user", "content": f"{k}: {responses[f'{k}_content']}"})
elif k == prompt_key:
messages.append({"role": "user", "content": f"{k}: {v}"})
for message in config["messages"]:
messages.append({"role": "user", "content": message})
# Send messages to ChatGPT and display the response
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
chatgpt_response = response.choices[0].message['content'].strip()
click.echo(chatgpt_response, color=True)
click.echo("\n\nCommand:\n")
click.echo(command)
click.echo("\n\n")
script_dir = Path(__file__).resolve(strict=False).parent
config_file_path = script_dir / './prompts.yaml'
config = load_config(str(config_file_path))
options = generate_options(config["prompts"])
main = click.Command('main', callback=main, params=options)
if __name__ == '__main__':
main() | [
"PLACEHOLDER: PLACEHOLDER",
"PLACEHOLDER_path: PLACEHOLDER",
"You are a helpful assistant.",
"context"
] |
2024-01-10 | cliffpyles/Helpers | .blueprints~chat_app~files~apps~%7B%7Bblueprint_instance_name___kebab_case%7D%7D-cli~gpt_chat.py | import openai
import sys
from pathlib import Path
def strip_cwd(filepath):
cwd = Path.cwd()
fullpath = Path(filepath).resolve()
if fullpath.parts[:len(cwd.parts)] == cwd.parts:
return str(fullpath.relative_to(cwd))
else:
return str(fullpath)
def read_file(file_path):
"""Read the content of a file."""
with open(file_path, 'r') as file:
content = file.read()
return content
def chat_with_gpt(config, responses):
"""
Interacts with the GPT-3.5-turbo model based on the provided config and user responses.
:param config: The loaded configuration dictionary containing prompts and other settings.
:param responses: The user responses dictionary.
:return: A tuple containing the GPT model's response and the command string to reproduce the context.
"""
# Read the contents of files that have 'file' prompt type
for prompt in config["prompts"]:
prompt_key = prompt.get('key')
prompt_type = prompt['type']
response = responses.get(prompt_key)
if prompt_type == 'file' and response is not None and response.strip() != '':
filepath = responses[prompt_key]
responses[f"{prompt_key}_filepath"] = strip_cwd(filepath)
responses[f"{prompt_key}_content"] = read_file(filepath)
del responses[prompt_key]
# Initialize the messages list with the system message
messages = [{"role": "system", "content": config["context"]}]
# Add user responses as separate messages
    for k, v in responses.items():
        # send each collected response exactly once (this includes any "<key>_filepath" /
        # "<key>_content" entries added above for file-type prompts)
        messages.append({"role": "user", "content": f"{k}: {v}"})
# Predefined messages from the configuration
for message in config["messages"]:
messages.append({"role": "user", "content": message})
# Send messages to ChatGPT and return the response
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
chatgpt_response = response.choices[0].message['content'].strip()
return chatgpt_response
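# Illustrative sketch (not part of the original module): a minimal call, assuming the caller
# has already set openai.api_key. All values below are placeholders.
if __name__ == "__main__":
    example_config = {
        "context": "You are a helpful writing assistant.",
        "messages": ["Summarize the inputs above in one sentence."],
        "prompts": [{"key": "topic", "type": "text"}],
    }
    example_responses = {"topic": "large language models"}
    print(chat_with_gpt(example_config, example_responses))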
| [
"PLACEHOLDER: PLACEHOLDER",
"context"
] |