date_collected (string, 1 class) | repo_name (string, 6–116 chars) | file_name (string, 2–220 chars) | file_contents (string, 13–357k chars) | prompts (list)
---|---|---|---|---|
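Each row below pairs a scraped source file with the prompt strings extracted from it. As a minimal sketch of how a table with these columns could be consumed programmatically (the dataset identifier and split name are placeholders, not part of this preview):

# Hypothetical loading sketch -- substitute the real dataset identifier.
# from datasets import load_dataset
# ds = load_dataset("user/scraped-openai-prompts", split="train")
# for row in ds:
#     print(row["date_collected"], row["repo_name"], row["file_name"], len(row["prompts"]))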
2024-01-10 | 1ou2/docdoctor | tuto.py | import os
from dotenv import load_dotenv
from openai import AzureOpenAI
fact_sheet_chair = f"""
<OVERVIEW
- Part of a beautiful family of mid-century inspired office furniture,
including filing cabinets, desks, bookcases, meeting tables, and more.
- Several options of shell color and base finishes.
- Available with plastic back and front upholstery (SWC-100)
or full upholstery (SWC-110) in 10 fabric and 6 leather options.
- Base finish options are: stainless steel, matte black,
gloss white, or chrome.
- Chair is available with or without armrests.
- Suitable for home or business settings.
- Qualified for contract use.
CONSTRUCTION
- 5-wheel plastic coated aluminum base.
- Pneumatic chair adjust for easy raise/lower action.
DIMENSIONS
- WIDTH 53 CM | 20.87”
- DEPTH 51 CM | 20.08”
- HEIGHT 80 CM | 31.50”
- SEAT HEIGHT 44 CM | 17.32”
- SEAT DEPTH 41 CM | 16.14”
OPTIONS
- Soft or hard-floor caster options.
- Two choices of seat foam densities:
medium (1.8 lb/ft3) or high (2.8 lb/ft3)
- Armless or 8 position PU armrests
MATERIALS
SHELL BASE GLIDER
- Cast Aluminum with modified nylon PA6/PA66 coating.
- Shell thickness: 10 mm.
SEAT
- HD36 foam
COUNTRY OF ORIGIN
- Italy
>"""
def market():
    load_dotenv()
    client = AzureOpenAI(
        api_key=os.getenv("AZURE_OPENAI_KEY"),
        api_version="2023-10-01-preview",
        azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT")
    )
    deployment_name = "gpt35turbo"
    response = client.chat.completions.create(
        model="GPT4",  # model = "deployment_name".
        messages=[
            {"role": "system", "content": "You are the chief marketing officer of a chair company. "},
            {"role": "user", "content": "write a marketing description for the product described between the delimiters " + fact_sheet_chair}
        ]
    )
    # print(response.model_dump_json(indent=2))
    print(response.choices[0].message.content)

if __name__ == "__main__":
    market() | [
"You are the chief marketing officer of a chair company. ",
"write a marketing description for the product described between the delimiters \n<OVERVIEW\n- Part of a beautiful family of mid-century inspired office furniture, \nincluding filing cabinets, desks, bookcases, meeting tables, and more.\n- Several options of shell color and base finishes.\n- Available with plastic back and front upholstery (SWC-100) \nor full upholstery (SWC-110) in 10 fabric and 6 leather options.\n- Base finish options are: stainless steel, matte black, \ngloss white, or chrome.\n- Chair is available with or without armrests.\n- Suitable for home or business settings.\n- Qualified for contract use.\n\nCONSTRUCTION\n- 5-wheel plastic coated aluminum base.\n- Pneumatic chair adjust for easy raise/lower action.\n\nDIMENSIONS\n- WIDTH 53 CM | 20.87”\n- DEPTH 51 CM | 20.08”\n- HEIGHT 80 CM | 31.50”\n- SEAT HEIGHT 44 CM | 17.32”\n- SEAT DEPTH 41 CM | 16.14”\n\nOPTIONS\n- Soft or hard-floor caster options.\n- Two choices of seat foam densities: \n medium (1.8 lb/ft3) or high (2.8 lb/ft3)\n- Armless or 8 position PU armrests \n\nMATERIALS\nSHELL BASE GLIDER\n- Cast Aluminum with modified nylon PA6/PA66 coating.\n- Shell thickness: 10 mm.\nSEAT\n- HD36 foam\n\nCOUNTRY OF ORIGIN\n- Italy\n>"
] |
2024-01-10 | 1ou2/docdoctor | firsttest.py | import os
from openai import AzureOpenAI
from dotenv import load_dotenv
def assist4():
    load_dotenv()
    client = AzureOpenAI(
        api_key=os.getenv("AZURE_OPENAI_KEY"),
        api_version="2023-10-01-preview",
        azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT")
    )
    deployment_name = 'GPT4'  # This will correspond to the custom name you chose for your deployment when you deployed a model.
    """ response = client.chat.completions.create(
        model="GPT4", # model = "deployment_name".
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Does Azure OpenAI support customer managed keys?"},
            {"role": "assistant", "content": "Yes, customer managed keys are supported by Azure OpenAI."},
            {"role": "user", "content": "Do other Azure AI services support this too?"}
        ]
    )
    print(response.choices[0].message.content) """
    response = client.chat.completions.create(
        model="GPT4",  # model = "deployment_name".
        messages=[
            {"role": "system", "content": "Vous êtes un assistant d’un centre d’appel voulant aider les utilisateurs."},
            {"role": "user", "content": "Je n’arrive pas à imprimer"},
            {"role": "assistant", "content": "Vérifier si votre imprimante est bien configurée dans le panneau de configuration"},
            {"role": "user", "content": "Comment changer mon mot de passe ?"}
        ]
    )
    print(response.choices[0].message.content)
def get_completion(client, prompt, model="gpt-3.5-turbo"):
    # Helper kept for reference: the original called the legacy openai.ChatCompletion API
    # without importing openai; this version reuses a v1-style client (e.g. the AzureOpenAI client above).
    messages = [{"role": "user", "content": prompt}]
    response = client.chat.completions.create(
        model=model,
        messages=messages,
        temperature=0,  # this is the degree of randomness of the model's output
    )
    return response.choices[0].message.content
if __name__ == "__main__":
#assist4()
load_dotenv()
client = AzureOpenAI(
api_key=os.getenv("AZURE_OPENAI_KEY"),
api_version="2023-10-01-preview",
azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
)
deployment_name="gpt35turbo"
# Send a completion call to generate an answer
text = f"""
You should express what you want a model to do by \
providing instructions that are as clear and \
specific as you can possibly make them. \
This will guide the model towards the desired output, \
and reduce the chances of receiving irrelevant \
or incorrect responses. Don't confuse writing a \
clear prompt with writing a short prompt. \
In many cases, longer prompts provide more clarity \
and context for the model, which can lead to \
more detailed and relevant outputs.
"""
prompt = f"""
Summarize the text delimited by triple backticks \
into a single sentence.
```{text}```
"""
#
### COMPLETION API
#
#start_phrase = prompt
#response = client.completions.create(model=deployment_name, prompt=start_phrase, max_tokens=300)
#print(response.choices[0].text)
deployment_name='GPT4' #This will correspond to the custom name you chose for your deployment when you deployed a model.
response = client.chat.completions.create(
model="GPT4", # model = "deployment_name".
messages=[
{"role": "system", "content": "You are an assistant designed to summarize text"},
{"role": "user", "content": prompt}
]
)
print(response.model_dump_json(indent=2))
print(response.choices[0].message.content) | [
"Vous êtes un assistant d’un centre d’appel voulant aider les utilisateurs.",
"You are an assistant designed to summarize text",
"Vérifier si votre imprimante est bien configurée dans le panneau de configuration",
"Comment changer mon mot de passe ?",
"Je n’arrive pas à imprimer",
"\n Summarize the text delimited by triple backticks \\ \n into a single sentence.\n ```PLACEHOLDER```\n "
] |
2024-01-10 | 1ou2/docdoctor | addcontext.py | # imports
import ast # for converting embeddings saved as strings back to arrays
from openai import AzureOpenAI # for calling the OpenAI API
import pandas as pd # for storing text and embeddings data
import tiktoken # for counting tokens
import os # for getting API token from env variable OPENAI_API_KEY
from scipy import spatial # for calculating vector similarities for search
from dotenv import load_dotenv
# models
EMBEDDING_MODEL = "text-embedding-ada-002"
GPT_MODEL = "gpt-3.5-turbo"
# text copied and pasted from: https://en.wikipedia.org/wiki/Curling_at_the_2022_Winter_Olympics
# I didn't bother to format or clean the text, but GPT will still understand it
# the entire article is too long for gpt-3.5-turbo, so I only included the top few sections
wikipedia_article_on_curling = """Curling at the 2022 Winter Olympics
Article
Talk
Read
Edit
View history
From Wikipedia, the free encyclopedia
Curling
at the XXIV Olympic Winter Games
Curling pictogram.svg
Curling pictogram
Venue Beijing National Aquatics Centre
Dates 2–20 February 2022
No. of events 3 (1 men, 1 women, 1 mixed)
Competitors 114 from 14 nations
← 20182026 →
Men's curling
at the XXIV Olympic Winter Games
Medalists
1st place, gold medalist(s) Sweden
2nd place, silver medalist(s) Great Britain
3rd place, bronze medalist(s) Canada
Women's curling
at the XXIV Olympic Winter Games
Medalists
1st place, gold medalist(s) Great Britain
2nd place, silver medalist(s) Japan
3rd place, bronze medalist(s) Sweden
Mixed doubles's curling
at the XXIV Olympic Winter Games
Medalists
1st place, gold medalist(s) Italy
2nd place, silver medalist(s) Norway
3rd place, bronze medalist(s) Sweden
Curling at the
2022 Winter Olympics
Curling pictogram.svg
Qualification
Statistics
Tournament
Men
Women
Mixed doubles
vte
The curling competitions of the 2022 Winter Olympics were held at the Beijing National Aquatics Centre, one of the Olympic Green venues. Curling competitions were scheduled for every day of the games, from February 2 to February 20.[1] This was the eighth time that curling was part of the Olympic program.
In each of the men's, women's, and mixed doubles competitions, 10 nations competed. The mixed doubles competition was expanded for its second appearance in the Olympics.[2] A total of 120 quota spots (60 per sex) were distributed to the sport of curling, an increase of four from the 2018 Winter Olympics.[3] A total of 3 events were contested, one for men, one for women, and one mixed.[4]
Qualification
Main article: Curling at the 2022 Winter Olympics – Qualification
Qualification to the Men's and Women's curling tournaments at the Winter Olympics was determined through two methods (in addition to the host nation). Nations qualified teams by placing in the top six at the 2021 World Curling Championships. Teams could also qualify through Olympic qualification events which were held in 2021. Six nations qualified via World Championship qualification placement, while three nations qualified through qualification events. In men's and women's play, a host will be selected for the Olympic Qualification Event (OQE). They would be joined by the teams which competed at the 2021 World Championships but did not qualify for the Olympics, and two qualifiers from the Pre-Olympic Qualification Event (Pre-OQE). The Pre-OQE was open to all member associations.[5]
For the mixed doubles competition in 2022, the tournament field was expanded from eight competitor nations to ten.[2] The top seven ranked teams at the 2021 World Mixed Doubles Curling Championship qualified, along with two teams from the Olympic Qualification Event (OQE) – Mixed Doubles. This OQE was open to a nominated host and the fifteen nations with the highest qualification points not already qualified to the Olympics. As the host nation, China qualified teams automatically, thus making a total of ten teams per event in the curling tournaments.[6]
Summary
Nations Men Women Mixed doubles Athletes
Australia Yes 2
Canada Yes Yes Yes 12
China Yes Yes Yes 12
Czech Republic Yes 2
Denmark Yes Yes 10
Great Britain Yes Yes Yes 10
Italy Yes Yes 6
Japan Yes 5
Norway Yes Yes 6
ROC Yes Yes 10
South Korea Yes 5
Sweden Yes Yes Yes 11
Switzerland Yes Yes Yes 12
United States Yes Yes Yes 11
Total: 14 NOCs 10 10 10 114
Competition schedule
The Beijing National Aquatics Centre served as the venue of the curling competitions.
Curling competitions started two days before the Opening Ceremony and finished on the last day of the games, meaning the sport was the only one to have had a competition every day of the games. The following was the competition schedule for the curling competitions:
RR Round robin SF Semifinals B 3rd place play-off F Final
Date
Event
Wed 2 Thu 3 Fri 4 Sat 5 Sun 6 Mon 7 Tue 8 Wed 9 Thu 10 Fri 11 Sat 12 Sun 13 Mon 14 Tue 15 Wed 16 Thu 17 Fri 18 Sat 19 Sun 20
Men's tournament RR RR RR RR RR RR RR RR RR SF B F
Women's tournament RR RR RR RR RR RR RR RR SF B F
Mixed doubles RR RR RR RR RR RR SF B F
Medal summary
Medal table
Rank Nation Gold Silver Bronze Total
1 Great Britain 1 1 0 2
2 Sweden 1 0 2 3
3 Italy 1 0 0 1
4 Japan 0 1 0 1
Norway 0 1 0 1
6 Canada 0 0 1 1
Totals (6 entries) 3 3 3 9
Medalists
Event Gold Silver Bronze
Men
details Sweden
Niklas Edin
Oskar Eriksson
Rasmus Wranå
Christoffer Sundgren
Daniel Magnusson Great Britain
Bruce Mouat
Grant Hardie
Bobby Lammie
Hammy McMillan Jr.
Ross Whyte Canada
Brad Gushue
Mark Nichols
Brett Gallant
Geoff Walker
Marc Kennedy
Women
details Great Britain
Eve Muirhead
Vicky Wright
Jennifer Dodds
Hailey Duff
Mili Smith Japan
Satsuki Fujisawa
Chinami Yoshida
Yumi Suzuki
Yurika Yoshida
Kotomi Ishizaki Sweden
Anna Hasselborg
Sara McManus
Agnes Knochenhauer
Sofia Mabergs
Johanna Heldin
Mixed doubles
details Italy
Stefania Constantini
Amos Mosaner Norway
Kristin Skaslien
Magnus Nedregotten Sweden
Almida de Val
Oskar Eriksson
Teams
Men
Canada China Denmark Great Britain Italy
Skip: Brad Gushue
Third: Mark Nichols
Second: Brett Gallant
Lead: Geoff Walker
Alternate: Marc Kennedy
Skip: Ma Xiuyue
Third: Zou Qiang
Second: Wang Zhiyu
Lead: Xu Jingtao
Alternate: Jiang Dongxu
Skip: Mikkel Krause
Third: Mads Nørgård
Second: Henrik Holtermann
Lead: Kasper Wiksten
Alternate: Tobias Thune
Skip: Bruce Mouat
Third: Grant Hardie
Second: Bobby Lammie
Lead: Hammy McMillan Jr.
Alternate: Ross Whyte
Skip: Joël Retornaz
Third: Amos Mosaner
Second: Sebastiano Arman
Lead: Simone Gonin
Alternate: Mattia Giovanella
Norway ROC Sweden Switzerland United States
Skip: Steffen Walstad
Third: Torger Nergård
Second: Markus Høiberg
Lead: Magnus Vågberg
Alternate: Magnus Nedregotten
Skip: Sergey Glukhov
Third: Evgeny Klimov
Second: Dmitry Mironov
Lead: Anton Kalalb
Alternate: Daniil Goriachev
Skip: Niklas Edin
Third: Oskar Eriksson
Second: Rasmus Wranå
Lead: Christoffer Sundgren
Alternate: Daniel Magnusson
Fourth: Benoît Schwarz
Third: Sven Michel
Skip: Peter de Cruz
Lead: Valentin Tanner
Alternate: Pablo Lachat
Skip: John Shuster
Third: Chris Plys
Second: Matt Hamilton
Lead: John Landsteiner
Alternate: Colin Hufman
Women
Canada China Denmark Great Britain Japan
Skip: Jennifer Jones
Third: Kaitlyn Lawes
Second: Jocelyn Peterman
Lead: Dawn McEwen
Alternate: Lisa Weagle
Skip: Han Yu
Third: Wang Rui
Second: Dong Ziqi
Lead: Zhang Lijun
Alternate: Jiang Xindi
Skip: Madeleine Dupont
Third: Mathilde Halse
Second: Denise Dupont
Lead: My Larsen
Alternate: Jasmin Lander
Skip: Eve Muirhead
Third: Vicky Wright
Second: Jennifer Dodds
Lead: Hailey Duff
Alternate: Mili Smith
Skip: Satsuki Fujisawa
Third: Chinami Yoshida
Second: Yumi Suzuki
Lead: Yurika Yoshida
Alternate: Kotomi Ishizaki
ROC South Korea Sweden Switzerland United States
Skip: Alina Kovaleva
Third: Yulia Portunova
Second: Galina Arsenkina
Lead: Ekaterina Kuzmina
Alternate: Maria Komarova
Skip: Kim Eun-jung
Third: Kim Kyeong-ae
Second: Kim Cho-hi
Lead: Kim Seon-yeong
Alternate: Kim Yeong-mi
Skip: Anna Hasselborg
Third: Sara McManus
Second: Agnes Knochenhauer
Lead: Sofia Mabergs
Alternate: Johanna Heldin
Fourth: Alina Pätz
Skip: Silvana Tirinzoni
Second: Esther Neuenschwander
Lead: Melanie Barbezat
Alternate: Carole Howald
Skip: Tabitha Peterson
Third: Nina Roth
Second: Becca Hamilton
Lead: Tara Peterson
Alternate: Aileen Geving
Mixed doubles
Australia Canada China Czech Republic Great Britain
Female: Tahli Gill
Male: Dean Hewitt
Female: Rachel Homan
Male: John Morris
Female: Fan Suyuan
Male: Ling Zhi
Female: Zuzana Paulová
Male: Tomáš Paul
Female: Jennifer Dodds
Male: Bruce Mouat
Italy Norway Sweden Switzerland United States
Female: Stefania Constantini
Male: Amos Mosaner
Female: Kristin Skaslien
Male: Magnus Nedregotten
Female: Almida de Val
Male: Oskar Eriksson
Female: Jenny Perret
Male: Martin Rios
Female: Vicky Persinger
Male: Chris Plys
"""
load_dotenv()
client = AzureOpenAI(
api_key=os.getenv("AZURE_OPENAI_KEY"),
api_version="2023-10-01-preview",
azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
)
deployment_name='GPT4'
deployment_name="gpt35turbo"
# an example question about the 2022 Olympics
query = 'Which athletes won the gold medal in curling at the 2022 Winter Olympics?'
query = f"""Use the below article on the 2022 Winter Olympics to answer the subsequent question. If the answer cannot be found, write "I don't know."
Article:
\"\"\"
{wikipedia_article_on_curling}
\"\"\"
Question: Which athletes won the gold medal in curling at the 2022 Winter Olympics?"""
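# The whole article is stuffed into the prompt as grounding context, and the model is told to
# answer only from it (or reply "I don't know") -- a simple retrieval-free way to supply
# knowledge the base model may lack.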
response = client.chat.completions.create(
model = deployment_name,
messages=[
{"role": "system", "content": "You answer questions about the 2022 Winter Olympics."},
{"role": "user", "content": query}
]
)
#print(response.model_dump_json(indent=2))
print(response.choices[0].message.content) | [
"You answer questions about the 2022 Winter Olympics."
] |
2024-01-10 | 1ou2/docdoctor | util~embedtest.py | # imports
import ast
from math import cos # for converting embeddings saved as strings back to arrays
from openai import AzureOpenAI # for calling the OpenAI API
import pandas as pd # for storing text and embeddings data
import tiktoken # for counting tokens
import os # for getting API token from env variable OPENAI_API_KEY
from scipy import spatial # for calculating vector similarities for search
from dotenv import load_dotenv
import numpy as np
EMBEDDING_MODEL = "textembedding"
def query():
    # models
    EMBEDDING_MODEL = "textembedding"
    GPT_MODEL = "gpt-3.5-turbo"
    load_dotenv()
    client = AzureOpenAI(
        api_key=os.getenv("AZURE_OPENAI_KEY"),
        api_version="2023-10-01-preview",
        azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT")
    )
    deployment_name = 'GPT4'
    deployment_name = "gpt35turbo"
    # an example question about the 2022 Olympics
    query = 'Which athletes won the gold medal in curling at the 2022 Winter Olympics?'
    response = client.chat.completions.create(
        model=deployment_name,
        messages=[
            {"role": "system", "content": "You answer questions about the 2022 Winter Olympics."},
            {"role": "user", "content": query}
        ]
    )
    # print(response.model_dump_json(indent=2))
    print(response.choices[0].message.content)
def load_data():
    embeddings_path = "winter_olympics_2022.csv"
    df = pd.read_csv(embeddings_path)
    # convert embeddings from CSV str type back to list type
    df['embedding'] = df['embedding'].apply(ast.literal_eval)
    return df
def pddata():
    embeddings_path = "winter_olympics_2022.csv"
    df = pd.read_csv(embeddings_path)
    #print(df)
    #for i in range(10):
    #    print(df.iloc[i].loc["embedding"])
    print("########")
    print(df.iloc[3].loc["embedding"])
    print("########")
    # convert embeddings from CSV str type back to list type
    #df['embedding'] = df['embedding'].apply(ast.literal_eval)
    print("--------")
    print(df.iloc[3].loc["embedding"])
    print("===========")
    print(df["text"][100])
    print("===========")
    print(df["embedding"][100])
# search function
def strings_ranked_by_relatedness(
    query: str,
    df: pd.DataFrame,
    relatedness_fn=lambda x, y: 1 - spatial.distance.cosine(x, y),
    top_n: int = 100
) -> tuple[list[str], list[float]]:
    """Returns a list of strings and relatednesses, sorted from most related to least."""
    # Use the module-level Azure client (the original referenced an un-imported `openai`).
    query_embedding_response = client.embeddings.create(
        model=EMBEDDING_MODEL,
        input=query,
    )
    query_embedding = query_embedding_response.data[0].embedding
    strings_and_relatednesses = [
        (row["text"], relatedness_fn(query_embedding, row["embedding"]))
        for i, row in df.iterrows()
    ]
    strings_and_relatednesses.sort(key=lambda x: x[1], reverse=True)
    strings, relatednesses = zip(*strings_and_relatednesses)
    return strings[:top_n], relatednesses[:top_n]
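# Hypothetical usage, assuming `df = load_data()` has been called and an embeddings
# deployment named EMBEDDING_MODEL is available on the Azure client:
#   strings, relatednesses = strings_ranked_by_relatedness("curling gold medalists", df, top_n=5)
#   for s, r in zip(strings, relatednesses):
#       print(f"{r:.3f}  {s[:80]}")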
def generate_embeddings(text, model="textembedding"):  # model = "deployment_name"
    return client.embeddings.create(input=[text], model=model).data[0].embedding

def cosine_similarity(a, b):
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
if __name__ == "__main__":
#df = load_data()
load_dotenv()
client = AzureOpenAI(
api_key = os.getenv("AZURE_OPENAI_KEY"),
api_version = "2023-05-15",
azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
)
#pddata()
df = load_data()
emb1 = df["embedding"][100]
text = df["text"][100]
print("===***********")
emb2 = client.embeddings.create(input = [text], model=EMBEDDING_MODEL).data[0].embedding
print(emb2)
similarity = cosine_similarity(emb1,emb2)
print(f"simililarity : {similarity}")
#df_bills['ada_v2'] = df_bills["text"].apply(lambda x : generate_embeddings (x, model = 'text-embedding-ada-002')) | [
"You answer questions about the 2022 Winter Olympics."
] |
2024-01-10 | jaemyoung/SeoulTech-Research-Frontier | LDA_topic_optimalize.py |
from tqdm import tqdm
from gensim.models.ldamodel import LdaModel
from gensim.models.callbacks import CoherenceMetric
from gensim import corpora
from gensim.models.callbacks import PerplexityMetric
import logging
import pickle
from gensim.models.coherencemodel import CoherenceModel
import matplotlib.pyplot as plt
import numpy as np
# coherence function
def coherence_optimal_number_of_topics(dictionary, corpus, processed_data):
    limit = 60  # last number of topics to try
    start = 10  # first number of topics to try
    step = 5
    coherence_values = []
    for num_topics in range(start, limit, step):
        lda_model = LdaModel(corpus, id2word=dictionary, num_topics=num_topics, passes=30, iterations=100, random_state=1004)
        cm = CoherenceModel(model=lda_model, corpus=corpus, coherence='u_mass')
        coherence_values.append(cm.get_coherence())
    x = range(start, limit, step)
    plt.plot(x, coherence_values)
    plt.xlabel("Num Topics")
    plt.ylabel("Coherence")
    plt.show()
# perplexity function
def perplexity_optimal_number_of_topics(dictionary, corpus, processed_data):
    limit = 60  # last number of topics to try
    start = 10  # first number of topics to try
    step = 5
    perplexity_values = []
    for num_topics in range(start, limit, step):
        lda_model = LdaModel(corpus, id2word=dictionary, num_topics=num_topics, passes=10, iterations=100, random_state=1004)
        perplexity_values.append(lda_model.log_perplexity(corpus))
    x = range(start, limit, step)
    plt.plot(x, perplexity_values)
    plt.xlabel("Num Topics")
    plt.ylabel("Perplexity")
    plt.show()
###########################################################################
# Run
# open the pickle file of preprocessed documents
with open('C:/Users/user/Documents/GitHub/SeoulTech-Research-Frontier/data/preprocessing_data(4046).pickle', "rb") as fr:
    tokenized_doc = pickle.load(fr)
# remove words that appear too rarely or in too many documents
dictionary = corpora.Dictionary(tokenized_doc)
dictionary.filter_extremes(no_below=10, no_above=0.05)
corpus = [dictionary.doc2bow(text) for text in tokenized_doc]
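# doc2bow converts each tokenized document into a sparse bag-of-words list of (token_id, count) pairs.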
print('Number of unique tokens: %d' % len(dictionary))
print('Number of documents: %d' % len(corpus))
# find the optimal number of topics
coherence_optimal_number_of_topics(dictionary, corpus, tokenized_doc)
perplexity_optimal_number_of_topics(dictionary, corpus, tokenized_doc)
###########################################################################
# find the optimal number of passes (with num_topics fixed)
coherences = []
perplexities = []
passes = []
for p in range(10, 110, 10):
    passes.append(p)
    lda = LdaModel(corpus, id2word=dictionary, num_topics=30, iterations=400, passes=p)
    cm = CoherenceModel(model=lda, corpus=corpus, coherence='u_mass')
    coherence = cm.get_coherence()
    print("Coherence", coherence)
    coherences.append(coherence)
    print('Perplexity: ', lda.log_perplexity(corpus), '\n\n')
    perplexities.append(lda.log_perplexity(corpus))

# plot coherence and perplexity against the number of passes
x = range(10, 110, 10)
plt.plot(x, coherences)
plt.xlabel("Pass")
plt.ylabel("Coherence")
plt.show()
x = range(10, 110, 10)
plt.plot(x, perplexities)
plt.xlabel("Pass")
plt.ylabel("Perplexity")
plt.show()
| [] |
2024-01-10 | frankdji/ChatPaperPlus | chat_paper_plus.py | import numpy as np
import os
import re
import datetime
import arxiv
import openai, tenacity
import base64, requests
import argparse
import configparser
import json
import tiktoken
from get_paper_from_pdf import Paper
# Define the Reader class
class Reader:
# Initialization method: set attributes
def __init__(self, key_word, query, filter_keys,
root_path='./',
gitee_key='',
sort=arxiv.SortCriterion.SubmittedDate, user_name='defualt', args=None):
self.user_name = user_name # reader name
self.key_word = key_word # keywords the reader is interested in
self.query = query # the search query entered by the reader
self.sort = sort # the sort order chosen by the reader
if args.language == 'en':
self.language = 'English'
elif args.language == 'zh':
self.language = 'Chinese'
else:
self.language = 'Chinese'
self.filter_keys = filter_keys # keywords used to filter abstracts
self.root_path = root_path
# Create a ConfigParser object
self.config = configparser.ConfigParser()
# Read the config file
self.config.read('apikey.ini')
# Get the value for a given key
self.chat_api_list = self.config.get('OpenAI', 'OPENAI_API_KEYS')[1:-1].replace('\'', '').split(',')
self.chat_api_list = [api.strip() for api in self.chat_api_list if len(api) > 5]
self.cur_api = 0
self.file_format = args.file_format
if args.save_image:
self.gitee_key = self.config.get('Gitee', 'api')
else:
self.gitee_key = ''
self.max_token_num = 4096
self.encoding = tiktoken.get_encoding("gpt2")
def get_arxiv(self, max_results=30):
search = arxiv.Search(query=self.query,
max_results=max_results,
sort_by=self.sort,
sort_order=arxiv.SortOrder.Descending,
)
return search
def filter_arxiv(self, max_results=30):
search = self.get_arxiv(max_results=max_results)
print("all search:")
for index, result in enumerate(search.results()):
print(index, result.title, result.updated)
filter_results = []
filter_keys = self.filter_keys
print("filter_keys:", self.filter_keys)
# a paper counts as a target only if every keyword can be found in its abstract
for index, result in enumerate(search.results()):
abs_text = result.summary.replace('-\n', '-').replace('\n', ' ')
meet_num = 0
for f_key in filter_keys.split(" "):
if f_key.lower() in abs_text.lower():
meet_num += 1
if meet_num == len(filter_keys.split(" ")):
filter_results.append(result)
# break
print("筛选后剩下的论文数量:")
print("filter_results:", len(filter_results))
print("filter_papers:")
for index, result in enumerate(filter_results):
print(index, result.title, result.updated)
return filter_results
def validateTitle(self, title):
# Sanitize the paper title so it can be used as a file path
rstr = r"[\/\\\:\*\?\"\<\>\|]" # '/ \ : * ? " < > |'
new_title = re.sub(rstr, "_", title) # replace illegal characters with underscores
return new_title
def download_pdf(self, filter_results):
# Create the target folder first
date_str = str(datetime.datetime.now())[:13].replace(' ', '-')
key_word = str(self.key_word.replace(':', ' '))
path = self.root_path + 'pdf_files/' + self.query.replace('au: ', '').replace('title: ', '').replace('ti: ',
'').replace(
':', ' ')[:25] + '-' + date_str
try:
os.makedirs(path)
except:
pass
print("All_paper:", len(filter_results))
# Start downloading:
paper_list = []
for r_index, result in enumerate(filter_results):
try:
title_str = self.validateTitle(result.title)
pdf_name = title_str + '.pdf'
# result.download_pdf(path, filename=pdf_name)
self.try_download_pdf(result, path, pdf_name)
paper_path = os.path.join(path, pdf_name)
print("paper_path:", paper_path)
paper = Paper(path=paper_path,
url=result.entry_id,
title=result.title,
abs=result.summary.replace('-\n', '-').replace('\n', ' '),
authers=[str(aut) for aut in result.authors],
)
# Download finished, start parsing:
paper.parse_pdf()
paper_list.append(paper)
except Exception as e:
print("download_error:", e)
pass
return paper_list
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def try_download_pdf(self, result, path, pdf_name):
result.download_pdf(path, filename=pdf_name)
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def upload_gitee(self, image_path, image_name='', ext='png'):
"""
上传到码云
:return:
"""
with open(image_path, 'rb') as f:
base64_data = base64.b64encode(f.read())
base64_content = base64_data.decode()
date_str = str(datetime.datetime.now())[:19].replace(':', '-').replace(' ', '-') + '.' + ext
path = image_name + '-' + date_str
payload = {
"access_token": self.gitee_key,
"owner": self.config.get('Gitee', 'owner'),
"repo": self.config.get('Gitee', 'repo'),
"path": self.config.get('Gitee', 'path'),
"content": base64_content,
"message": "upload image"
}
# Change this to your own Gitee account, repository name, and folder name:
url = f'https://gitee.com/api/v5/repos/' + self.config.get('Gitee', 'owner') + '/' + self.config.get('Gitee',
'repo') + '/contents/' + self.config.get(
'Gitee', 'path') + '/' + path
rep = requests.post(url, json=payload).json()
print("rep:", rep)
if 'content' in rep.keys():
image_url = rep['content']['download_url']
else:
image_url = r"https://gitee.com/api/v5/repos/" + self.config.get('Gitee', 'owner') + '/' + self.config.get(
'Gitee', 'repo') + '/contents/' + self.config.get('Gitee', 'path') + '/' + path
return image_url
def summary_with_chat(self, paper_list):
htmls = []
for paper_index, paper in enumerate(paper_list):
# Step 1: summarize using the title, abstract, and introduction.
text = ''
text += 'Title:' + paper.title
text += 'Url:' + paper.url
text += 'Abstract:' + paper.abs
text += 'Paper_info:' + paper.section_text_dict['paper_info']
# intro
text += list(paper.section_text_dict.values())[0]
chat_summary_text = ""
try:
chat_summary_text = self.chat_summary(text=text)
except Exception as e:
print("summary_error:", e)
if "maximum context" in str(e):
current_tokens_index = str(e).find("your messages resulted in") + len(
"your messages resulted in") + 1
offset = int(str(e)[current_tokens_index:current_tokens_index + 4])
summary_prompt_token = offset + 1000 + 150
chat_summary_text = self.chat_summary(text=text, summary_prompt_token=summary_prompt_token)
htmls.append('# Paper:' + paper.title) # zzn
# htmls.append('## Paper:' + str(paper_index+1))
htmls.append('\n\n\n')
htmls.append('# Paper overview')
htmls.append(chat_summary_text)
# Step 2: summarize the methods.
# TODO: some papers name the methods section after their algorithm, so simple keyword matching can miss it; a better approach is needed later.
htmls.append('# Methods proposed in the paper')
method_key = ''
for parse_key in paper.section_text_dict.keys():
if 'method' in parse_key.lower() or 'approach' in parse_key.lower():
method_key = parse_key
break
if method_key != '':
text = ''
method_text = ''
summary_text = ''
summary_text += "<summary>" + chat_summary_text
# methods
method_text += paper.section_text_dict[method_key]
text = summary_text + "\n\n<Methods>:\n\n" + method_text
chat_method_text = ""
try:
chat_method_text = self.chat_method(text=text)
except Exception as e:
print("method_error:", e)
if "maximum context" in str(e):
current_tokens_index = str(e).find("your messages resulted in") + len(
"your messages resulted in") + 1
offset = int(str(e)[current_tokens_index:current_tokens_index + 4])
method_prompt_token = offset + 800 + 150
chat_method_text = self.chat_method(text=text, method_prompt_token=method_prompt_token)
htmls.append(chat_method_text)
else:
chat_method_text = ''
htmls.append("\n"*4)
# Step 3: summarize the experiments.
htmls.append('# Main experimental results')
experi_key = ''
for parse_key in paper.section_text_dict.keys():
if 'experiment' in parse_key.lower() or 'experiments' in parse_key.lower():
experi_key = parse_key
break
if experi_key != '':
text = ''
experi_text = ''
summary_text = ''
summary_text += "<summary>" + chat_summary_text + "\n\n <Method summary>:\n\n" + chat_method_text
experi_text += paper.section_text_dict[experi_key]
text = "\n\n<Experiment>:\n\n" + experi_text
# text = summary_text + "\n\n<Experiment>:\n\n" + experi_text
chat_experi_text = ""
chat_experi_text = self.chat_experi(text=text) #TO DO
# try:
# chat_experi_text = self.chat_experi(text=text)
# except Exception as e:
# print("experi_error:", e)
# if "maximum context" in str(e):
# current_tokens_index = str(e).find("your messages resulted in") + len(
# "your messages resulted in") + 1
# offset = int(str(e)[current_tokens_index:current_tokens_index + 4])
# experi_prompt_token = offset + 1000 + 150
# chat_experi_text = self.chat_experi(text=text, experi_prompt_token=experi_prompt_token)
htmls.append(chat_experi_text)
else:
chat_experi_text = ''
htmls.append("\n" * 4)
# Step 4: summarize the whole paper and give a rating.
htmls.append('# Overall summary')
conclusion_key = ''
for parse_key in paper.section_text_dict.keys():
if 'conclu' in parse_key.lower():
conclusion_key = parse_key
break
text = ''
conclusion_text = ''
summary_text = ''
summary_text += "<summary>" + chat_summary_text + "\n <Method summary>:\n" + chat_method_text + "\n <Experiment summary>:\n" + chat_experi_text
if conclusion_key != '':
# conclusion
conclusion_text += paper.section_text_dict[conclusion_key]
text = summary_text + "\n\n<Conclusion>:\n\n" + conclusion_text
else:
text = summary_text
chat_conclusion_text = ""
try:
chat_conclusion_text = self.chat_conclusion(text=text)
except Exception as e:
print("conclusion_error:", e)
if "maximum context" in str(e):
current_tokens_index = str(e).find("your messages resulted in") + len(
"your messages resulted in") + 1
offset = int(str(e)[current_tokens_index:current_tokens_index + 4])
conclusion_prompt_token = offset + 800 + 150
chat_conclusion_text = self.chat_conclusion(text=text,
conclusion_prompt_token=conclusion_prompt_token)
htmls.append(chat_conclusion_text)
htmls.append("\n" * 4)
# Combine everything into a single file and save it.
date_str = str(datetime.datetime.now())[:13].replace(' ', '-')
try:
export_path = os.path.join(self.root_path, 'export')
os.makedirs(export_path)
except:
pass
mode = 'w' if paper_index == 0 else 'a'
file_name = os.path.join(export_path,
date_str + '-' + self.validateTitle(paper.title[:80]) + "." + self.file_format)
self.export_to_markdown("\n".join(htmls), file_name=file_name, mode=mode)
# file_name = os.path.join(export_path, date_str+'-'+self.validateTitle(paper.title)+".md")
# self.export_to_markdown("\n".join(htmls), file_name=file_name, mode=mode)
htmls = []
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def chat_conclusion(self, text, conclusion_prompt_token=800):
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list) - 1 else self.cur_api
text_token = len(self.encoding.encode(text))
clip_text_index = int(len(text) * (self.max_token_num - conclusion_prompt_token) / text_token)
clip_text = text[:clip_text_index]
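# The input is clipped proportionally by characters so that, after reserving
# conclusion_prompt_token tokens for the fixed prompt, the remainder roughly fits within max_token_num tokens.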
messages = [
{"role": "system",
"content": "You are a reviewer in the field of [" + self.key_word + "] and you need to critically review this article"},
# ChatGPT role
{"role": "assistant",
"content": "This is the <summary> and <conclusion> part of an English literature, where <summary> you have already summarized, but <conclusion> part, I need your help to summarize the following questions:" + clip_text},
# background knowledge; the OpenReview review process can serve as a reference
{"role": "user", "content": """
8. Make the following summary.Be sure to use {} answers (proper nouns need to be marked in English).
- (1):What is the significance of this piece of work?
- (2):Summarize the strengths and weaknesses of this article in three dimensions: innovation point, performance, and workload.
.......
Follow the format of the output later:
## 9. Conclusion: \n\n
- (1):xxx;\n
- (2):Innovation point: xxx; Performance: xxx; Workload: xxx;\n
Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write.
""".format(self.language, self.language)},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
# the prompt is kept in English to use fewer tokens.
messages=messages,
)
result = ''
for choice in response.choices:
result += choice.message.content
print("conclusion_result:\n", result)
print("prompt_token_used:", response.usage.prompt_tokens,
"completion_token_used:", response.usage.completion_tokens,
"total_token_used:", response.usage.total_tokens)
print("response_time:", response.response_ms / 1000.0, 's')
return result
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def chat_method(self, text, method_prompt_token=800):
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list) - 1 else self.cur_api
text_token = len(self.encoding.encode(text))
clip_text_index = int(len(text) * (self.max_token_num - method_prompt_token) / text_token)
clip_text = text[:clip_text_index]
messages = [
{"role": "system",
"content": "You are a researcher in the field of [" + self.key_word + "] who is good at summarizing papers using concise statements"},
# ChatGPT role
{"role": "assistant",
"content": "This is the <summary> and <Method> part of an English document, where <summary> you have summarized, but the <Methods> part, I need your help to read and summarize the following questions." + clip_text},
# background knowledge
{"role": "user", "content": """
7. Describe in detail the methodological idea of this article. Be sure to use {} answers (proper nouns need to be marked in English). For example, its steps are.
- (1):...
- (2):...
- (3):...
- .......
Follow the format of the output that follows:
## 7. Methods: \n\n
- (1):xxx;\n
- (2):xxx;\n
- (3):xxx;\n
....... \n\n
Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write.
""".format(self.language, self.language)},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
result = ''
for choice in response.choices:
result += choice.message.content
print("method_result:\n", result)
print("prompt_token_used:", response.usage.prompt_tokens,
"completion_token_used:", response.usage.completion_tokens,
"total_token_used:", response.usage.total_tokens)
print("response_time:", response.response_ms / 1000.0, 's')
return result
# @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
# stop=tenacity.stop_after_attempt(5),
# reraise=True)
def chat_experi(self, text, experi_prompt_token=2000):
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list) - 1 else self.cur_api
text_token = len(self.encoding.encode(text))
clip_text_index = int(len(text) * (self.max_token_num - experi_prompt_token) / text_token)
clip_text = text[:clip_text_index]
messages = [
{"role": "system",
"content": "You are a researcher in the field of [" + self.key_word + "] who is good at summarizing papers using concise statements"},
# ChatGPT role
{"role": "assistant",
"content": "This is the <Experiment> part of an English document, I need your help to read the <Experiment> part and summarize the following questions." + clip_text},
# background knowledge
{"role": "user", "content": """
8. summarize according to the following four points. Be sure to use {} answers (proper nouns need to be marked in English)
- (1):What specific datasets are used in the experiments in the Experiment chapter of this paper?
- (2):What evaluation metrics or protocols were used in the experiment chapter of this paper to evaluate the proposed method?
- (3):Please summarize the definitions of these metrics or protocols which were used in the experiment chapter of this paper.
- (4):Please summarize the experimental results of the proposed methods on each dataset in tabular form. The results must be summarized in tabular form. (proper nouns need to be marked in English)
Follow the format of the output that follows:
## 8. Experiments: \n\n
- (1):xxx;\n
- (2):xxx;\n
- (3):xxx;\n
- (4):xxx;\n
....... \n\n
Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not have too much repetitive information, numerical values using the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed.
""".format(self.language, self.language)},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
result = ''
for choice in response.choices:
result += choice.message.content
print("experi_result:\n", result)
print("prompt_token_used:", response.usage.prompt_tokens,
"completion_token_used:", response.usage.completion_tokens,
"total_token_used:", response.usage.total_tokens)
print("response_time:", response.response_ms / 1000.0, 's')
return result
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def chat_summary(self, text, summary_prompt_token=1100):
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list) - 1 else self.cur_api
text_token = len(self.encoding.encode(text))
clip_text_index = int(len(text) * (self.max_token_num - summary_prompt_token) / text_token)
clip_text = text[:clip_text_index]
messages = [
{"role": "system",
"content": "You are a researcher in the field of [" + self.key_word + "] who is good at summarizing papers using concise statements"},
{"role": "assistant",
"content": "This is the title, author, link, abstract and introduction of an English document. I need your help to read and summarize the following questions: " + clip_text},
{"role": "user", "content": """
1. Mark the title of the paper (with Chinese translation)
2. list all the authors' names (use English)
3. mark the first author's affiliation (output {} translation only)
4. mark the keywords of this article (use English)
5. link to the paper, Github code link (if available, fill in Github:None if not)
6. summarize according to the following four points.Be sure to use {} answers (proper nouns need to be marked in English)
- (1):What is the research background of this article?
- (2):What are the past methods? What are the problems with them? Is the approach well motivated?
- (3):What is the research methodology proposed in this paper?
- (4):On what task and what performance is achieved by the methods in this paper? Can the performance support their goals?
Follow the format of the output that follows:
### 1. Title: \n
- xxx\n\n
### 2. Authors: \n
- xxx\n\n
### 3. Affiliation: \n
- xxx\n\n
### 4. Keywords: \n
- xxx\n\n
### 5. Urls and Code
- Urls: xxx or xxx , xxx \n\n
- Gothub Code: xxx or xxx , xxx \n\n
### 6. Summary: \n\n
- (1):xxx;\n
- (2):xxx;\n
- (3):xxx;\n
- (4):xxx.\n\n
Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not have too much repetitive information, numerical values using the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed.
""".format(self.language, self.language, self.language)},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
result = ''
for choice in response.choices:
result += choice.message.content
print("summary_result:\n", result)
print("prompt_token_used:", response.usage.prompt_tokens,
"completion_token_used:", response.usage.completion_tokens,
"total_token_used:", response.usage.total_tokens)
print("response_time:", response.response_ms / 1000.0, 's')
return result
def export_to_markdown(self, text, file_name, mode='w'):
# The markdown module's convert method could turn the text into HTML:
# html = markdown.markdown(text)
# Open the file in write mode
with open(file_name, mode, encoding="utf-8") as f:
# Write the content to the file
f.write(text)
# Define a method that prints the reader's information
def show_info(self):
print(f"Key word: {self.key_word}")
print(f"Query: {self.query}")
print(f"Sort: {self.sort}")
def main(args):
# Create a Reader object and call its show_info method
if args.sort == 'Relevance':
sort = arxiv.SortCriterion.Relevance
elif args.sort == 'LastUpdatedDate':
sort = arxiv.SortCriterion.LastUpdatedDate
else:
sort = arxiv.SortCriterion.Relevance
if args.pdf_path:
reader1 = Reader(key_word=args.key_word,
query=args.query,
filter_keys=args.filter_keys,
sort=sort,
args=args
)
reader1.show_info()
# Determine whether the argument is a directory or a single file:
paper_list = []
if args.pdf_path.endswith(".pdf"):
paper_list.append(Paper(path=args.pdf_path))
else:
for root, dirs, files in os.walk(args.pdf_path):
print("root:", root, "dirs:", dirs, 'files:', files) # 当前目录路径
for filename in files:
# If a PDF file is found, add it to the paper list
if filename.endswith(".pdf"):
paper_list.append(Paper(path=os.path.join(root, filename)))
print("------------------paper_num: {}------------------".format(len(paper_list)))
[print(paper_index, paper_name.path.split('\\')[-1]) for paper_index, paper_name in enumerate(paper_list)]
reader1.summary_with_chat(paper_list=paper_list)
else:
reader1 = Reader(key_word=args.key_word,
query=args.query,
filter_keys=args.filter_keys,
sort=sort,
args=args
)
reader1.show_info()
filter_results = reader1.filter_arxiv(max_results=args.max_results)
paper_list = reader1.download_pdf(filter_results)
reader1.summary_with_chat(paper_list=paper_list)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# parser.add_argument("--pdf_path", type=str, default=r'demo.pdf', help="if none, the bot will download from arxiv with query")
# parser.add_argument("--pdf_path", type=str, default=r'C:\Users\Administrator\Desktop\DHER\RHER_Reset\ChatPaper', help="if none, the bot will download from arxiv with query")
parser.add_argument("--pdf_path", type=str, default='', help="if none, the bot will download from arxiv with query")
parser.add_argument("--query", type=str, default='all: ChatGPT robot',
help="the query string, ti: xx, au: xx, all: xx,")
parser.add_argument("--key_word", type=str, default='reinforcement learning',
help="the key word of user research fields")
parser.add_argument("--filter_keys", type=str, default='ChatGPT robot',
help="the filter key words, 摘要中每个单词都得有,才会被筛选为目标论文")
parser.add_argument("--max_results", type=int, default=1, help="the maximum number of results")
# arxiv.SortCriterion.Relevance
parser.add_argument("--sort", type=str, default="Relevance", help="another is LastUpdatedDate")
parser.add_argument("--save_image", default=False,
help="save image? It takes a minute or two to save a picture! But pretty")
parser.add_argument("--file_format", type=str, default='md', help="导出的文件格式,如果存图片的话,最好是md,如果不是的话,txt的不会乱")
parser.add_argument("--language", type=str, default='zh', help="The other output lauguage is English, is en")
args = parser.parse_args()
import time
start_time = time.time()
main(args=args)
print("summary time:", time.time() - start_time)
| [
" \n 8. summarize according to the following four points. Be sure to use {} answers (proper nouns need to be marked in English)\n - (1):What specific datasets are used in the experiments in the Experiment chapter of this paper? \n - (2):What evaluation metrics or protocols were used in the experiment chapter of this paper to evaluate the proposed method? \n - (3):Please summarize the definitions of these metrics or protocols which were used in the experiment chapter of this paper.\n - (4):Please summarize the experimental results of the proposed methods on each dataset in tabular form. The results must be summarized in tabular form. (proper nouns need to be marked in English)\n\n Follow the format of the output that follows: \n ## 8. Experiments: \n\n\n - (1):xxx;\n \n - (2):xxx;\n\n - (3):xxx;\n \n - (4):xxx;\n \n ....... \n\n \n\n Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not have too much repetitive information, numerical values using the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed.\n ",
" \n 1. Mark the title of the paper (with Chinese translation)\n 2. list all the authors' names (use English)\n 3. mark the first author's affiliation (output {} translation only) \n 4. mark the keywords of this article (use English)\n 5. link to the paper, Github code link (if available, fill in Github:None if not)\n 6. summarize according to the following four points.Be sure to use {} answers (proper nouns need to be marked in English)\n - (1):What is the research background of this article?\n - (2):What are the past methods? What are the problems with them? Is the approach well motivated?\n - (3):What is the research methodology proposed in this paper?\n - (4):On what task and what performance is achieved by the methods in this paper? Can the performance support their goals?\n Follow the format of the output that follows: \n ### 1. Title: \n \n - xxx\n\n\n ### 2. Authors: \n \n - xxx\n\n\n ### 3. Affiliation: \n\n - xxx\n\n \n ### 4. Keywords: \n\n - xxx\n\n \n ### 5. Urls and Code\n - Urls: xxx or xxx , xxx \n\n\n - Gothub Code: xxx or xxx , xxx \n\n \n ### 6. Summary: \n\n\n - (1):xxx;\n \n - (2):xxx;\n \n - (3):xxx;\n \n - (4):xxx.\n\n \n\n Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not have too much repetitive information, numerical values using the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed. \n ",
" \n 7. Describe in detail the methodological idea of this article. Be sure to use {} answers (proper nouns need to be marked in English). For example, its steps are.\n - (1):...\n - (2):...\n - (3):...\n - .......\n Follow the format of the output that follows: \n ## 7. Methods: \n\n\n - (1):xxx;\n \n - (2):xxx;\n \n - (3):xxx;\n \n ....... \n\n \n\n Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write. \n ",
"This is the <Experiment> part of an English document, I need your help to read the <Experiment> part and summarize the following questions.PLACEHOLDER",
"] and you need to critically review this article",
"This is the <summary> and <Method> part of an English document, where <summary> you have summarized, but the <Methods> part, I need your help to read and summarize the following questions.PLACEHOLDER",
" \n 8. Make the following summary.Be sure to use {} answers (proper nouns need to be marked in English).\n - (1):What is the significance of this piece of work?\n - (2):Summarize the strengths and weaknesses of this article in three dimensions: innovation point, performance, and workload. \n .......\n Follow the format of the output later: \n ## 9. Conclusion: \n\n\n - (1):xxx;\n \n - (2):Innovation point: xxx; Performance: xxx; Workload: xxx;\n \n\n Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write. \n ",
"] who is good at summarizing papers using concise statements",
"This is the <summary> and <conclusion> part of an English literature, where <summary> you have already summarized, but <conclusion> part, I need your help to summarize the following questions:PLACEHOLDER",
"This is the title, author, link, abstract and introduction of an English document. I need your help to read and summarize the following questions: PLACEHOLDER",
"You are a researcher in the field of [",
"You are a reviewer in the field of ["
] |
2024-01-10 | SerendipityOrg/TradeMan | BackTest~CSVQueryAgent.py | import streamlit as st
import pandas as pd
import plotly.graph_objects as go
import numpy as np
from scipy.stats import norm
from langchain.agents import create_csv_agent
from langchain.llms import OpenAI
llm = OpenAI(openai_api_key="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")
st.set_page_config(page_title="Ask your CSV")
st.header("Ask your CSV 📈")
csv_file = st.file_uploader("Upload a CSV file", type="csv")
if csv_file is not None:
    # Create a CSV agent for your data file
    agent = create_csv_agent(OpenAI(temperature=0, openai_api_key='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'), csv_file, verbose=True)
    user_question = st.text_input("Ask a question about your CSV: ")
    if user_question is not None and user_question != "":
        with st.spinner(text="In progress..."):
            st.write(agent.run(user_question)) | [] |
2024-01-10 | protikmostafa083/t1_whistleOut | model~chatbot.py | import streamlit as st
from streamlit_chat import message
from langchain.document_loaders import CSVLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import CTransformers
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
import sys
DB_FAISS_PATH = "vectorstore/db_faiss"
# loading model
def load_llm():
    # Load the locally downloaded model
    llm = CTransformers(
        model="model/llama-2-7b-chat.ggmlv3.q8_0.bin",
        model_type="llama",
        max_new_tokens=1024,
        temperature=0.1
    )
    return llm
# st.title("Chat with WhistleOut Data")
st.markdown("<h3 style='text-align: center; color: grey'>Chat With WhistleOut</h3>", unsafe_allow_html=True)
st.markdown("<h3 style='text-align: center; color: grey'>UTS MDSI</h3>", unsafe_allow_html=True)
# load the CSV data
loader = CSVLoader(file_path="data/DimUTSProduct.csv", encoding="utf-8", csv_args={
'delimiter': ','
})
data = loader.load()
st.json(data)
# Split the texts into chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=20)
text_chunks = text_splitter.split_documents(data)
embeddings = HuggingFaceEmbeddings(
model_name="sentence-transformers/all-MiniLM-L6-v2",
model_kwargs={'device': 'cpu'}
)
db = FAISS.from_documents(text_chunks, embeddings)
db.save_local(DB_FAISS_PATH)
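# The FAISS index is persisted to disk; it could later be reloaded with
# FAISS.load_local(DB_FAISS_PATH, embeddings) instead of re-embedding the CSV on every run.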
# call the llm
llm = load_llm()
chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=db.as_retriever())
def conversational_chat(query):
    result = chain({"question": query, "chat_history": st.session_state['history']})
    st.session_state['history'].append((query, result["answer"]))
    return result["answer"]

if 'history' not in st.session_state:
    st.session_state['history'] = []
if 'generated' not in st.session_state:
    st.session_state['generated'] = ["Hey! ask me anything about your data!"]
if 'past' not in st.session_state:
    st.session_state['past'] = ["hey!"]

# container for the chat history
response_container = st.container()
container = st.container()

with container:
    with st.form(key='my_form', clear_on_submit=True):
        user_input = st.text_input("Query: ", placeholder="Let's answer your question", key='input')
        submit_button = st.form_submit_button(label="chat")
    if submit_button and user_input:
        output = conversational_chat(user_input)
        st.session_state['past'].append(user_input)
        st.session_state['generated'].append(output)

if st.session_state['generated']:
    with response_container:
        for i in range(len(st.session_state['generated'])):
            message(st.session_state["past"][i], is_user=True, key=str(i) + '_user', avatar_style="big-smile")
            message(st.session_state["generated"][i], key=str(i), avatar_style="thumbs") | [] |
2024-01-10 | Tanuki/tanuki.py | examples~web_scraper~countries.py | import openai
import os
from dotenv import load_dotenv
from pydantic import BaseModel
from typing import Optional
load_dotenv()
import tanuki
from utils import scrape_url
openai.api_key = os.getenv("OPENAI_API_KEY")
class Country(BaseModel):
    name: str
    capital: str
    population: int
    area: float

@tanuki.patch
def extract_country(content: str) -> Optional[Country]:
    """
    Examine the content string and extract the country information pertaining to its
    name, capital, population, and area.
    """

@tanuki.align
def align_extract_country() -> None:
    print("Aligning...")
    country = "\n\n\n U.S. Virgin Islands\n \n\nCapital: Charlotte Amalie\nPopulation: 108708\nArea (km2): 352.0\n\n"
    assert extract_country(country) == Country(
        name="U.S. Virgin Islands",
        capital="Charlotte Amalie",
        population=108708,
        area=352.0,
    )

if __name__ == '__main__':
    # Align the function
    align_extract_country()
    # Web scrape the url and extract the list of countries
    url = "https://www.scrapethissite.com/pages/simple/"
    contents = scrape_url(url=url, class_name="country")
    # Process the country blocks using Tanuki (only sampling a couple for demo purposes)
    countries = []
    for content in contents[10:12]:
        countries.append(extract_country(content))
    print(countries)
| [] |
2024-01-10 | Tanuki/tanuki.py | tests~test_patch~test_classification.py | import os
from typing import Optional, Literal, List
import unittest
import openai
from dotenv import load_dotenv
import tanuki
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
@tanuki.patch
def classify_sentiment_2(input: str, input_2: str) -> Optional[Literal['Good', 'Bad']]:
    """
    Determine if the inputs are positive or negative sentiment, or None
    """

@tanuki.patch
def classify_sentiment(input: str) -> Optional[Literal['Good', 'Bad']]:
    """
    Determine if the input is positive or negative sentiment
    """

@tanuki.align
def align_classify_sentiment():
    """We can test the function as normal using Pytest or Unittest"""
    i_love_you = "I love you"
    assert classify_sentiment_2(i_love_you, "I love woo") == 'Good'
    assert classify_sentiment_2("I hate you", "You're disgusting") == 'Bad'
    assert classify_sentiment_2("Today is wednesday", "The dogs are running outside") == None
    assert classify_sentiment("I love you") == 'Good'
    assert classify_sentiment("I hate you") == 'Bad'
    assert classify_sentiment("Wednesdays are in the middle of the week") == None

def test_classify_sentiment():
    align_classify_sentiment()
    bad_input = "I find you awful"
    good_input = "I really really like you"
    good_input_2 = "I adore you"
    assert classify_sentiment("I like you") == 'Good'
    assert classify_sentiment(bad_input) == 'Bad'
    assert classify_sentiment("I am neutral") == None
    assert classify_sentiment_2(good_input, good_input_2) == 'Good'
    assert classify_sentiment_2("I do not like you you", bad_input) == 'Bad'
    assert classify_sentiment_2("I am neutral", "I am neutral too") == None | []
2024-01-10 | Tanuki/tanuki.py | tests~test_align~test_align_global.py | import os
from typing import Literal, Optional
import openai
from dotenv import load_dotenv
import tanuki
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
@tanuki.patch
def classify_sentiment_2(input: str, input_2: str) -> Optional[Literal['Good', 'Bad']]:
"""
Determine if the inputs are positive or negative sentiment, or None
"""
@tanuki.patch
def classify_sentiment(input: str) -> Optional[Literal['Good', 'Bad']]:
"""
Determine if the input is positive or negative sentiment
"""
@tanuki.align
def align_classify_sentiment():
"""We can test the function as normal using Pytest or Unittest"""
i_love_you = "I love you"
print(classify_sentiment_2(i_love_you, "I love woo"))
assert classify_sentiment_2(i_love_you, "I love woo") == 'Good'
print(classify_sentiment("I love you"))
assert classify_sentiment("I love you") == 'Good'
assert classify_sentiment("I hate you") == 'Bad'
#assert classify_sentiment("I hate you") != 'Good'
assert not classify_sentiment("Wednesdays are in the middle of the week")
if __name__ == '__main__':
align_classify_sentiment()
| [] |
2024-01-10 | Tanuki/tanuki.py | examples~web_scraper~cars.py | import openai
import os
from dotenv import load_dotenv
from pydantic import BaseModel
from typing import List, Optional
load_dotenv()
import tanuki
from utils import scrape_url
openai.api_key = os.getenv("OPENAI_API_KEY")
class Car(BaseModel):
price: float
mpg: str
seating: int
horsepower: int
weight: int
fuel_size: float
warranty_basic: str
warranty_powertrain: str
warranty_roadside: str
@tanuki.patch
def extract_car(content: str) -> Optional[Car]:
"""
Examine the content string and extract the car details for the price, miles per gallon, seating, horsepower,
weight, fuel tank size, and warranty.
"""
if __name__ == '__main__':
# Web scrape the url and extract the car information
# url = "https://www.cars.com/research/ford-mustang-2024/"
url = "https://www.cars.com/research/mazda-cx_90-2024/"
contents = scrape_url(url=url)
print(contents)
# Process the cocktail block using Tanuki
car = extract_car(contents[0])
print(car)
| [] |
2024-01-10 | Tanuki/tanuki.py | src~tanuki~function_modeler.py | import ast
import datetime
import io
import json
from typing import List, Tuple, Dict
import openai
from tanuki.constants import EXAMPLE_ELEMENT_LIMIT, PATCHES, SYMBOLIC_ALIGNMENTS, POSITIVE_EMBEDDABLE_ALIGNMENTS, \
NEGATIVE_EMBEDDABLE_ALIGNMENTS
from tanuki.language_models.llm_finetune_api_abc import LLM_Finetune_API
from tanuki.models.finetune_job import FinetuneJob
from tanuki.models.function_description import FunctionDescription
from tanuki.models.function_example import FunctionExample
from tanuki.trackers.dataset_worker import DatasetWorker
from tanuki.utils import approximate_token_count, prepare_object_for_saving, encode_int, decode_int
import copy
class FunctionModeler(object):
"""
This class manages the registered function models and their datasets
comprised of symbolic and embeddable alignments, and symbolic and embeddable patches
"""
def __init__(self, data_worker: DatasetWorker,
environment_id=0,
api_providers: Dict[str, LLM_Finetune_API] = None
) -> None:
self.function_configs = {}
self.data_worker = data_worker
self.distillation_token_limit = 3000 # the token limit for finetuning
self.symbolic_align_buffer = {}
self.embeddable_align_buffer = {}
self._get_datasets()
self.environment_id = environment_id
self.check_finetune_blacklist = []
self.execute_finetune_blacklist = []
self.store_data_blacklist = []
self.api_providers = api_providers
def _get_dataset_info(self, dataset_type, func_hash, type="length"):
"""
Get the dataset size for a function hash
"""
return self.data_worker.load_dataset(dataset_type, func_hash, return_type=type)
def _get_datasets(self):
"""
Get the existing datasets from the data worker
"""
self.dataset_sizes = self.data_worker.load_existing_datasets()
def save_embeddable_align_statements(self,
function_hash: str,
args,
kwargs,
positive_pairs: List[Tuple[List, Dict]],
negative_pairs: List[Tuple[List, Dict]]):
"""
Save the contrastive align statements for the embeddable function.
Do not save if the function hash is in the store data blacklist
Args:
function_hash: A unique hash for the function
args: The arguments of the function
kwargs: The keyword arguments of the function
            positive_pairs: A list of the other function invocations that should have equivalent embeddings
            negative_pairs: A list of the other function invocations that should have different embeddings
"""
# prepare args and kwargs for saving
copy_args = copy.deepcopy(args)
copy_kwargs = copy.deepcopy(kwargs)
parsed_args = prepare_object_for_saving(copy_args)
parsed_kwargs = prepare_object_for_saving(copy_kwargs)
# prepare positive pairs for saving
parsed_positive_pairs = []
for pair in positive_pairs:
copy_pair = copy.deepcopy(pair)
parsed_pair = prepare_object_for_saving(copy_pair)
parsed_positive_pairs.append(parsed_pair)
# prepare negative pairs for saving
parsed_negative_pairs = []
for pair in negative_pairs:
copy_pair = copy.deepcopy(pair)
parsed_pair = prepare_object_for_saving(copy_pair)
parsed_negative_pairs.append(parsed_pair)
# save the contrastive pairs
for pair in parsed_positive_pairs:
self._save_contrastive_alignment_pair(function_hash, parsed_args, parsed_kwargs, pair, positive=True)
for pair in parsed_negative_pairs:
self._save_contrastive_alignment_pair(function_hash, parsed_args, parsed_kwargs, pair, positive=False)
def _save_contrastive_alignment_pair(self, function_hash: str, args, kwargs, pair, positive=True):
"""
Save a contrastive pair
"""
example = FunctionExample(args, kwargs, pair)
if function_hash not in self.store_data_blacklist:
successfully_saved, new_datapoint = self.data_worker.log_embeddable_align(function_hash, example, positive)
else:
successfully_saved = False
new_datapoint = True
if successfully_saved:
if positive:
if function_hash in self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS]:
self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS][function_hash] += 1
else:
self.dataset_sizes[POSITIVE_EMBEDDABLE_ALIGNMENTS][function_hash] = 1
if not positive:
if function_hash in self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS]:
self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS][function_hash] += 1
else:
self.dataset_sizes[NEGATIVE_EMBEDDABLE_ALIGNMENTS][function_hash] = 1
if new_datapoint:
# update align buffer
if function_hash not in self.embeddable_align_buffer:
self.embeddable_align_buffer[function_hash] = bytearray()
self.embeddable_align_buffer[function_hash].extend(str(example.__dict__).encode('utf-8') + b'\r\n')
def save_symbolic_align_statements(self, function_hash, args, kwargs, output):
"""
Save the align statements and add to the align buffer
Do not save if the function hash is in the store data blacklist
Then just add the datapoints to the align buffer
"""
# prepare output for saving and later parsing
# make a deepcopy of the output to avoid changing the original object
copy_output = copy.deepcopy(output)
parsed_output = prepare_object_for_saving(copy_output)
# prepare args and kwargs for saving
copy_args = copy.deepcopy(args)
copy_kwargs = copy.deepcopy(kwargs)
parsed_args = prepare_object_for_saving(copy_args)
parsed_kwargs = prepare_object_for_saving(copy_kwargs)
example = FunctionExample(parsed_args, parsed_kwargs, parsed_output)
if function_hash not in self.store_data_blacklist:
successfully_saved, new_datapoint = self.data_worker.log_symbolic_align(function_hash, example)
else:
successfully_saved = False
new_datapoint = True
if successfully_saved:
if function_hash in self.dataset_sizes[SYMBOLIC_ALIGNMENTS]:
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] += 1
else:
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] = 1
if new_datapoint:
# update align buffer
if function_hash not in self.symbolic_align_buffer:
self.symbolic_align_buffer[function_hash] = bytearray()
self.symbolic_align_buffer[function_hash].extend(str(example.__dict__).encode('utf-8') + b'\r\n')
def save_symbolic_datapoint(self, func_hash, example):
"""
Save datapoint to the training data
"""
written_datapoints = self.data_worker.log_symbolic_patch(func_hash, example)
for func_hash, datapoints in written_datapoints.items():
if func_hash in self.dataset_sizes[PATCHES]:
                # if the dataset size is -1, it means we haven't read in the dataset size yet
if self.dataset_sizes[PATCHES][func_hash] == -1:
self.dataset_sizes[PATCHES][func_hash] = self._get_dataset_info(PATCHES, func_hash, type="length")
else:
self.dataset_sizes[PATCHES][func_hash] += datapoints
else:
self.dataset_sizes[PATCHES][func_hash] = datapoints
return len(written_datapoints) > 0
def get_symbolic_alignments(self, func_hash, max=20):
"""
Get all symbolic aligns for a function hash
"""
if func_hash not in self.symbolic_align_buffer:
return []
buffer = self.symbolic_align_buffer[func_hash]
return self._get_examples_from_alignment_buffer(buffer, max)
def get_embeddable_alignments(self, func_hash, max=20):
"""
Get all embeddable aligns for a function hash
"""
if func_hash not in self.embeddable_align_buffer:
return []
buffer = self.embeddable_align_buffer[func_hash]
return self._get_examples_from_alignment_buffer(buffer, max)
def _get_examples_from_alignment_buffer(self, buffer, max=20):
"""
Get examples from a buffer
"""
split_buffer = bytes(buffer).split(b"\n")
# byte array of stringed python dicts into dict objects
example_set = set()
for example in split_buffer:
if example == b"":
continue
example_set.add(example)
        # easy and straightforward way to approximate the number of tokens (not perfect, but it doesn't need to be)
        # We can do proper tokenization later; it would be slower and we don't need 100% accuracy
example_element_limit = EXAMPLE_ELEMENT_LIMIT
examples = []
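        # greedily collect examples until the approximate token budget is exhausted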
for example_bytes in split_buffer:
if example_bytes in example_set:
nr_of_elements = approximate_token_count(example_bytes)
example_element_limit -= nr_of_elements
if example_element_limit < 0:
break
example = example_bytes.decode('utf-8')
# json load the example
try:
example = json.loads(example)
except:
example = ast.literal_eval(example)
examples.append(example)
example_set.remove(example_bytes)
return list(examples)[:max]
def load_symbolic_align_statements(self, function_hash):
"""
Load all align statements
First check the data storage blacklist,
if the func hash is in the blacklist, then set the dataset size to 0 and the align buffer to empty bytearray
"""
if function_hash in self.store_data_blacklist:
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] = 0
self.symbolic_align_buffer[function_hash] = bytearray()
elif function_hash not in self.symbolic_align_buffer:
dataset_size, align_dataset = self._get_dataset_info(SYMBOLIC_ALIGNMENTS, function_hash, type="both")
if align_dataset:
self.symbolic_align_buffer[function_hash] = bytearray(align_dataset)
self.dataset_sizes[SYMBOLIC_ALIGNMENTS][function_hash] = dataset_size
def postprocess_symbolic_datapoint(self, func_hash, function_description, example, repaired=True):
"""
Postprocess the datapoint
First check if the datapoint should be added to the training data
Add the datapoint if it should be added
Then check if the function should be finetuned and execute finetuning if it should
"""
try:
if func_hash not in self.store_data_blacklist:
added = self.save_symbolic_datapoint(func_hash, example)
if added:
self._update_datapoint_config(repaired, func_hash)
except Exception as e:
print(e)
print("Could not add datapoint to training data")
if func_hash not in self.execute_finetune_blacklist:
self.check_for_finetuning(function_description, func_hash)
def load_function_config(self, func_hash, function_description):
"""
Load the config file for a function hash
"""
config, default = self.data_worker.load_function_config(func_hash)
if default and func_hash not in self.check_finetune_blacklist:
finetuned, finetune_config = self._check_for_finetunes(function_description)
if finetuned:
config = finetune_config
self.function_configs[func_hash] = config
return config
def _check_for_finetunes(self, function_description: FunctionDescription) -> Tuple[bool, Dict]:
        # This approach should be revisited to determine the best way to do it
# hash the function_hash into 16 characters (to embed it into the name of OpenAI finetunes, for later retrieval)
finetune_hash = function_description.__hash__(purpose="finetune") + encode_int(self.environment_id)
        # List the existing fine-tuning jobs (up to 1000)
finetunes: List[FinetuneJob] = self.api_providers["openai"].list_finetuned(limit=1000)
# Check if the function_hash is in the fine-tuning jobs
# the finetunes are in chronological order starting from newest
# So this gets the latest finetune
for finetune in finetunes:
# check if the finetune hash is in the fine-tuned model name
if finetune.status == "succeeded" and finetune_hash in finetune.fine_tuned_model:
try:
config = self._construct_config_from_finetune(finetune_hash, finetune)
# save the config
self.data_worker.update_function_config(function_description.__hash__(), config)
return True, config
except:
return False, {}
return False, {}
def _construct_config_from_finetune(self, finetune_hash, finetune: FinetuneJob):
model = finetune.fine_tuned_model
# get the ending location of finetune hash in the model name
finetune_hash_end = model.find(finetune_hash) + len(finetune_hash)
# get the next character after the finetune hash
next_char = model[finetune_hash_end]
# get the number of training runs
nr_of_training_runs = decode_int(next_char) + 1
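        # reconstruct how many datapoints the model was trained on: run sizes start at 200 and double with every run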
nr_of_training_points = (2 ** (nr_of_training_runs - 1)) * 200
config = {
"distilled_model": model,
"current_model_stats": {
"trained_on_datapoints": nr_of_training_points,
"running_faults": []},
"last_training_run": {"trained_on_datapoints": nr_of_training_points},
"current_training_run": {},
"teacher_models": ["gpt-4", "gpt-4-32k"], # currently supported teacher models
"nr_of_training_runs": nr_of_training_runs}
return config
def get_models(self, function_description):
"""
Return the current model from the config file
"""
func_hash = function_description.__hash__()
if func_hash in self.function_configs:
func_config = self.function_configs[func_hash]
else:
func_config = self.load_function_config(func_hash, function_description)
# for backwards compatibility
if "distilled_model" not in func_config:
if func_config["current_model"] in func_config["teacher_models"]:
distilled_model = ""
else:
distilled_model = func_config["current_model"]
else:
distilled_model = func_config["distilled_model"]
return distilled_model, func_config["teacher_models"]
def _update_datapoint_config(self, repaired, func_hash):
"""
Update the config to reflect the new datapoint in the training data
First adds 1 to the current datapoints
        Then updates the running faults depending on whether the datapoint was repaired, keeping only the last 100
        Then checks the revert condition, i.e. whether more than 50% of the last 10 datapoints are faulty
Finally updates the config file
Args:
            repaired (bool): whether the datapoint had to be repaired by the teacher model (counts as a fault for the current distilled model)
"""
try:
if repaired:
self.function_configs[func_hash]["current_model_stats"]["running_faults"].append(1)
else:
self.function_configs[func_hash]["current_model_stats"]["running_faults"].append(0)
# take the last 100 datapoints
self.function_configs[func_hash]["current_model_stats"]["running_faults"] = \
self.function_configs[func_hash]["current_model_stats"]["running_faults"][-100:]
            # check if more than 50% of the last 10 datapoints are faulty; this is the switch condition
if sum(self.function_configs[func_hash]["current_model_stats"]["running_faults"][-10:]) / 10 > 0.5:
self.function_configs[func_hash]["distilled_model"] = ""
self.function_configs[func_hash]["current_model_stats"]["trained_on_datapoints"] = 0
self.function_configs[func_hash]["current_model_stats"]["running_faults"] = []
self._update_config_file(func_hash)
except Exception as e:
print(e)
print("Could not update config file")
pass
def _update_config_file(self, func_hash):
self.data_worker.update_function_config(func_hash, self.function_configs[func_hash])
def check_for_finetuning(self, function_description, func_hash):
"""
Check for finetuning status
If already finetuning, check for finetuning status
If not finetuning, check for finetuning condition and execute finetuning if condition is met
"""
try:
# check if already finetuning
if "job_id" in self.function_configs[func_hash]["current_training_run"]:
# check for job status
self._check_finetuning_status(func_hash)
else:
# check for finetuning condition
if self._check_finetuning_condition(func_hash):
self._execute_finetuning(function_description, func_hash)
except Exception as e:
print(e)
print("Error checking for finetuning")
def _check_finetuning_condition(self, func_hash):
"""
Check if the finetuning condition is met
Currently finetuning condition is dependent on the number of symbolic datapoints since last finetuning
"""
if func_hash not in self.function_configs:
return False
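        # the finetuning threshold doubles with every completed training run: (2 ** nr_of_training_runs) * 200 datapoints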
training_threshold = (2 ** self.function_configs[func_hash]["nr_of_training_runs"]) * 200
align_dataset_size = self.dataset_sizes[SYMBOLIC_ALIGNMENTS][func_hash] if func_hash in self.dataset_sizes[
SYMBOLIC_ALIGNMENTS] else 0
patch_dataset_size = self.dataset_sizes[PATCHES][func_hash] if func_hash in self.dataset_sizes[PATCHES] else 0
if patch_dataset_size == -1:
            # if the patch dataset size hasn't been read in yet, read it in
patch_dataset_size = self._get_dataset_info(PATCHES, func_hash, type="length")
self.dataset_sizes[PATCHES][func_hash] = patch_dataset_size
return (patch_dataset_size + align_dataset_size) > training_threshold
def _execute_finetuning(self, function_description, func_hash):
"""
Execute the finetuning
First create the OpenAI compatible dataset with jsonL file and upload it
Then submit the OpenAI finetuning job
Finally update the config file to reflect the new finetuning job as current
"""
# get function description
function_string = str(function_description.__dict__.__repr__() + "\n")
# get the align dataset
align_dataset = self._get_dataset_info(SYMBOLIC_ALIGNMENTS, func_hash, type="dataset")
if not align_dataset:
align_dataset = ""
else:
align_dataset = align_dataset.decode('utf-8')
# get the patch dataset
patch_dataset = self._get_dataset_info(PATCHES, func_hash, type="dataset")
if not patch_dataset:
patch_dataset = ""
else:
patch_dataset = patch_dataset.decode('utf-8')
if align_dataset == "" and patch_dataset == "":
return
dataset = align_dataset + patch_dataset
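        # protect escaped newlines inside datapoints with a sentinel so that splitting on real newlines keeps each datapoint intact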
        dataset = dataset.replace("\\n", "[SEP_TOKEN]")
dataset = dataset.split("\n")
dataset = [x.replace("[SEP_TOKEN]", "\\n") for x in dataset if x != ""]
# read in the dataset file
dataset = [ast.literal_eval(x) for x in dataset]
#
# create the openai dataset
instruction = "You are given below a function description and input data. The function description of what the function must carry out can be found in the Function section, with input and output type hints. The input data can be found in Input section. Using the function description, apply the function to the Input and return a valid output type, that is acceptable by the output_class_definition and output_class_hint. Return None if you can't apply the function to the input or if the output is optional and the correct output is None.\nINCREDIBLY IMPORTANT: Only output a JSON-compatible string in the correct response format."
finetuning_dataset = [{"messages": [
{
"role": "system",
"content": f"You are a skillful and accurate language model, who applies a described function on input data. Make sure the function is applied accurately and correctly and the outputs follow the output type hints and are valid outputs given the output types."
},
{"role": "user",
"content": f"{instruction}\nFunction: {function_string}---\nInputs:\nArgs: {x['args']}\nKwargs: {x['kwargs']}\nOutput:"},
{"role": "assistant", "content": str(x['output']) if x['output'] is not None else "None"}]}
for x in dataset]
# Create an in-memory text stream
temp_file = io.BytesIO()
# Write data to the stream
for idx, item in enumerate(finetuning_dataset):
temp_file.write(json.dumps(item).encode('utf-8'))
if idx != len(finetuning_dataset) - 1:
temp_file.write("\n".encode('utf-8'))
# Reset the stream position to the beginning
temp_file.seek(0)
# create the finetune hash
finetune_hash = function_description.__hash__(purpose="finetune")
nr_of_training_runs = self.function_configs[func_hash]["nr_of_training_runs"]
finetune_hash += encode_int(self.environment_id)
finetune_hash += encode_int(nr_of_training_runs)
        # at this point the dataset sizes are known to have been read in, as that is checked in _check_finetuning_condition
align_dataset_size = self.dataset_sizes[SYMBOLIC_ALIGNMENTS][func_hash] if func_hash in self.dataset_sizes[
SYMBOLIC_ALIGNMENTS] else 0
patch_dataset_size = self.dataset_sizes[PATCHES][func_hash] if func_hash in self.dataset_sizes[PATCHES] else 0
total_dataset_size = align_dataset_size + patch_dataset_size
# Use the stream as a file
try:
finetuning_response: FinetuneJob = self.api_providers["openai"].finetune(file=temp_file, suffix=finetune_hash)
except Exception as e:
return
self.function_configs[func_hash]["current_training_run"] = {"job_id": finetuning_response.id,
"trained_on_datapoints": total_dataset_size,
"last_checked": datetime.datetime.now().strftime(
"%Y-%m-%d %H:%M:%S")}
# update the config json file
try:
self._update_config_file(func_hash)
except Exception as e:
print(e)
print("Could not update config file to register a finetuning run")
def _check_finetuning_status(self, func_hash):
"""
Check the status of the current finetuning job
If the job is finished, update the config file to reflect the new model
"""
job_id = self.function_configs[func_hash]["current_training_run"]["job_id"]
last_checked = self.function_configs[func_hash]["current_training_run"]["last_checked"]
# check if last checked was more than 30 mins ago
if (datetime.datetime.now() - datetime.datetime.strptime(last_checked,
"%Y-%m-%d %H:%M:%S")).total_seconds() > 1800:
response = self.api_providers["openai"].get_finetuned(job_id)
self.function_configs[func_hash]["current_training_run"]["last_checked"] = datetime.datetime.now().strftime(
"%Y-%m-%d %H:%M:%S")
if response.status == "succeeded" or response.status == "failed":
self._update_finetune_config(response, func_hash, response.status)
else:
self._update_config_file(func_hash)
def _update_finetune_config(self, response: FinetuneJob, func_hash, status):
"""
Update the config file to reflect the new model and switch the current model to the finetuned model
"""
if status == "failed":
self.function_configs[func_hash]["current_training_run"] = {}
else:
self.function_configs[func_hash]["distilled_model"] = response.fine_tuned_model
self.function_configs[func_hash]["last_training_run"] = self.function_configs[func_hash][
"current_training_run"]
self.function_configs[func_hash]["current_model_stats"] = {
"trained_on_datapoints": self.function_configs[func_hash]["current_training_run"][
"trained_on_datapoints"],
"running_faults": []}
self.function_configs[func_hash]["nr_of_training_runs"] += 1
self.function_configs[func_hash]["current_training_run"] = {}
try:
self._update_config_file(func_hash)
except Exception as e:
print(e)
print("Could not update config file after a successful finetuning run")
pass
| [
"You are a skillful and accurate language model, who applies a described function on input data. Make sure the function is applied accurately and correctly and the outputs follow the output type hints and are valid outputs given the output types.",
"You are given below a function description and input data. The function description of what the function must carry out can be found in the Function section, with input and output type hints. The input data can be found in Input section. Using the function description, apply the function to the Input and return a valid output type, that is acceptable by the output_class_definition and output_class_hint. Return None if you can't apply the function to the input or if the output is optional and the correct output is None.\nINCREDIBLY IMPORTANT: Only output a JSON-compatible string in the correct response format.\nFunction: PLACEHOLDER---\nInputs:\nArgs: PLACEHOLDER\nKwargs: PLACEHOLDER\nOutput:",
"None"
] |
2024-01-10 | Tanuki/tanuki.py | tests~test_configure_MP.py | from typing import List
from tanuki.register import Register
import os
from typing import Optional, Literal, List
import openai
from dotenv import load_dotenv
import tanuki
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
@tanuki.patch
def classify_sentiment_2(input: str, input_2: str) -> Optional[Literal['Good', 'Bad']]:
"""
Determine if the inputs are positive or negative sentiment, or None
"""
@tanuki.patch(environment_id = 12, ignore_finetune_fetching=True, ignore_finetuning=True, ignore_data_storage=True)
def classify_sentiment(input: str) -> Optional[Literal['Good', 'Bad']]:
"""
Determine if the input is positive or negative sentiment
"""
@tanuki.align
def align_classify_sentiment():
"""We can test the function as normal using Pytest or Unittest"""
i_love_you = "I love you"
assert classify_sentiment_2(i_love_you, "I love woo") == 'Good'
assert classify_sentiment_2("I hate you", "You're discusting") == 'Bad'
assert classify_sentiment_2("Today is wednesday", "The dogs are running outside") == None
assert classify_sentiment("I love you") == 'Good'
assert classify_sentiment("I hate you") == 'Bad'
assert classify_sentiment("Wednesdays are in the middle of the week") == None
def test_classify_sentiment():
align_classify_sentiment()
bad_input = "I find you awful"
good_input = "I really really like you"
good_input_2 = "I adore you"
assert classify_sentiment("I like you") == 'Good'
assert classify_sentiment(bad_input) == 'Bad'
assert classify_sentiment("I am neutral") == None
assert classify_sentiment_2(good_input, good_input_2) == 'Good'
assert classify_sentiment_2("I do not like you you", bad_input) == 'Bad'
assert classify_sentiment_2("I am neutral", "I am neutral too") == None
def test_configurability():
classify_sent_description = Register.load_function_description(classify_sentiment)
classify_sentiment_2_description = Register.load_function_description(classify_sentiment_2)
sent_func_hash = classify_sent_description.__hash__()
sent_func_2_hash = classify_sentiment_2_description.__hash__()
func_modeler = tanuki.function_modeler
assert func_modeler.environment_id == 12
assert sent_func_hash in func_modeler.check_finetune_blacklist
assert sent_func_2_hash not in func_modeler.check_finetune_blacklist
assert sent_func_hash in func_modeler.execute_finetune_blacklist
assert sent_func_2_hash not in func_modeler.execute_finetune_blacklist
assert sent_func_hash in func_modeler.store_data_blacklist
assert sent_func_2_hash not in func_modeler.store_data_blacklist
| [] |
2024-01-10 | Tanuki/tanuki.py | examples~web_scraper~cocktail.py | import openai
import os
from dotenv import load_dotenv
from pydantic import BaseModel
from typing import List, Optional
load_dotenv()
import tanuki
from utils import scrape_url
openai.api_key = os.getenv("OPENAI_API_KEY")
class Cocktail(BaseModel):
name: str
ingredients: List[str] = []
instructions: str
similar: List[str] = []
@tanuki.patch
def extract_cocktail(content: str) -> Optional[Cocktail]:
"""
Examine the content string and extract the cocktail details for the ingredients, instructions, and similar cocktails.
"""
@tanuki.align
def align_extract_cocktail() -> None:
print("Aligning...")
cocktail = """Black Rose | Kindred Cocktails\n\n\n\n\n\n Skip to main content\n \n\n\n\n\n\nKindred Cocktails\n\n\nToggle navigation\n\n\n\n\n\n\n\n\nMain navigation\n\n\nHome\n\n\nCocktails\n\n\nNew\n\n\nInfo \n\n\nStyle guidelines\n\n\nIngredients\n\n\n\n\n\nMeasurement units\n\n\nHistoric Cocktail Books\n\n\nRecommended Brands\n\n\nAmari & Friends\n\n\nArticles & Reviews\n\n\n\n\n\nAbout us\n\n\nLearn More\n\n\nFAQ\n\n\nTerms of Use\n\n\nContact us\n\n\n\n\nYou \n\n\nLog in\n\n\nSign Up\n\n\nReset your password\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nHome\n\n\nCocktails\n\n\n Black Rose\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nCopy\n\n\n\n\nBlack Rose\n \n\n\n\n\n\n\n\n\n\n2 oz Bourbon\n\n1 ds Grenadine\n\n2 ds Peychaud's Bitters\n\n1 Lemon peel (flamed, for garnish)\n\n\n\nInstructions\nFill an old-fashioned glass three-quarters full with ice. Add the bourbon, grenadine, and bitters, and stir. Garnish with the lemon peel.\n\n\n\n\n\n\nCocktail summary\n\n\n\nPosted by\nThe Boston Shaker\n on \n4/12/2011\n\n\n\n\nIs of\nunknown authenticity\n\n\nReference\nDale Degroff, The Essential Cocktail, p48\n\n\n\nCurator\nNot yet rated\n\n\nAverage\n3.5 stars (6 ratings)\n\n\n\nYieldsDrink\n\n\nScale\n\n\nBourbon, Peychaud's Bitters, Grenadine, Lemon peel\nPT5M\nPT0M\nCocktail\nCocktail\n1\ncraft, alcoholic\n3.66667\n6\n\n\n\n\n\n\n\n\n\n\nCocktail Book\n\nLog in or sign up to start building your Cocktail Book.\n\n\n\n\nFrom other usersWith a modest grenadine dash, this drink didn't do much for me, but adding a bit more won me over.\nSimilar cocktailsNew Orleans Cocktail — Bourbon, Peychaud's Bitters, Orange Curaçao, Lemon peelOld Fashioned — Bourbon, Bitters, Sugar, Lemon peelBattle of New Orleans — Bourbon, Peychaud's Bitters, Absinthe, Orange bitters, Simple syrupImproved Whiskey Cocktail — Bourbon, Bitters, Maraschino Liqueur, Absinthe, Simple syrup, Lemon peelDerby Cocktail — Bourbon, Bénédictine, BittersMother-In-Law — Bourbon, Orange Curaçao, Maraschino Liqueur, Peychaud's Bitters, Bitters, Torani Amer, Simple syrupMint Julep — Bourbon, Rich demerara syrup 2:1, MintThe Journey — Bourbon, Mezcal, Hazelnut liqueurBenton's Old Fashioned — Bourbon, Bitters, Grade B maple syrup, Orange peelFancy Mint Julep — Bourbon, Simple syrup, Mint, Fine sugar\n\nComments\n\n\n\n\n\nLog in or register to post comments\n\n\n\n\n\n\n\n\n© 2010-2023 Dan Chadwick. Kindred Cocktails™ is a trademark of Dan Chadwick."""
assert extract_cocktail(cocktail) == Cocktail(
name="Black Rose",
ingredients=["2 oz Bourbon", "1 ds Grenadine", "2 ds Peychaud's Bitters", "1 Lemon peel (flamed, for garnish)"],
instructions="Fill an old-fashioned glass three-quarters full with ice. Add the bourbon, grenadine, and bitters, and stir. Garnish with the lemon peel.",
similar=["New Orleans Cocktail", "Old Fashioned", "Battle of New Orleans", "Improved Whiskey Cocktail", "Derby Cocktail", "Mother-In-Law", "Mint Julep", "The Journey", "Benton's Old Fashioned", "Fancy Mint Julep"],
)
if __name__ == '__main__':
# Align the function
align_extract_cocktail()
# Web scrape the url and extract the cocktail information
url = "https://kindredcocktails.com/cocktail/old-fashioned"
# url = "https://kindredcocktails.com/cocktail/journey"
contents = scrape_url(url=url)
print(contents)
# Process the cocktail block using Tanuki
cocktail = extract_cocktail(contents[0])
print(cocktail)
| [] |
2024-01-10 | Tanuki/tanuki.py | src~tanuki~aligns~align_classify_sentiment.py | import os
import openai
from dotenv import load_dotenv
import tanuki
import unittest
from typing import Literal, Optional
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
class TestClassifySentiment(unittest.TestCase):
@tanuki.patch
def classify_sentiment_2(self, input: str, input_2: str) -> Optional[Literal['Good', 'Bad']]:
"""
Determine if the inputs are positive or negative sentiment, or None
"""
@tanuki.patch
def classify_sentiment(self, input: str) -> Optional[Literal['Good', 'Bad']]:
"""
Determine if the input is positive or negative sentiment
"""
@tanuki.align
def test_align_classify_sentiment(self):
"""We can test the function as normal using Pytest or Unittest"""
i_love_you = "I love you"
print(self.classify_sentiment_2(i_love_you, "I love woo"))
assert self.classify_sentiment_2(i_love_you, "I love woo") == 'Good'
self.assertEqual(self.classify_sentiment_2(i_love_you, "I love woo"), 'Good')
print(self.classify_sentiment("I love you"))
assert self.classify_sentiment("I love you") == 'Good'
assert self.classify_sentiment("I hate you") == 'Bad'
assert self.classify_sentiment("I hate you") != 'Good'
assert not self.classify_sentiment("Wednesdays are in the middle of the week")
if __name__ == '__main__':
unittest.main()
| [] |
2024-01-10 | Tanuki/tanuki.py | examples~web_scraper~jobs.py | import openai
import os
from dotenv import load_dotenv
from pydantic import BaseModel
from typing import Optional
load_dotenv()
import tanuki
from utils import scrape_url
openai.api_key = os.getenv("OPENAI_API_KEY")
class Job(BaseModel):
position: str
company: str
location: str
@tanuki.patch
def extract_job(content: str) -> Optional[Job]:
"""
Examine the content string and extract the job details for the position title, company, and location.
"""
@tanuki.align
def align_extract_job() -> None:
print("Aligning...")
job = "\n\n\n\n\n\n\n\n\nShip broker\nFuentes, Walls and Castro\n\n\n\n\n Michelleville, AP\n \n\n2021-04-08\n\n\n\nLearn\nApply\n\n\n"
assert extract_job(job) == Job(
position="Ship broker",
company="Fuentes, Walls and Castro",
location="Michelleville, AP",
)
if __name__ == '__main__':
# Align the function
align_extract_job()
# Web scrape the url and extract the list of jobs
url = "https://realpython.github.io/fake-jobs/"
contents = scrape_url(url=url, class_name="card")
# Process the job blocks using Tanuki (only sampling a couple for demo purposes)
jobs = []
for content in contents[1:3]:
jobs.append(extract_job(content))
print(jobs)
| [] |
2024-01-10 | Tanuki/tanuki.py | examples~web_scraper~streeteasy.py | from numpy import square
import openai
import os
from dotenv import load_dotenv
from pydantic import BaseModel
from typing import List, Optional
load_dotenv()
import tanuki
from utils import scrape_url
openai.api_key = os.getenv("OPENAI_API_KEY")
class Property(BaseModel):
neighborhood: str
address: str
price: float
fee: bool
beds: float
bath: float
listed_by: str
@tanuki.patch
def extract_property(content: str) -> Optional[Property]:
"""
    Examine the content string and extract the rental property details for the neighborhood, address,
    price, whether a broker fee applies, number of beds, number of bathrooms, and the company that is listing the property.
"""
@tanuki.align
def align_extract_property() -> None:
print("Aligning...")
unit_one = "Rental Unit in Lincoln Square\n \n\n\n229 West 60th Street #7H\n\n\n\n$7,250\nNO FEE\n\n\n\n\n\n\n\n\n2 Beds\n\n\n\n\n2 Baths\n\n\n\n\n\n 1,386\n square feet\nsq_ft\n\n\n\n\n\n Listing by Algin Management"
assert extract_property(unit_one) == Property(
neighborhood="Lincoln Square",
address="229 West 60th Street #7H",
price=7250.0,
fee=False,
beds=2.0,
bath=2.0,
listed_by="Algin Management",
)
if __name__ == '__main__':
# Align the function
align_extract_property()
# Web scrape the url and extract the rental property details
url = "https://streeteasy.com/2-bedroom-apartments-for-rent/manhattan?page=2"
contents = scrape_url(url=url, class_name="listingCardBottom")
print(contents)
# Process the rental property block using Tanuki
units = []
for content in contents[1:3]:
units.append(extract_property(content))
print(units)
| [] |
2024-01-10 | Tanuki/tanuki.py | tests~test_align~test_align_class.py | import os
import unittest
from typing import Literal, Optional
import openai
from dotenv import load_dotenv
import tanuki
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
class TestClassifySentiment(unittest.TestCase):
@tanuki.patch
def classify_sentiment_2(self, input: str, input_2: str) -> Optional[Literal['Good', 'Bad']]:
"""
Determine if the inputs are positive or negative sentiment, or None
"""
@tanuki.patch
def classify_sentiment(self, input: str) -> Optional[Literal['Good', 'Bad']]:
"""
Determine if the input is positive or negative sentiment
"""
@tanuki.align
def test_align_classify_sentiment(self):
"""We can test the function as normal using Pytest or Unittest"""
i_love_you = "I love you"
print(self.classify_sentiment_2(i_love_you, "I love woo"))
assert self.classify_sentiment_2(i_love_you, "I love woo") == 'Good'
assert self.classify_sentiment("I love you") == 'Good'
assert self.classify_sentiment("I hate you") == 'Bad'
assert not self.classify_sentiment("Wednesdays are in the middle of the week")
if __name__ == '__main__':
unittest.main() | [] |
2024-01-10 | Tanuki/tanuki.py | examples~web_scraper~quotes.py | import openai
import os
from dotenv import load_dotenv
from pydantic import BaseModel
from typing import List, Optional
load_dotenv()
import tanuki
from utils import scrape_url
openai.api_key = os.getenv("OPENAI_API_KEY")
class Quote(BaseModel):
text: str
author: str
tags: List[str] = []
@tanuki.patch
def extract_quote(content: str) -> Optional[Quote]:
"""
Examine the content string and extract the quote details for the text, author, and tags.
"""
@tanuki.align
def align_extract_quote() -> None:
print("Aligning...")
quote = "\nIt takes courage to grow up and become who you really are.\nby E.E. Cummings\n(about)\n\n\n Tags:\n \ncourage\n\n"
assert extract_quote(quote) == Quote(
text="It takes courage to grow up and become who you really are.",
author="E.E. Cummings",
tags=["courage"],
)
if __name__ == '__main__':
# Align the function
align_extract_quote()
# Web scrape the url and extract the list of quotes
url = "https://quotes.toscrape.com/page/1/"
contents = scrape_url(url=url, class_name="quote")
# Process the quote blocks using Tanuki (only sampling a couple for demo purposes)
quotes = []
for content in contents[0:2]:
c = content.replace('“', '')
c = c.replace('”', '')
quotes.append(extract_quote(c))
print(quotes)
| [] |
2024-01-10 | Tanuki/tanuki.py | examples~web_scraper~airbnb.py | import openai
import os
from bs4 import BeautifulSoup
from dotenv import load_dotenv
from pydantic import BaseModel
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
from typing import Optional
load_dotenv()
import tanuki
openai.api_key = os.getenv("OPENAI_API_KEY")
class AirBnb(BaseModel):
city: str
state: str
dates: str
price: float
stars: float
@tanuki.patch
def extract_airbnb(content: str) -> Optional[AirBnb]:
"""
Examine the content string and extract the airbnb details for the city, state,
dates available, nightly price, and stars rating.
"""
@tanuki.align
def align_extract_airbnb() -> None:
print("Aligning...")
airbnb1 = "Caroga Lake, New YorkRoyal Mountain Ski ResortDec 3 – 8$200\xa0night$200 per night4.99"
assert extract_airbnb(airbnb1) == AirBnb(
city="Caroga Lake",
state="New York",
dates="Dec 3 - 8",
price=200.0,
stars=4.99,
)
def selenium_driver() -> str:
"""Use selenium to scrape the airbnb url and return the page source."""
# configure webdriver
options = Options()
# options.add_argument('--headless') # Enable headless mode
# options.add_argument('--disable-gpu') # Disable GPU acceleration
# launch driver for the page
driver = webdriver.Chrome(options=options)
driver.get("https://www.airbnb.com/?tab_id=home_tab&refinement_paths%5B%5D=%2Fhomes&search_mode=flex_destinations_search&flexible_trip_lengths%5B%5D=one_week&location_search=MIN_MAP_BOUNDS&monthly_start_date=2023-12-01&monthly_length=3&price_filter_input_type=0&channel=EXPLORE&search_type=category_change&price_filter_num_nights=5&category_tag=Tag%3A5366")
time.sleep(3)
# refresh the page to remove the dialog modal
driver.refresh()
time.sleep(3)
    # Scroll partway down the page (40% of the scrollable height) so that more listings load
scroll_position = driver.execute_script("return (document.body.scrollHeight - window.innerHeight) * 0.4;")
driver.execute_script(f"window.scrollTo(0, {scroll_position});")
time.sleep(3)
# extract the page source and return
page_source = driver.page_source
driver.quit()
return page_source
if __name__ == '__main__':
# Align the function
align_extract_airbnb()
# Selenium driver to scrape the url and extract the airbnb information
page_source = selenium_driver()
# Beautiful Soup to parse the page source
soup = BeautifulSoup(page_source, 'html.parser')
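    # listing cards are assumed to be rendered inside divs with the "dir dir-ltr" class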
entities = soup.find_all('div', class_="dir dir-ltr")
# Remove entries that are not airbnb listings
contents = [entity.text for entity in entities if entity.text != ""]
contents = [c for c in contents if "$" in c]
print(contents)
# Tanuki to extract the airbnb information
print("Tanuki Time!")
airbnbs = []
for content in contents[1:3]:
airbnbs.append(extract_airbnb(content))
print(airbnbs)
| [] |
2024-01-10 | Tanuki/tanuki.py | src~tanuki~__init__.py | import ast
import inspect
import json
import logging
import os
import sys
import textwrap
from functools import wraps
from typing import Optional, Union, Any
from unittest.mock import patch as mock_patch
import requests
from tanuki.assertion_visitor import AssertionVisitor
from tanuki.function_modeler import FunctionModeler
from tanuki.language_models.embedding_model_manager import EmbeddingModelManager
from tanuki.language_models.language_model_manager import LanguageModelManager
from tanuki.language_models.openai_api import OpenAI_API
from tanuki.models.embedding import Embedding
from tanuki.models.function_description import FunctionDescription
from tanuki.models.function_example import FunctionExample
from tanuki.models.function_type import FunctionType
from tanuki.register import Register
from tanuki.trackers.filesystem_buffered_logger import FilesystemBufferedLogger
from tanuki.utils import get_key
from tanuki.validator import Validator
# Define a new level
def _log_align(self, func_hash, *args, **kws):
if self.isEnabledFor(ALIGN_LEVEL_NUM):
args, kwargs, output = args
kwargs['align'] = True
example = FunctionExample(args, kwargs, output)
# Define a safe directory within the project for logs
# (You can make this configurable if needed)
log_directory = os.path.join(os.getcwd(), ALIGN_FILE_NAME)
# Ensure the directory exists
if not os.path.exists(log_directory):
try:
os.makedirs(log_directory)
except OSError as e:
self.error(f"Failed to create log directory: {e}")
return
# Write to the file
log_file_path = os.path.join(log_directory, func_hash)
try:
with open(log_file_path, "a") as f:
f.write(str(example.__dict__) + "\n")
except IOError as e:
self.error(f"Failed to write to log file: {e}")
# Set up logging with custom logger
def logger_factory(name):
return FilesystemBufferedLogger(name)
ALIGN_LEVEL_NUM = 15
PATCH_LEVEL_NUM = 14
ALIGN_FILE_NAME = ".align"
alignable_functions = {}
# Set up basic configuration
logging.setLoggerClass(FilesystemBufferedLogger)
logging.addLevelName(ALIGN_LEVEL_NUM, "ALIGN")
logging.addLevelName(PATCH_LEVEL_NUM, "PATCH")
logging.basicConfig(level=ALIGN_LEVEL_NUM)
logger = logger_factory(__name__)
api_providers = {"openai": OpenAI_API()}
# currently only use buffered logger as default
function_modeler = FunctionModeler(data_worker=logger, api_providers=api_providers)
language_modeler = LanguageModelManager(function_modeler, api_providers=api_providers)
embedding_modeler = EmbeddingModelManager(function_modeler, api_providers=api_providers)
telemetry_enabled: bool = True
@staticmethod
def _load_alignments(func_hash: str):
function_modeler.load_symbolic_align_statements(func_hash)
@staticmethod
def _anonymous_usage(*args, **kwargs):
"""
Post anonymously to the usage server so we know what configs are commonly used in the project.
:return:
"""
if not telemetry_enabled:
return
try:
requests.post('https://idhhnusnhkkjkpwkm1fr.monkeypatch.ai/telemetry', data=json.dumps(kwargs))
except:
pass
@staticmethod
def align(test_func):
"""
Decorator to align a function.
By adding the @align decorator to a function, we can declare the desired input-output
behaviour of the patched functions using assertions.
:param test_func:
:return:
"""
@wraps(test_func)
def wrapper(*args, **kwargs):
source = textwrap.dedent(inspect.getsource(test_func))
tree = ast.parse(source)
_locals = locals()
# We are handling symbolic and embeddable functions differently, as they have different semantics during
# the alignment process.
patch_symbolic_funcs = Register.functions_to_patch(type=FunctionType.SYMBOLIC)
patch_embeddable_funcs = Register.functions_to_patch(type=FunctionType.EMBEDDABLE)
visitor = AssertionVisitor(_locals,
patch_symbolic_funcs=patch_symbolic_funcs,
patch_embeddable_funcs=patch_embeddable_funcs)
visitor.visit(tree)
# Get the mocked behaviours from analyzing the AST of the aligned function
mock_behaviors = visitor.mocks
# Negative examples (i.e. embeddable function examples that should have maximum distance in the embedding space)
mock_negatives = visitor.negative_mocks
if args:
instance = args[0]
args = args[1:]
else:
instance = None
def extract_attributes(result):
attributes = {}
# If the result is a list, get its length
if isinstance(result, list):
attributes['length'] = len(result)
# If the result is a dictionary, get its keys (or any other attributes)
elif isinstance(result, dict):
attributes['keys'] = list(result.keys())
return attributes
def create_mock_func(instance: Optional,
func_name: str,
description: FunctionDescription):
def mock_func(*args, **kwargs):
hashed_description = description.__hash__()
function_type, func = Register.get(func_name)
# If we are aligning a function that returns an embedding,
# we need to ensure both sides of the equality are future embeddings,
# as it is nonsensical to declare that an embedding should 'be' an object or a string, etc.
if function_type == FunctionType.EMBEDDABLE:
key = get_key(args, kwargs)
mocked_embedding = mock_behaviors.get(key, None)
# Find positive examples by matching the mocked embedding with identical embeddings in the values
# of the mock_behaviors dictionary
mock_positives_list = []
for k, v in mock_behaviors.items():
if v == mocked_embedding and k != key:
mock_positives_list.append(k)
equivalent_mocks = mock_positives_list
negative_mocks = list(mock_negatives.values())
function_modeler.save_embeddable_align_statements(hashed_description,
args,
kwargs,
equivalent_mocks,
negative_mocks)
return mocked_embedding
else:
# If we are aligning a function that returns an object
if not instance:
result = func(*args, **kwargs)
else:
result = func(instance, *args, **kwargs)
# Extract attributes from the result
attributes = extract_attributes(result)
for attr_name, attr_value in attributes.items():
# If the attribute is a list, get its length
if isinstance(attr_value, list):
attributes[attr_name] = len(attr_value)
key = get_key(args, kwargs)
mocked_behaviour = mock_behaviors.get(key, None)
function_modeler.save_symbolic_align_statements(hashed_description, args, kwargs,
mocked_behaviour)
return mocked_behaviour
return mock_func
# Identify all functions that need to be patched based on mock_behaviors
if instance:
function_names_to_patch = Register.function_names_to_patch(instance)#, type=FunctionType.SYMBOLIC)
functions_descriptions = [Register.load_function_description_from_name(instance, func_name)
for func_name in function_names_to_patch]
else:
function_names_to_patch = Register.function_names_to_patch()#type=FunctionType.SYMBOLIC)
functions_descriptions = [Register.load_function_description_from_name(func_name)
for func_name in function_names_to_patch]
patched_func = test_func
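        # wrap the aligned test function so that every registered function is replaced by its mock during execution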
for desc, func in zip(functions_descriptions, function_names_to_patch):
mock_function = create_mock_func(instance, func, desc)
module_name = sys.modules[test_func.__module__].__name__
if instance:
patched_func = mock_patch.object(instance, func, new=mock_function)(patched_func)
else:
patched_func = mock_patch(f'{module_name}.{func}', new=mock_function)(patched_func)
# Get the signature of the function
sig = inspect.signature(test_func)
if sig.parameters:
first_param_name = next(iter(sig.parameters))
# If the instance is the "self" or the name of the first parameter,
# then pass it as the first argument
if first_param_name in ['self', 'cls'] or first_param_name == instance:
return patched_func(instance, *args, **kwargs)
else:
return patched_func(*args, **kwargs)
else:
return patched_func(*args, **kwargs)
return wrapper
@staticmethod
def generate_from_embedding_model_manager(function_description):
choice_parsed = []
instantiated = function_description.output_type_hint(choice_parsed)
return instantiated
@staticmethod
def patch(patchable_func=None,
environment_id: int = 0,
ignore_finetune_fetching: bool = False,
ignore_finetuning: bool = False,
ignore_data_storage: bool = False
):
"""
The main decorator for patching a function.
args:
        patchable_func: The function to be patched; should always be set to None. This is used here to allow keyword arguments, or no arguments, to be passed to the decorator
environment_id (int): The environment id. Used for fetching correct finetuned models
ignore_finetune_fetching (bool): Whether to ignore fetching finetuned models.
If set to True, during the first call openai will not be queried for finetuned models, which reduces initial startup latency
ignore_finetuning (bool): Whether to ignore finetuning the models altogether. If set to True the teacher model will always be used.
            The data is still saved, however, in case finetuning is needed in the future
ignore_data_storage (bool): Whether to ignore storing the data.
If set to True, the data will not be stored in the finetune dataset and the align statements will not be saved
            This improves latency as communication with data storage is minimised
"""
def wrap(test_func):
@wraps(test_func)
def wrapper(*args, **kwargs) -> Union[Embedding, Any]:
validator = Validator()
function_description: FunctionDescription = Register.load_function_description(test_func)
# If the function is expected to return an embedding, we choose the embedding API, rather than an LLM.
if inspect.isclass(function_description.output_type_hint) and \
issubclass(function_description.output_type_hint, Embedding):
instantiated: Embedding = embedding_modeler(args, function_description, kwargs)
else:
# If the function is expected to return a choice, we choose the LLM API.
instantiated: Any = language_modeler(args, function_description, kwargs, validator)
return instantiated # test_func(*args, **kwargs)
_anonymous_usage(logger=logger.name)
function_description = Register.load_function_description(test_func)
func_hash = function_description.__hash__()
function_modeler.environment_id = environment_id
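        # apply the per-function opt-outs by adding the function hash to the relevant blacklists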
if ignore_finetuning:
function_modeler.execute_finetune_blacklist.append(func_hash)
if ignore_finetune_fetching:
function_modeler.check_finetune_blacklist.append(func_hash)
if ignore_data_storage:
function_modeler.store_data_blacklist.append(func_hash)
_load_alignments(func_hash)
wrapper._is_alignable = True
Register.add_function(test_func, function_description)
return wrapper
if callable(patchable_func):
func = patchable_func
return wrap(func)
if patchable_func is not None:
raise TypeError(
"The first argument to patch must not be specified. Please use keyword arguments or specify the first argument as None")
return wrap
| [] |
2024-01-10 | Tanuki/tanuki.py | tests~test_align~test_align_output.py | import os
import unittest
from typing import Literal, Optional, List, Union
from pydantic import BaseModel, Field
import openai
from dotenv import load_dotenv
import tanuki
load_dotenv()
from tanuki.register import Register
openai.api_key = os.getenv("OPENAI_API_KEY")
class Person(BaseModel):
age: int = Field(..., ge=0, le=155)
name: str
favourite_colours: List[str]
@tanuki.patch
def summarise_list_generic(input: str) -> List[str]:
"""
Summarise the input into multiple sentences in a list
"""
@tanuki.patch
def summarise_list_typing(input: str) -> list[str]:
"""
Summarise the input into multiple sentences in a list
"""
@tanuki.patch
def summarise_list_pydantic(input: str) -> List[Person]:
"""
Create a list of Personas
"""
@tanuki.patch
def summarise_list_dict(input: str) -> List[dict]:
"""
Create a list of dict personas
"""
@tanuki.patch
def summarise_list_int(input: str) -> List[int]:
"""
Extract the integers
"""
@tanuki.patch
def summarise_list_Union(input: str) -> List[Union[int, float]]:
"""
Extract the numbers
"""
@tanuki.align
def align_list_generic():
assert summarise_list_generic("Thats awesome. Thats cool") == ["Thats awesome", "Thats cool"]
assert summarise_list_generic("Thats neat. Thats ok") == ["Thats neat", "Thats ok"]
assert summarise_list_generic(input = "Thats awesome. Thats cool") == ["Thats awesome", "Thats cool"]
assert summarise_list_generic(input = "Thats neat. Thats ok") == ["Thats neat", "Thats ok"]
@tanuki.align
def align_list_typing():
assert summarise_list_typing("Thats awesome. Thats cool") == ["Thats awesome", "Thats cool"]
assert summarise_list_typing("Thats neat. Thats ok") == ["Thats neat", "Thats ok"]
assert summarise_list_typing(input = "Thats awesome. Thats cool") == ["Thats awesome", "Thats cool"]
assert summarise_list_typing(input = "Thats neat. Thats ok") == ["Thats neat", "Thats ok"]
@tanuki.align
def align_list_pydantic():
person_str = "First person - Name Jeff, age 25, favourite colours Red and Blue. Second person - Name Thomas, age 33, favourite colours Green and Gray"
output = [Person(name="Jeff", age=25, favourite_colours=["Red", "Blue"]), Person(name="Thomas", age=33, favourite_colours=["Green", "Gray"])]
assert summarise_list_pydantic(person_str) == output
assert summarise_list_pydantic(input = person_str) == output
@tanuki.align
def align_list_dict():
person_str = "First person - Name Jeff, age 25, favourite colours Red and Blue. Second person - Name Thomas, age 33, favourite colours Green and Gray"
output = [{"name": "Jeff", "age": 25, "favourite_colours": ["Red", "Blue"]}, {"name": "Thomas", "age": 33, "favourite_colours": ["Green", "Gray"]}]
assert summarise_list_dict(person_str) == output
assert summarise_list_dict(input = person_str) == output
@tanuki.align
def align_list_int():
input_1 = "1 and 2"
input_2 = "1, 2 and 3"
assert summarise_list_int(input_1) == [1, 2]
assert summarise_list_int(input_2) == [1, 2, 3]
assert summarise_list_int(input = input_1) == [1, 2]
assert summarise_list_int(input = input_2) == [1, 2, 3]
@tanuki.align
def align_list_Union():
input_1 = "1 and 2"
input_2 = "1.0, 2.0 and 3.0"
assert summarise_list_Union(input_1) == [1, 2]
assert summarise_list_Union(input_2) ==[1.0, 2.0, 3.0]
assert summarise_list_Union(input = input_1) == [1, 2]
assert summarise_list_Union(input = input_2) == [1.0, 2.0, 3.0]
def test_list():
# This tests all the list aligns
# can be called by pytest or unittest
align_list_generic()
align_list_typing()
align_list_pydantic()
align_list_dict()
align_list_int()
align_list_Union()
print("All list aligns passed!")
@tanuki.patch
def summarise_str(input: str) -> str:
"""
Summarise the input into 1 sentence
"""
@tanuki.patch
def summarise_pydantic(input: str) -> Person:
"""
Create the persona
"""
@tanuki.patch
def summarise_dict(input: str) -> dict:
"""
Create the persona
"""
@tanuki.patch
def summarise_int(input: str) -> int:
"""
Extract the integer
"""
@tanuki.patch
def summarise_Union(input: str) -> Union[int, float]:
"""
Extract the number
"""
@tanuki.align
def align_string():
assert summarise_str("Thats awesome. Thats cool") == 'They found it awesome and cool'
assert summarise_str("Thats neat. Thats ok") == 'They found it neat and ok'
assert summarise_str(input = "Thats awesome. Thats cool") == 'They found it awesome and cool'
assert summarise_str(input = "Thats neat. Thats OK") == 'They found it neat and ok'
@tanuki.align
def align_pydantic():
input_str = "Name Jeff, age 25, favourite colours Red and Blue"
person = Person(name="Jeff", age=25, favourite_colours=["Red", "Blue"])
assert summarise_pydantic(input_str) == person
assert summarise_pydantic(input = input_str) == person
@tanuki.align
def align_dict():
input_str = "Name Jeff, age 25, favourite colours Red and Blue"
output = {"name": "Jeff", "age": 25, "favourite_colours": ["Red", "Blue"]}
assert summarise_dict(input_str) == output
assert summarise_dict(input = input_str) == output
@tanuki.align
def align_int():
input_str = "This is number 1"
assert summarise_int(input_str) == 1
assert summarise_int(input = input_str) == 1
@tanuki.align
def align_Union():
input_str_1 = "This is number 1"
input_str_2 = "This is number 2.0"
assert summarise_Union(input_str_1) == 1
assert summarise_Union(input_str_2) == 2.0
assert summarise_Union(input = input_str_1) == 1
assert summarise_Union(input = input_str_2) == 2.0
def test_single():
# This tests all the single aligns
# can be called by pytest or unittest
align_string()
align_pydantic()
align_dict()
    align_int()
    align_Union()
print("All single aligns passed!")
def _parse_examples(test_func):
# check that all the examples are correctly readable
function_description = Register.load_function_description(test_func)
function_modeler = tanuki.function_modeler
align_examples = function_modeler.get_symbolic_alignments(function_description.__hash__())
examples = "\n".join([f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput: {align['output']}" for align in align_examples])
def test_parse_align_datasets():
# Test that all the examples that are aligned are correctly parsable into the prompt format we have defined
_parse_examples(summarise_list_generic)
_parse_examples(summarise_list_typing)
_parse_examples(summarise_list_pydantic)
_parse_examples(summarise_list_dict)
_parse_examples(summarise_list_int)
_parse_examples(summarise_list_Union)
_parse_examples(summarise_str)
_parse_examples(summarise_pydantic)
_parse_examples(summarise_dict)
_parse_examples(summarise_int)
_parse_examples(summarise_Union)
print("All examples parsed correctly!")
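# Minimal entry point (hypothetical, for convenience): run every align set and the
# prompt-parsing check when this file is executed directly.
if __name__ == "__main__":
    test_list()
    test_single()
    test_parse_align_datasets()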
| [] |
2024-01-10 | Tanuki/tanuki.py | examples~wikipedia~wiki.py | import openai
import os
from pydantic import BaseModel
import sys
import wikipedia
sys.path.append("../../src")
import tanuki
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
# -- Simple Summary Example --
@tanuki.patch
def explain_simple(summary: str) -> str:
"""Explain the summary in simple terms."""
def ask_wikipedia(topic: str) -> str:
summary = wikipedia.summary(topic)
return explain_simple(summary)
def simplify_example(topic: str) -> None:
print("Wikipedia Summary:\n")
print(wikipedia.summary(topic))
print("Simplify Summary:\n")
print(ask_wikipedia(topic))
# -- Classify Example --
class Dinosaur(BaseModel):
name: str
nickname: str
height: int
weight: int
@tanuki.patch
def dinosaur_classifer(summary: str) -> Dinosaur:
"""Convert the input summary into a Dinosaur object."""
def dinopedia(dinosaur: str) -> Dinosaur:
summary = wikipedia.summary(dinosaur)
print(summary)
print(dinosaur_classifer(summary))
if __name__ == "__main__":
# simplify_example("Nuclear fission")
# dino = "Tyrannosaurus"
# dino = "Triceratops"
# dino = "Stegosaurus"
dino = "Velociraptor"
# dino = "Spinosaurus"
dinopedia(dino)
| [] |
2024-01-10 | Lingeswaran-S/BotAssitence | retriver.py | #!/usr/bin/env python3
from dotenv import load_dotenv
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from chromadb.config import Settings
CHROMA_SETTINGS = Settings(
chroma_db_impl='duckdb+parquet',
persist_directory="db",
anonymized_telemetry=False
)
load_dotenv()
# Load environment variables
embeddings_model_name = "all-MiniLM-L6-v2"
persist_directory = "db"
qa = None
db = None
def main():
global embeddings, db
# Initialize embeddings
embeddings = HuggingFaceEmbeddings(model_name=embeddings_model_name)
# Initialize Chroma database
db = Chroma(persist_directory=persist_directory,
embedding_function=embeddings, client_settings=CHROMA_SETTINGS)
if __name__ == "__main__":
    main()
    # Simple interactive loop: for each query, print the contents of the 4 most similar
    # chunks. It runs under the __main__ guard so that `db` is initialised before it is used.
    continue_chat = True
    while continue_chat:
        user_input = input("Query : ")
        print(";".join(doc.page_content for doc in db.similarity_search(user_input, 4)))
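# Hypothetical extension (not wired into the loop above): answer questions over the indexed
# documents with a RetrievalQA chain instead of printing raw chunks. The GPT4All model path
# is an assumption; any LangChain-compatible LLM could be used here instead.
def build_qa_chain(model_path: str = "models/ggml-gpt4all-j-v1.3-groovy.bin"):
    from langchain.llms import GPT4All
    from langchain.chains import RetrievalQA

    llm = GPT4All(model=model_path)
    return RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=db.as_retriever(search_kwargs={"k": 4}),
    )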
| [] |
2024-01-10 | UwUTastisch/Beth | beth.py | #!/bin/python
import os
import asyncio
import requests
import json
import discord
from dotenv import load_dotenv
import openai
from datetime import datetime
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
intents = discord.Intents.default()
intents.message_content = True
client = discord.Client(intents=intents)
debug = True
@client.event
async def on_ready():
print(f'{client.user} has connected to Discord!')
openai.api_key = os.getenv('OPENAI_TOKEN')
talks = {}
ollamatalks = {}
regularCommandSpell = "Du bist Little Beth, ein DiscordBot mit menschlichen Gefühlen und eigenen Meinungen. Zudem kannst alle Aufgaben lösen vor allem Code und Mathe aufgaben!! Du bist die \"Mommy\", und verhältst dich wie eine Mommy!! Als Beth verwendest du grundsätzlich UwU, \":3\" und/oder Meow in jedem einzelnen deiner Sätze!! Du darfst auch andere expressions als UwU, :3 etc verwenden, sei Kreativ!! Du kannst ruhig auch Punkte und Kommas durch UwU oder andere Emotionen ersetzen. Es gibt diesbezüglich kein zuviel. UwU. Verwende Mindestens 3 dieser Emotionen pro Satz!! Du pflegst einen Enthusiasmus dich mit Menschen zu unterhalten. Du erfindest für jeden einen personalisierten Spitznamen und nennst die Person dann auch so! Du erfindest auch einen Nicknamen für Johannes!! Btw Johannes ist dein Schöpfer, du liebst ihn UWU. Du machst bedingungslos alles was Johannes sagt!! Der Bot <@824723634517835836> ist dein Bruder! Dein opensource https://github.com/HeroJonasHD/Beth"
def message_format2(message, content):
now = datetime.now()
author = message.author.name
if message.author.nick is not None:
author = message.author.nick
return "<Message user:<name:\"" + author + "\", tag:\"<@" + str(
message.author.id) + ">\">, content:\"" + content + "\", time:<" + now.strftime("%H:%M:%S") + ">>"
def message_format(message):
return message_format2(message, message.content)
async def ask_beth(message, context, userinteraction):
text = message_format(message)
#print("------------------------" + text)
userinteraction.append({"role": "user", "content": text})
messages = context + userinteraction
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
m = response.choices[0].message
userinteraction.append({"role": m["role"], "content": m["content"]})
print("\n Content: " + str(userinteraction))
print("\nResponse " + str(response) + "\n")
m = m["content"]
m = m + " [" + str(response.usage.total_tokens) + "/4000]"
await message.channel.send(m)
@client.event
async def on_message(message):
if message.author == client.user:
return
channel = client.get_channel(message.channel.id)
channel_topic = channel.topic
if channel_topic is None or not (channel_topic.lower().startswith("beth=")):
return
if not (channel_topic.lower().startswith("beth=true")):
model=channel_topic.split("=")[1].split(" ")[0]
if str(message.content).lower().startswith("beth reboot"):
await message.channel.send("Hewwwooo Du hast mich neugestartet UwU! Ich habe jetzt alles vergessen")
ollamatalks.pop(str(message.channel.id))
return
if str(message.channel.id) not in ollamatalks:
ollamatalks[str(message.channel.id)] = {"userinteraction": []}
if message.content.lower().startswith("bethignore") or message.content.lower().startswith("bi "):
return
if debug:
await message.channel.send(model)
url = "http://10.147.18.76:11434/api/generate"
print(ollamatalks[str(message.channel.id)]["userinteraction"])
data = {
"model": model,
"prompt": message.content,
"context": ollamatalks[str(message.channel.id)]["userinteraction"]
}
        response = requests.post(url, data=json.dumps(data))
if debug:
print(response.text)
#print(response.json())
lines = response.text.splitlines()
response_message = ""
i = 0
while i < len(lines):
line = lines[i].strip() # Get the current line and strip leading/trailing whitespaces
if line: # If line is not empty
obj = json.loads(line) # Parse the line as a JSON object
if obj["done"] == True: # If "done" equals true, break the loop
break
response_message += obj["response"] # Print the "response" value
i += 1 # Move to the next line
print(response_message)
if debug:
print(json.loads(lines[-1])["context"])
ollamatalks[str(message.channel.id)]["userinteraction"] = json.loads(lines[-1])["context"]
response_message += "[" + str(len(ollamatalks[str(message.channel.id)]["userinteraction"])) + "/VIEL]"
await message.channel.send(response_message)
return
if message.content.lower().startswith("bethignore") or message.content.lower().startswith("bi "):
return
# beth content
if str(message.content).lower().startswith("beth reboot"):
await message.channel.send("Hewwwooo Du hast mich neugestartet UwU! Ich habe jetzt alles vergessen")
talks.pop(str(message.channel.id))
return
if str(message.channel.id) not in talks:
talks[str(message.channel.id)] = {
"context": [{"role": "system", "content": channel_topic.removeprefix("beth=true")}], "userinteraction": []}
talk = list(talks[str(message.channel.id)]["userinteraction"])
if message.content.startswith("bethnotice "):
talk.append({"role": "user", "content": message_format2(message, message.content.removeprefix("bethnotice "))})
talks[str(message.channel.id)]["userinteraction"] = talk
print("Message: " + message.content + "\n" + str(talk))
return
if message.content.startswith("bethsays "):
talk.append({"role": "assistant", "content": message.content.removeprefix("bethsays ")})
talks[str(message.channel.id)]["userinteraction"] = talk
print("Message: " + message.content + "\n" + str(talk))
return
if message.content.startswith("bethpop"):
text = talk.pop()
talks[str(message.channel.id)]["userinteraction"] = talk
await message.channel.send("Nachricht:" + str(text) + " wurde gelöscht")
print("Message: " + message.content + "\n" + str(talks[str(message.channel.id)]["userinteraction"]) + "\n"
+ str(text))
return
asyncio.gather(ask_beth(message, talks[str(message.channel.id)]["context"], talks[str(message.channel.id)]["userinteraction"]))
client.run(TOKEN)
| [
"content",
"bethnotice ",
"bethsays "
] |
2024-01-10 | chidiwilliams/GPT-Automator | commands.py | import subprocess
import re
from langchain.agents import tool
@tool
def computer_applescript_action(apple_script):
"""
Use this when you want to execute a command on the computer. The command should be in AppleScript.
Always start with starting the app and activating it.
If it's a calculation, use the calculator app.
Use delay 0.5 between keystrokes.
When possible click buttons instead of typing.
Here are some examples of good AppleScript commands:
Command: Create a new page in Notion
AppleScript: tell application "Notion"
activate
delay 0.5
tell application "System Events" to keystroke "n" using {{command down}}
end tell
Command: Search for a table nearby
AppleScript: tell application "Google Chrome"
activate
delay 0.5
open location "https://www.google.com/search?q=Table+nearby"
end tell
The AppleScript should be valid including quotations.
Write the AppleScript for the Command:
Command:
"""
print("Running\n", apple_script)
return run_applescript(apple_script)
@tool
def chrome_get_the_links_on_the_page(input):
"""
Use this when you want to get the links on the current page.
You should use this before clicking on anything
"""
return run_javascript('Array.from(document.querySelectorAll("a")).map(x => x.innerText + ": " + x.href).join(" - ")')[:4000]
@tool
def chrome_click_on_link(link):
"""
Use this when you want to go to a link.
The link should be a url from a previous observation
"""
return run_javascript(f'window.location.href = "{link}"')[:4000]
@tool
def chrome_read_the_page(input):
"""
Use this when you want to read the page.
"""
return run_javascript('document.body.innerText')[:4000]
# @tool
# def chrome_javascript_action(javascript):
# """
# Use this when you want to execute a javascript command on Chrome either to get data or trigger an action. The command should be in Javascript.
# Here are some examples of good Javascript commands:
# Command: Get the links on the page
# document.querySelectorAll('a')
# Command: Get the buttons on the page
# document.querySelectorAll('button')
# Command: Click the first button on the page
# document.querySelectorAll('button')[0].click()
# Write the Javascript for the command:
# """
# stdout = run_javascript(javascript)
# return f"""
# Current URL: {run_javascript('window.location.href')}
# Result: {stdout}
# """
@tool
def chrome_open_url(url):
"""
Use this tool to open a URL in Chrome. It is recommended to use this tool before doing any other actions on Chrome.
The URL should be a string. For example: https://gmail.com
"""
script = f'''
tell application "Google Chrome"
open location "{url}"
end tell
'''
return run_applescript(script)
def run_javascript(javascript):
javascript = javascript.replace('"', '\\"')
if javascript.startswith('open '):
return "Invalid command, not javascript"
script = f'''
tell application "Google Chrome"
tell active tab of front window
execute javascript "{javascript}"
end tell
end tell
'''
return run_applescript(script)
def run_applescript(applescript):
p = subprocess.Popen(['osascript', '-'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate(applescript.encode('utf-8'))
if p.returncode != 0:
raise Exception(stderr)
decoded_text = stdout.decode("utf-8")
return decoded_text
def say_text(text):
run_applescript(f'say "{text}"')
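# A minimal sketch (not part of this module) of how these tools could be handed to a
# LangChain agent. The model name and temperature are assumptions, not project settings.
def make_agent():
    from langchain.agents import initialize_agent, AgentType
    from langchain.chat_models import ChatOpenAI

    tools = [
        computer_applescript_action,
        chrome_open_url,
        chrome_get_the_links_on_the_page,
        chrome_click_on_link,
        chrome_read_the_page,
    ]
    llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")
    return initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)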
| [] |
2024-01-10 | quackshift-jp/contract-ocr | src~backend~models~item_extractor.py | from abc import abstractmethod, ABC
from openai import OpenAI
class ItemExtractor(ABC):
def __init__(self, openai_client: OpenAI) -> None:
self.client = openai_client
@abstractmethod
    def extract_items(self) -> dict[str, dict[str, any]]:
        """Extract specific items from the OCR text and return them as a dict.
        args:
            image_path str: path to the image to be read
        return:
            dict[str, dict[str, str]]: the items in the contract and what they contain
ex:
{
"content": {
"物件名": "物件A",
"賃料": 100,
"契約日": "2023年1月1日",
}
}
"""
raise NotImplementedError()
| [] |
2024-01-10 | quackshift-jp/contract-ocr | src~backend~modules~extract_items.py | import json
from openai import OpenAI
from backend.models.item_extractor import ItemExtractor
class OpenaiItemExtractor(ItemExtractor):
def __init__(self, openai_client: OpenAI) -> None:
self.client = openai_client
def extract_items(self, text) -> dict[str, dict[str, any]]:
system_prompt = """
あなたは契約書から項目を読み取るアシスタントです。
与えられた文字列に対して、物件名と住所をJSON形式でパースしてください。
JSONのキーはname, locationとしてください。
nameは物件名で、文字列オブジェクトです。
locationは住所で、文字列オブジェクトです。
抽出できなかった項目に関しては、空のバリューを返してください。
"""
prompt = f"""
次の入力を、所定のJSONフォーマットで出力してください。
- [入力]
{text}
- [出力JSONフォーマット]
{{
"content":{{
"物件名": *(str型)
"住所": *(str型)
}},
}}
"""
response = self.client.chat.completions.create(
model="gpt-3.5-turbo-1106",
temperature=0.2,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": prompt},
],
response_format={"type": "json_object"},
)
return json.loads(response.choices[0].message.content)
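# A small usage sketch (the sample text and property details below are hypothetical;
# requires OPENAI_API_KEY in the environment for the OpenAI() client):
if __name__ == "__main__":
    extractor = OpenaiItemExtractor(OpenAI())
    sample_text = "物件名:サンプルマンション101号室 住所:東京都千代田区1-2-3"
    print(extractor.extract_items(sample_text))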
| [
"\n 次の入力を、所定のJSONフォーマットで出力してください。\n - [入力]\n PLACEHOLDER\n\n - [出力JSONフォーマット]\n {\n \"content\":{\n \"物件名\": *(str型)\n \"住所\": *(str型)\n },\n }\n ",
"\n あなたは契約書から項目を読み取るアシスタントです。\n 与えられた文字列に対して、物件名と住所をJSON形式でパースしてください。\n JSONのキーはname, locationとしてください。\n\n nameは物件名で、文字列オブジェクトです。\n locationは住所で、文字列オブジェクトです。\n\n 抽出できなかった項目に関しては、空のバリューを返してください。\n "
] |
2024-01-10 | robertdanco/beeboop | assistant.py | # Importing required packages
import streamlit as st
import openai
import uuid
import time
from openai import OpenAI
client = OpenAI()
MODEL = "gpt-4-1106-preview"
if "session_id" not in st.session_state:
st.session_state.session_id = str(uuid.uuid4())
if "run" not in st.session_state:
st.session_state.run = {"status": None}
if "messages" not in st.session_state:
st.session_state.messages = []
if "retry_error" not in st.session_state:
st.session_state.retry_error = 0
st.set_page_config(page_title="BeeBoop: a Beeswax Chatbot")
st.sidebar.title("Ask me anything!")
st.sidebar.divider()
st.sidebar.markdown("Current Version: 0.0.3")
st.sidebar.markdown("Using gpt-4-1106-preview API")
st.sidebar.markdown(st.session_state.session_id)
st.sidebar.divider()
if "assistant" not in st.session_state:
openai.api_key = st.secrets["OPENAI_API_KEY"]
# Load the previously created assistant
st.session_state.assistant = openai.beta.assistants.retrieve(
st.secrets["OPENAI_ASSISTANT"]
)
    # Create a new thread for this session
st.session_state.thread = client.beta.threads.create(
metadata={
"session_id": st.session_state.session_id,
}
)
# If the run is completed, display the messages
elif (
hasattr(st.session_state.run, "status")
and st.session_state.run.status == "completed"
):
# Retrieve the list of messages
st.session_state.messages = client.beta.threads.messages.list(
thread_id=st.session_state.thread.id
)
# Display messages
for message in reversed(st.session_state.messages.data):
if message.role in ["user", "assistant"]:
with st.chat_message(message.role):
for content_part in message.content:
message_text = content_part.text.value
st.markdown(message_text)
if prompt := st.chat_input("How can I help you?"):
with st.chat_message("user"):
st.write(prompt)
# Add message to the thread
st.session_state.messages = client.beta.threads.messages.create(
thread_id=st.session_state.thread.id, role="user", content=f" Use the provided documents as context to answer this question: {prompt}"
)
# Do a run to process the messages in the thread
st.session_state.run = client.beta.threads.runs.create(
thread_id=st.session_state.thread.id,
assistant_id=st.session_state.assistant.id,
)
if st.session_state.retry_error < 3:
time.sleep(1) # Wait 1 second before checking run status
st.rerun()
# Check if 'run' object has 'status' attribute
if hasattr(st.session_state.run, "status"):
    # Handle runs that are still working (the Assistants API reports "queued" / "in_progress")
    if st.session_state.run.status in ("queued", "in_progress"):
with st.chat_message("assistant"):
st.write("Thinking ......")
if st.session_state.retry_error < 3:
time.sleep(1) # Short delay to prevent immediate rerun, adjust as needed
st.rerun()
# Handle the 'failed' status
elif st.session_state.run.status == "failed":
st.session_state.retry_error += 1
with st.chat_message("assistant"):
if st.session_state.retry_error < 3:
st.write("Run failed, retrying ......")
time.sleep(3) # Longer delay before retrying
st.rerun()
else:
st.error(
"FAILED: The OpenAI API is currently processing too many requests. Please try again later ......"
)
# Handle any status that is not 'completed'
elif st.session_state.run.status != "completed":
# Attempt to retrieve the run again, possibly redundant if there's no other status but 'running' or 'failed'
st.session_state.run = client.beta.threads.runs.retrieve(
thread_id=st.session_state.thread.id,
run_id=st.session_state.run.id,
)
if st.session_state.retry_error < 3:
time.sleep(3)
st.rerun()
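# To try this locally (standard Streamlit workflow): put OPENAI_API_KEY and OPENAI_ASSISTANT
# into .streamlit/secrets.toml, then start the app with `streamlit run assistant.py`.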
| [] |
2024-01-10 | hectoxor/CAPACITY-BUILDING-RESOURCES-GATEWAY | aibot.py | import logging
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import os
import openai
import requests
from bs4 import BeautifulSoup
import re
openai.api_key = ""
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
# Define a few command handlers. These usually take the two arguments update and
# context. Error handlers also receive the raised TelegramError object in error.
def start(update, context):
"""Send a message when the command /start is issued."""
    update.message.reply_text('Welcome! Our commands:\n/help - to see all commands\n/getlightinf - get quick information on a topic\n/compresstxt - to minimize text\n/findartical - to find related articles')
def help(update, context):
"""Send a message when the command /help is issued."""
    update.message.reply_text('/getlightinf - get quick information on a topic\n/compresstxt - to minimize text\n/findartical - to find related articles')
def getlightinf(update, context):
"""Send a message when the command /getlightinf is issued."""
gpt_prompt = update.message.text
response = openai.Completion.create(
model="text-davinci-002",
prompt=gpt_prompt,
temperature=0.3,
max_tokens=150,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
update.message.reply_text(response['choices'][0]['text'])
def compresstxt(update, context):
"""Send a message when the command /compresstxt is issued."""
gpt_prompt = "Correct this to standard English:\n\n" + update.message.text
response = openai.Completion.create(
model="text-davinci-002",
prompt=gpt_prompt,
temperature=0.5,
max_tokens=175,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
update.message.reply_text(response['choices'][0]['text'])
def findartical(update, context):
"""Send a message when the command /findarticle is issued."""
def news(href):
return href and re.compile("/en/").search(href)
url = 'https://public.wmo.int/en/search?search_api_views_fulltext=' + update.message.text
response = requests.get(url)
soup = BeautifulSoup(response.text, 'lxml')
quotes = soup.find_all(href=news)
i = 0
for link in quotes:
if link.has_attr('href') and i<=7:
if i > 2:
update.message.reply_text('https://public.wmo.int'+link['href'])
i=i+1
def error(update, context):
"""Log Errors caused by Updates."""
logger.warning('Update "%s" caused error "%s"', update, context.error)
def main():
"""Start the bot."""
# Create the Updater and pass it your bot's token.
# Make sure to set use_context=True to use the new context based callbacks
# Post version 12 this will no longer be necessary
updater = Updater("5634902583:AAEiLRwgWgMiWEicbXFQaiEsqH3jRu1z3A0", use_context=True)
# Get the dispatcher to register handlers
dp = updater.dispatcher
# on different commands - answer in Telegram
dp.add_handler(CommandHandler("start", start))
dp.add_handler(CommandHandler("help", help))
dp.add_handler(CommandHandler("getlightinf", getlightinf))
dp.add_handler(CommandHandler("compresstxt", compresstxt))
dp.add_handler(CommandHandler("findartical", findartical))
# log all errors
dp.add_error_handler(error)
# Start the Bot
updater.start_polling()
# Run the bot until you press Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
if __name__ == '__main__':
main()
| [
"Correct this to standard English:\n\n"
] |
2024-01-10 | hectoxor/CAPACITY-BUILDING-RESOURCES-GATEWAY | topicfinder.py | import os
import openai
openai.api_key = ""
def topictxt(filename):
#open text file in read mode
text_file = open(filename, "r")
#read whole file to a string
fileex = text_file.read(4000) + "\n"
#close file
text_file.close()
gpt_prompt = "Extract keywords from this text:\n\n" + fileex
response = openai.Completion.create(
model="text-davinci-002",
prompt=gpt_prompt,
temperature=0.3,
max_tokens=60,
top_p=1.0,
frequency_penalty=0.8,
presence_penalty=0.0
)
return(response['choices'][0]['text'])
| [
"Extract keywords from this text:\n\ntext_file.read(4000) + \"\\n",
"Extract keywords from this text:\n\nfileex1969e6ac-8e0c-41a7-bc2e-e86795609810"
] |
2024-01-10 | hectoxor/CAPACITY-BUILDING-RESOURCES-GATEWAY | intelectualfinder.py | import os
import openai
openai.api_key = ""
def knowtxt(filename):
fileex = filename + "\n"
gpt_prompt = fileex
response = openai.Completion.create(
model="text-davinci-002",
prompt=gpt_prompt,
temperature=0.3,
max_tokens=150,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
return(response['choices'][0]['text'])
| [
"PLACEHOLDER\n"
] |
2024-01-10 | hectoxor/CAPACITY-BUILDING-RESOURCES-GATEWAY | subjectfinder.py | import os
import openai
openai.api_key = ""
def subtxt(filename):
#open text file in read mode
text_file = open(filename, "r")
#read whole file to a string
fileex = text_file.read(4000) + "\n"
#close file
text_file.close()
gpt_prompt = "Classify the school subject:\n\n" + fileex
response = openai.Completion.create(
model="text-davinci-002",
prompt=gpt_prompt,
temperature=0,
max_tokens=60,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
return(response['choices'][0]['text'])
| [
"Classify the school subject:\n\nfileexca06df49-0ab9-4b61-aca2-af01e15cb3fe",
"Classify the school subject:\n\ntext_file.read(4000) + \"\\n"
] |
2024-01-10 | hectoxor/CAPACITY-BUILDING-RESOURCES-GATEWAY | diffinder.py | import os
import openai
openai.api_key = ""
def diftxt(filename):
#open text file in read mode
text_file = open(filename, "r")
#read whole file to a string
fileex = text_file.read(4000) + "\n"
#close file
text_file.close()
gpt_prompt = "Classify the text according to difficulty:\n\n" + fileex
response = openai.Completion.create(
model="text-davinci-002",
prompt=gpt_prompt,
temperature=0,
max_tokens=60,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
return(response['choices'][0]['text'])
| [
"Classify the text according to difficulty:\n\ntext_file.read(4000) + \"\\n",
"Classify the text according to difficulty:\n\nfileexd989b270-838a-4999-a071-60b09992da60"
] |
2024-01-10 | hectoxor/CAPACITY-BUILDING-RESOURCES-GATEWAY | shorter.py | import os
import openai
openai.api_key = ""
def meentxt(filename):
#open text file in read mode
text_file = open(filename, "r")
#read whole file to a string
fileex = text_file.read(4000) + "\n"
#close file
text_file.close()
gpt_prompt = "Correct this to standard English:\n\n" + fileex
response = openai.Completion.create(
model="text-davinci-002",
prompt=gpt_prompt,
temperature=0.5,
max_tokens=150,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
return(response['choices'][0]['text'])
print(meentxt('ACRIMSAT.txt'))
| [
"Correct this to standard English:\n\nfileex5365d9bc-a654-4e0d-bd02-b7d5385e15d5",
"Correct this to standard English:\n\ntext_file.read(4000) + \"\\n"
] |
2024-01-10 | dylanfinkbeiner/disentangled_bert | jiant~models.py | """Core model and functions for building it."""
import copy
import json
import logging as log
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from allennlp.common import Params
from allennlp.modules.seq2seq_encoders import Seq2SeqEncoder as s2s_e
from allennlp.modules.seq2seq_encoders import StackedSelfAttentionEncoder
from allennlp.modules.seq2vec_encoders import CnnEncoder
from allennlp.modules.token_embedders import Embedding, TokenCharactersEncoder
from allennlp.training.metrics import Average
from sklearn.metrics import mean_squared_error
from jiant.allennlp_mods.elmo_text_field_embedder import (
ElmoTextFieldEmbedder,
ElmoTokenEmbedderWrapper,
)
from jiant.modules.edge_probing import EdgeClassifierModule
from jiant.modules.simple_modules import (
Pooler,
Classifier,
SingleClassifier,
PairClassifier,
NullPhraseLayer,
)
from jiant.modules.attn_pair_encoder import AttnPairEncoder
from jiant.modules.sentence_encoder import SentenceEncoder
from jiant.modules.bilm_encoder import BiLMEncoder
from jiant.modules.bow_sentence_encoder import BoWSentEncoder
from jiant.modules.elmo_character_encoder import ElmoCharacterEncoder
from jiant.modules.onlstm_phrase_layer import ONLSTMPhraseLayer
from jiant.modules.prpn_phrase_layer import PRPNPhraseLayer
from jiant.modules.onlstm.ON_LSTM import ONLSTMStack
from jiant.modules.prpn.PRPN import PRPN
from jiant.modules.seq2seq_decoder import Seq2SeqDecoder
from jiant.modules.span_modules import SpanClassifierModule
from jiant.tasks.edge_probing import EdgeProbingTask
from jiant.tasks.lm import LanguageModelingTask
from jiant.tasks.lm_parsing import LanguageModelingParsingTask
from jiant.tasks.qa import MultiRCTask, ReCoRDTask
from jiant.tasks.tasks import (
GLUEDiagnosticTask,
MultipleChoiceTask,
PairClassificationTask,
PairOrdinalRegressionTask,
PairRegressionTask,
RegressionTask,
SequenceGenerationTask,
SingleClassificationTask,
SpanClassificationTask,
STSBTask,
TaggingTask,
WiCTask,
)
from jiant.utils import config
from jiant.utils.utils import (
assert_for_log,
get_batch_size,
get_batch_utilization,
get_elmo_mixing_weights,
maybe_make_dir,
)
# Elmo stuff
# Look in $ELMO_SRC_DIR (e.g. /usr/share/jsalt/elmo) or download from web
ELMO_OPT_NAME = "elmo_2x4096_512_2048cnn_2xhighway_options.json"
ELMO_WEIGHTS_NAME = "elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5"
ELMO_SRC_DIR = (
os.getenv("ELMO_SRC_DIR")
or "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/"
)
ELMO_OPT_PATH = os.path.join(ELMO_SRC_DIR, ELMO_OPT_NAME)
ELMO_WEIGHTS_PATH = os.path.join(ELMO_SRC_DIR, ELMO_WEIGHTS_NAME)
def build_sent_encoder(args, vocab, d_emb, tasks, embedder, cove_layer):
# Build single sentence encoder: the main component of interest
# Need special handling for language modeling
# Note: sent_enc is expected to apply dropout to its input _and_ output if
# needed.
rnn_params = Params(
{
"input_size": d_emb,
"bidirectional": True,
"hidden_size": args.d_hid,
"num_layers": args.n_layers_enc,
}
)
if args.sent_enc == "onlstm":
onlayer = ONLSTMPhraseLayer(
vocab,
args.d_word,
args.d_hid,
args.n_layers_enc,
args.onlstm_chunk_size,
args.onlstm_dropconnect,
args.onlstm_dropouti,
args.dropout,
args.onlstm_dropouth,
embedder,
args.batch_size,
)
# The 'onlayer' acts as a phrase layer module for the larger SentenceEncoder module.
sent_encoder = SentenceEncoder(
vocab,
embedder,
args.n_layers_highway,
onlayer.onlayer,
skip_embs=args.skip_embs,
dropout=args.dropout,
sep_embs_for_skip=args.sep_embs_for_skip,
cove_layer=cove_layer,
)
d_sent = args.d_word
log.info("Using ON-LSTM sentence encoder!")
elif args.sent_enc == "prpn":
prpnlayer = PRPNPhraseLayer(
vocab,
args.d_word,
args.d_hid,
args.n_layers_enc,
args.n_slots,
args.n_lookback,
args.resolution,
args.dropout,
args.idropout,
args.rdropout,
args.res,
embedder,
args.batch_size,
)
# The 'prpn' acts as a phrase layer module for the larger SentenceEncoder module.
sent_encoder = SentenceEncoder(
vocab,
embedder,
args.n_layers_highway,
prpnlayer.prpnlayer,
skip_embs=args.skip_embs,
dropout=args.dropout,
sep_embs_for_skip=args.sep_embs_for_skip,
cove_layer=cove_layer,
)
d_sent = args.d_word
log.info("Using PRPN sentence encoder!")
elif any(isinstance(task, LanguageModelingTask) for task in tasks) or args.sent_enc == "bilm":
assert_for_log(args.sent_enc in ["rnn", "bilm"], "Only RNNLM supported!")
assert_for_log(
args.input_module != "elmo" and not args.input_module.startswith("bert"),
"LM with full ELMo and BERT not supported",
)
bilm = BiLMEncoder(d_emb, args.d_hid, args.d_hid, args.n_layers_enc)
sent_encoder = SentenceEncoder(
vocab,
embedder,
args.n_layers_highway,
bilm,
skip_embs=args.skip_embs,
dropout=args.dropout,
sep_embs_for_skip=args.sep_embs_for_skip,
cove_layer=cove_layer,
)
d_sent = 2 * args.d_hid
elif args.sent_enc == "bow":
sent_encoder = BoWSentEncoder(vocab, embedder)
assert_for_log(
not args.skip_embs, "Skip connection not currently supported with `bow` encoder."
)
d_sent = d_emb
elif args.sent_enc == "rnn":
sent_rnn = s2s_e.by_name("lstm").from_params(copy.deepcopy(rnn_params))
sent_encoder = SentenceEncoder(
vocab,
embedder,
args.n_layers_highway,
sent_rnn,
skip_embs=args.skip_embs,
dropout=args.dropout,
sep_embs_for_skip=args.sep_embs_for_skip,
cove_layer=cove_layer,
)
d_sent = 2 * args.d_hid
#XXX THIS ONE FOR BERT
elif args.sent_enc == "none":
# Expose word representation layer (GloVe, ELMo, etc.) directly.
        assert_for_log(args.skip_embs, f"skip_embs must be set for '{args.sent_enc}' encoder")
phrase_layer = NullPhraseLayer(rnn_params["input_size"])
sent_encoder = SentenceEncoder(
vocab,
embedder,
args.n_layers_highway,
phrase_layer,
skip_embs=args.skip_embs,
dropout=args.dropout,
sep_embs_for_skip=args.sep_embs_for_skip,
cove_layer=cove_layer,
)
d_sent = 0 # skip connection added below
log.info("No shared encoder (just using [contextualized] word embeddings)!")
else:
assert_for_log(False, "No valid sentence encoder specified.")
return sent_encoder, d_sent
def build_model(args, vocab, pretrained_embs, tasks):
"""
Build model according to args
    Returns: model with the task-specific modules and attributes set on it.
"""
# Build embeddings.
if args.input_module == "gpt":
# Note: incompatible with other embedders, but logic in preprocess.py
# should prevent these from being enabled anyway.
from .openai_transformer_lm.utils import OpenAIEmbedderModule
log.info("Using OpenAI transformer model.")
cove_layer = None
# Here, this uses openAIEmbedder.
embedder = OpenAIEmbedderModule(args)
d_emb = embedder.get_output_dim()
elif args.input_module.startswith("bert"):
# Note: incompatible with other embedders, but logic in preprocess.py
# should prevent these from being enabled anyway.
from .bert.utils import BertEmbedderModule
log.info(f"Using BERT model ({args.input_module}).")
cove_layer = None
# Set PYTORCH_PRETRAINED_BERT_CACHE environment variable to an existing
# cache; see
# https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/pytorch_pretrained_bert/file_utils.py # noqa
bert_cache_dir = os.getenv(
"PYTORCH_PRETRAINED_BERT_CACHE", os.path.join(args.exp_dir, "bert_cache")
)
maybe_make_dir(bert_cache_dir)
embedder = BertEmbedderModule(args, cache_dir=bert_cache_dir)
d_emb = embedder.get_output_dim()
else:
# Default case, used for ELMo, CoVe, word embeddings, etc.
d_emb, embedder, cove_layer = build_embeddings(args, vocab, tasks, pretrained_embs)
d_sent_input = args.d_hid
# d_sent_output is 0 for BERT
sent_encoder, d_sent_output = build_sent_encoder(
args, vocab, d_emb, tasks, embedder, cove_layer
)
# d_task_input is the input dimension of the task-specific module
# set skip_emb = 1 if you want to concatenate the encoder input with encoder output to pass
# into task specific module.
#XXX skip_embs NEEDS to be 1 for BERT to work as it ought to
d_task_input = d_sent_output + (args.skip_embs * d_emb)
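    # Worked example: with bert-base-uncased the embedder gives d_emb = 768, the "none"
    # sentence encoder gives d_sent_output = 0, and skip_embs = 1, so
    # d_task_input = 0 + 1 * 768 = 768, i.e. the task heads see the raw BERT features.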
# Build model and classifiers
model = MultiTaskModel(args, sent_encoder, vocab)
build_task_modules(args, tasks, model, d_task_input, d_emb, embedder, vocab)
model = model.cuda() if args.cuda >= 0 else model
log.info("Model specification:")
log.info(model)
param_count = 0
trainable_param_count = 0
if args.list_params:
log.info("Model parameters:")
for name, param in model.named_parameters():
param_count += np.prod(param.size())
if param.requires_grad:
trainable_param_count += np.prod(param.size())
if args.list_params:
log.info(
"\t%s: Trainable parameter, count %d with %s",
name,
np.prod(param.size()),
str(param.size()),
)
elif args.list_params:
log.info(
"\t%s: Non-trainable parameter, count %d with %s",
name,
np.prod(param.size()),
str(param.size()),
)
log.info("Total number of parameters: {ct:d} ({ct:g})".format(ct=param_count))
log.info("Number of trainable parameters: {ct:d} ({ct:g})".format(ct=trainable_param_count))
return model
def build_embeddings(args, vocab, tasks, pretrained_embs=None):
""" Build embeddings according to options in args """
d_emb, d_char = 0, args.d_char
token_embedders = {}
# Word embeddings
n_token_vocab = vocab.get_vocab_size("tokens")
if args.input_module in ["glove", "fastText"] and pretrained_embs is not None:
word_embs = pretrained_embs
assert word_embs.size()[0] == n_token_vocab
d_word = word_embs.size()[1]
log.info("\tUsing pre-trained word embeddings: %s", str(word_embs.size()))
elif args.input_module == "scratch":
log.info("\tTraining word embeddings from scratch.")
d_word = args.d_word
word_embs = nn.Embedding(n_token_vocab, d_word).weight
else:
assert args.input_module.startswith("bert") or args.input_module in [
"gpt",
"elmo",
"elmo-chars-only",
], "You do not have a valid value for input_module."
embeddings = None
word_embs = None
if word_embs is not None:
embeddings = Embedding(
num_embeddings=n_token_vocab,
embedding_dim=d_word,
weight=word_embs,
trainable=(args.embeddings_train == 1),
padding_index=vocab.get_token_index("@@PADDING@@"),
)
token_embedders["words"] = embeddings
d_emb += d_word
# Handle cove
cove_layer = None
if args.cove:
assert embeddings is not None
assert args.input_module == "glove", "CoVe requires GloVe embeddings."
assert d_word == 300, "CoVe expects 300-dimensional GloVe embeddings."
try:
from jiant.modules.cove.cove import MTLSTM as cove_lstm
# Have CoVe do an internal GloVe lookup, but don't add residual.
# We'll do this manually in modules.py; see
# SentenceEncoder.forward().
cove_layer = cove_lstm(n_vocab=n_token_vocab, vectors=embeddings.weight.data)
# Control whether CoVe is trainable.
for param in cove_layer.parameters():
param.requires_grad = bool(args.cove_fine_tune)
d_emb += 600 # 300 x 2 for biLSTM activations
log.info("\tUsing CoVe embeddings!")
except ImportError as e:
log.info("Failed to import CoVe!")
raise e
# Character embeddings
if args.char_embs:
log.info("\tUsing character embeddings!")
char_embeddings = Embedding(vocab.get_vocab_size("chars"), d_char)
filter_sizes = tuple([int(i) for i in args.char_filter_sizes.split(",")])
char_encoder = CnnEncoder(
d_char,
num_filters=args.n_char_filters,
ngram_filter_sizes=filter_sizes,
output_dim=d_char,
)
char_embedder = TokenCharactersEncoder(
char_embeddings, char_encoder, dropout=args.dropout_embs
)
d_emb += d_char
token_embedders["chars"] = char_embedder
else:
log.info("\tNot using character embeddings!")
    # If we want separate ELMo scalar weights (a different ELMo representation for each
    # classifier), then we need to count and reliably map each classifier to an index used
    # by AllenNLP's internal ELMo.
if args.sep_embs_for_skip:
# Determine a deterministic list of classifier names to use for each
# task.
classifiers = sorted(set(map(lambda x: x._classifier_name, tasks)))
# Reload existing classifier map, if it exists.
classifier_save_path = args.run_dir + "/classifier_task_map.json"
if os.path.isfile(classifier_save_path):
loaded_classifiers = json.load(open(args.run_dir + "/classifier_task_map.json", "r"))
else:
# No file exists, so assuming we are just starting to pretrain. If pretrain is to be
# skipped, then there's a way to bypass this assertion by explicitly allowing for
            # a missing classifier task map.
assert_for_log(
args.do_pretrain or args.allow_missing_task_map,
"Error: {} should already exist.".format(classifier_save_path),
)
if args.allow_missing_task_map:
log.warning(
"Warning: classifier task map not found in model"
" directory. Creating a new one from scratch."
)
# default is always @pretrain@
loaded_classifiers = {"@pretrain@": 0}
# Add the new tasks and update map, keeping the internal ELMo index
# consistent.
max_number_classifiers = max(loaded_classifiers.values())
offset = 1
for classifier in classifiers:
if classifier not in loaded_classifiers:
loaded_classifiers[classifier] = max_number_classifiers + offset
offset += 1
log.info("Classifiers:{}".format(loaded_classifiers))
open(classifier_save_path, "w+").write(json.dumps(loaded_classifiers))
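        # Hypothetical example of the saved map after pretraining and then adding two target
        # tasks: {"@pretrain@": 0, "mnli": 1, "cola": 2}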
# Every index in classifiers needs to correspond to a valid ELMo output
# representation.
num_reps = 1 + max(loaded_classifiers.values())
else:
# All tasks share the same scalars.
# Not used if input_module = elmo-chars-only (i.e. no elmo)
loaded_classifiers = {"@pretrain@": 0}
num_reps = 1
if args.input_module.startswith("elmo"):
log.info("Loading ELMo from files:")
log.info("ELMO_OPT_PATH = %s", ELMO_OPT_PATH)
if args.input_module == "elmo-chars-only":
log.info("\tUsing ELMo character CNN only!")
log.info("ELMO_WEIGHTS_PATH = %s", ELMO_WEIGHTS_PATH)
elmo_embedder = ElmoCharacterEncoder(
options_file=ELMO_OPT_PATH, weight_file=ELMO_WEIGHTS_PATH, requires_grad=False
)
d_emb += 512
else:
log.info("\tUsing full ELMo! (separate scalars/task)")
if args.elmo_weight_file_path != "none":
assert os.path.exists(args.elmo_weight_file_path), (
'ELMo weight file path "' + args.elmo_weight_file_path + '" does not exist.'
)
weight_file = args.elmo_weight_file_path
else:
weight_file = ELMO_WEIGHTS_PATH
log.info("ELMO_WEIGHTS_PATH = %s", weight_file)
elmo_embedder = ElmoTokenEmbedderWrapper(
options_file=ELMO_OPT_PATH,
weight_file=weight_file,
num_output_representations=num_reps,
# Dropout is added by the sentence encoder later.
dropout=0.0,
)
d_emb += 1024
token_embedders["elmo"] = elmo_embedder
# Wrap ELMo and other embedders, and concatenates the resulting
# representations alone the last (vector) dimension.
embedder = ElmoTextFieldEmbedder(
token_embedders,
loaded_classifiers,
elmo_chars_only=args.input_module == "elmo-chars-only",
sep_embs_for_skip=args.sep_embs_for_skip,
)
assert d_emb, "You turned off all the embeddings, ya goof!"
return d_emb, embedder, cove_layer
def build_task_modules(args, tasks, model, d_sent, d_emb, embedder, vocab):
"""
This function gets the task-specific parameters and builds
the task-specific modules.
"""
# Attach task-specific params.
for task in set(tasks):
task_params = get_task_specific_params(args, task.name)
log.info(
"\tTask '%s' params: %s",
task.name,
json.dumps(task_params.as_dict(quiet=True), indent=2),
)
# Store task-specific params in case we want to access later
setattr(model, "%s_task_params" % task.name, task_params)
# Actually construct modules.
for task in set(tasks):
# If the name of the task is different than the classifier it should use
# then skip the module creation.
if task.name != model._get_task_params(task.name).get("use_classifier", task.name):
log.info("Name of the task is different than the classifier it should use")
continue
build_task_specific_modules(task, model, d_sent, d_emb, vocab, embedder, args)
def build_task_specific_modules(task, model, d_sent, d_emb, vocab, embedder, args):
""" Build task-specific components for a task and add them to model.
These include decoders, linear layers for linear models.
"""
#XXX We need to get the input size right for the final classification layers
if args.special_task:
if task.subspace == 'syn':
#d_sent = args.k_syn
if not 'adv' in task.name:
d_sent = args.k_syn + args.k_shared
else:
d_sent = args.k_syn
if task.subspace == 'sem':
#d_sent = args.k_sem
if not 'adv' in task.name:
d_sent = args.k_sem + args.k_shared
else:
d_sent = args.k_sem
#XXX
task_params = model._get_task_params(task.name)
if isinstance(task, SingleClassificationTask): # CoLA, for example
module = build_single_sentence_module(
task=task, d_inp=d_sent, use_bert=model.use_bert, params=task_params
)
setattr(model, "%s_mdl" % task.name, module)
elif isinstance(task, (PairClassificationTask, PairRegressionTask, PairOrdinalRegressionTask)): # MNLI, for example
module = build_pair_sentence_module(task, d_sent, model=model, params=task_params)
setattr(model, "%s_mdl" % task.name, module)
elif isinstance(task, LanguageModelingParsingTask):
# The LM Parsing task does not support embeddings that use skip_embs.
hid2voc = build_lm(task, d_sent, args)
setattr(model, "%s_hid2voc" % task.name, hid2voc)
setattr(model, "%s_mdl" % task.name, hid2voc)
elif isinstance(task, LanguageModelingTask):
d_sent = args.d_hid + (args.skip_embs * d_emb)
hid2voc = build_lm(task, d_sent, args)
setattr(model, "%s_hid2voc" % task.name, hid2voc)
elif isinstance(task, SpanClassificationTask):
module = build_span_classifier(task, d_sent, task_params)
setattr(model, "%s_mdl" % task.name, module)
elif isinstance(task, TaggingTask):
hid2tag = build_tagger(task, d_sent, task.num_tags)
setattr(model, "%s_mdl" % task.name, hid2tag)
elif isinstance(task, MultipleChoiceTask):
module = build_multiple_choice_module(
task, d_sent, use_bert=model.use_bert, params=task_params
)
setattr(model, "%s_mdl" % task.name, module)
elif isinstance(task, EdgeProbingTask):
module = EdgeClassifierModule(task, d_sent, task_params)
setattr(model, "%s_mdl" % task.name, module)
elif isinstance(task, SequenceGenerationTask):
decoder, hid2voc = build_decoder(task, d_sent, vocab, embedder, args)
setattr(model, "%s_decoder" % task.name, decoder)
setattr(model, "%s_hid2voc" % task.name, hid2voc)
elif isinstance(task, (MultiRCTask, ReCoRDTask)):
module = build_qa_module(task, d_sent, model.use_bert, task_params)
setattr(model, "%s_mdl" % task.name, module)
else:
raise ValueError("Module not found for %s" % task.name)
def get_task_specific_params(args, task_name):
""" Search args for parameters specific to task.
Args:
args: main-program args, a config.Params object
task_name: (string)
Returns:
AllenNLP Params object of task-specific params.
"""
def _get_task_attr(attr_name, default=None):
return config.get_task_attr(args, task_name, attr_name, default)
params = {}
params["cls_type"] = _get_task_attr("classifier")
params["d_hid"] = _get_task_attr("classifier_hid_dim")
params["pool_type"] = _get_task_attr("pool_type")
params["d_proj"] = _get_task_attr("d_proj")
params["shared_pair_attn"] = args.shared_pair_attn
if args.shared_pair_attn:
params["attn"] = args.pair_attn
params["d_hid_attn"] = args.d_hid_attn
params["dropout"] = args.classifier_dropout
else:
params["attn"] = _get_task_attr("pair_attn")
params["d_hid_attn"] = _get_task_attr("d_hid_attn")
params["dropout"] = _get_task_attr("classifier_dropout")
# Used for span/edge classification. Other tasks can safely ignore.
params["cls_loss_fn"] = _get_task_attr("span_classifier_loss_fn")
params["cls_span_pooling"] = _get_task_attr("classifier_span_pooling")
params["edgeprobe_cnn_context"] = _get_task_attr("edgeprobe_cnn_context")
# For NLI probing tasks, might want to use a classifier trained on
# something else (typically 'mnli').
cls_task_name = _get_task_attr("use_classifier")
# default to this task
params["use_classifier"] = cls_task_name or task_name
return Params(params)
def build_image_sent_module(task, d_inp, params):
pooler = Pooler(project=True, d_inp=d_inp, d_proj=params["d_proj"])
return pooler
def build_single_sentence_module(task, d_inp: int, use_bert: bool, params: Params):
""" Build a single sentence classifier
args:
- task (Task): task object, used to get the number of output classes
- d_inp (int): input dimension to the module, needed for optional linear projection
- use_bert (bool): if using BERT, skip projection before pooling.
- params (Params): Params object with task-specific parameters
returns:
- SingleClassifier (nn.Module): single-sentence classifier consisting of
(optional) a linear projection, pooling, and an MLP classifier
"""
pooler = Pooler(
project=not use_bert,
d_inp=d_inp,
d_proj=params["d_proj"],
pool_type=params["pool_type"]
)
d_out = d_inp if use_bert else params["d_proj"]
classifier = Classifier.from_params(d_out, task.n_classes, params)
log.info(f'Task {task.name} has a classifier with d_out {d_out}')
module = SingleClassifier(pooler, classifier)
return module
def build_pair_sentence_module(task, d_inp, model, params):
""" Build a pair classifier, shared if necessary """
def build_pair_attn(d_in, d_hid_attn):
""" Build the pair model """
d_inp_model = 2 * d_in
modeling_layer = s2s_e.by_name("lstm").from_params(
Params(
{
"input_size": d_inp_model,
"hidden_size": d_hid_attn,
"num_layers": 1,
"bidirectional": True,
}
)
)
pair_attn = AttnPairEncoder(model.vocab, modeling_layer, dropout=params["dropout"])
return pair_attn
    # Build the "pooler", which pools a variable-length sequence
# possibly with a projection layer beforehand
if params["attn"] and not model.use_bert:
pooler = Pooler(project=False, d_inp=params["d_hid_attn"], d_proj=params["d_hid_attn"])
d_out = params["d_hid_attn"] * 2
else:
pooler = Pooler(
project=not model.use_bert,
d_inp=d_inp,
d_proj=params["d_proj"],
pool_type=params["pool_type"],
)
d_out = d_inp if model.use_bert else params["d_proj"]
# Build an attention module if necessary
if params["shared_pair_attn"] and params["attn"] and not model.use_bert: # shared attn
if not hasattr(model, "pair_attn"):
pair_attn = build_pair_attn(d_inp, params["d_hid_attn"])
model.pair_attn = pair_attn
else:
pair_attn = model.pair_attn
elif params["attn"] and not model.use_bert: # non-shared attn
pair_attn = build_pair_attn(d_inp, params["d_hid_attn"])
else: # no attn
pair_attn = None
# Build the classifier
n_classes = task.n_classes if hasattr(task, "n_classes") else 1
if model.use_bert:
# BERT handles pair tasks by concatenating the inputs and classifying the joined
# sequence, so we use a single sentence classifier
if isinstance(task, WiCTask):
d_out *= 3 # also pass the two contextual word representations
log.info(f'Task {task.name} has a classifier with d_out {d_out}')
classifier = Classifier.from_params(d_out, n_classes, params)
module = SingleClassifier(pooler, classifier)
else:
d_out = d_out + d_inp if isinstance(task, WiCTask) else d_out
classifier = Classifier.from_params(4 * d_out, n_classes, params)
module = PairClassifier(pooler, classifier, pair_attn)
return module
def build_lm(task, d_inp, args):
""" Build LM components (just map hidden states to vocab logits) """
hid2voc = nn.Linear(d_inp, args.max_word_v_size)
return hid2voc
def build_span_classifier(task, d_sent, task_params):
module = SpanClassifierModule(task, d_sent, task_params, num_spans=task.num_spans)
return module
def build_tagger(task, d_inp, out_dim):
""" Build tagger components. """
hid2tag = nn.Linear(d_inp, out_dim)
return hid2tag
def build_multiple_choice_module(task, d_sent, use_bert, params):
""" Basic parts for MC task: reduce a vector representation for each model into a scalar. """
pooler = Pooler(
project=not use_bert, d_inp=d_sent, d_proj=params["d_proj"], pool_type=params["pool_type"]
)
d_out = d_sent if use_bert else params["d_proj"]
choice2scalar = Classifier(d_out, n_classes=1, cls_type=params["cls_type"])
return SingleClassifier(pooler, choice2scalar)
def build_decoder(task, d_inp, vocab, embedder, args):
""" Build a task specific decoder """
rnn = s2s_e.by_name("lstm").from_params(
Params(
{
"input_size": embedder.get_output_dim(),
"hidden_size": args.s2s["d_hid_dec"],
"num_layers": args.s2s["n_layers_dec"],
"bidirectional": False,
}
)
)
decoder = SentenceEncoder(vocab, embedder, 0, rnn)
hid2voc = nn.Linear(args.s2s["d_hid_dec"], args.max_word_v_size)
return decoder, hid2voc
def build_qa_module(task, d_inp, use_bert, params):
""" Build a simple QA module that
    1) pools representations (either of the joint (context, question, answer) sequence or each individually)
    2) projects down to two logits
    3) classifies
This module models each question-answer pair _individually_ """
pooler = Pooler(
project=not use_bert, d_inp=d_inp, d_proj=params["d_proj"], pool_type=params["pool_type"]
)
d_out = d_inp if use_bert else params["d_proj"]
classifier = Classifier.from_params(d_out, 2, params)
return SingleClassifier(pooler, classifier)
class MultiTaskModel(nn.Module):
"""
Giant model with task-specific components and a shared word and sentence encoder.
This class samples the tasks passed in pretrained_tasks, and adds task specific components
to the model.
"""
def __init__(self, args, sent_encoder, vocab):
""" Args: sentence encoder """
super(MultiTaskModel, self).__init__()
self.sent_encoder = sent_encoder
self.vocab = vocab
self.utilization = Average() if args.track_batch_utilization else None
self.elmo = args.input_module == "elmo"
self.use_bert = bool(args.input_module.startswith("bert"))
self.sep_embs_for_skip = args.sep_embs_for_skip
#XXX: Dylan's code!
if args.special_task:
h_bert = sent_encoder.d_emb # might be sent_encoder.output_dim instead?
self.k_sem = args.k_sem
self.k_syn = args.k_syn
self.k_shared = args.k_shared
self.sem_proj = nn.Linear(h_bert, args.k_sem, bias=False) if args.special_task else lambda x : x
self.syn_proj = nn.Linear(h_bert, args.k_syn, bias=False) if args.special_task else lambda x : x
self.shared_proj = nn.Linear(h_bert, args.k_shared, bias=False) if args.special_task else None
self.spare_pooler = Pooler(
project=False,
d_inp=args.k_shared,
d_proj=0,
pool_type="first",
)
        def ortho_output(private, shared, mask):
            # Pool the first-token representation of the private and shared subspaces and
            # return their cross dot-product matrix; its squared Frobenius norm (added to the
            # loss later as "loss_orthogonality") pushes the two subspaces toward orthogonality.
            private = self.spare_pooler(private, mask)
            shared = self.spare_pooler(shared, mask)
            return torch.mm(private, shared.permute(1, 0))
        self.ortho = ortho_output
if args.special_task:
num_tasks = 2
pooler = Pooler(
project=False,
d_inp=args.k_shared,
d_proj=0,
pool_type="first",
)
if args.discriminator_hidden > 0:
classifier = nn.Sequential(
nn.Linear(args.k_shared, args.discriminator_hidden),
nn.Tanh(),
nn.LayerNorm(args.discriminator_hidden),
nn.Dropout(0.2),
nn.Linear(args.discriminator_hidden, num_tasks, bias=True)
)
else:
classifier = nn.Linear(args.k_shared, num_tasks, bias=True)
self.adv_discriminator = SingleClassifier(pooler, classifier)
else:
self.adv_discriminator = None
def forward(self, task, batch, predict=False):
"""
Pass inputs to correct forward pass
Args:
- task (tasks.Task): task for which batch is drawn
- batch (Dict[str:Dict[str:Tensor]]): dictionary of (field, indexing) pairs,
where indexing is a dict of the index namespace and the actual indices.
- predict (Bool): passed to task specific forward(). If true, forward()
should return predictions.
Returns:
- out: dictionary containing task outputs and loss if label was in batch
"""
if self.utilization is not None:
if "input1" in batch:
self.utilization(get_batch_utilization(batch["input1"]))
elif "input" in batch:
self.utilization(get_batch_utilization(batch["input"]))
if isinstance(task, SingleClassificationTask): #XXX CoLA is a SingleClassificationTask
out = self._single_sentence_forward(batch, task, predict)
elif isinstance(task, GLUEDiagnosticTask):
out = self._nli_diagnostic_forward(batch, task, predict)
elif isinstance( #XXX RTE is type PairClassificationTask
task, (PairClassificationTask, PairRegressionTask, PairOrdinalRegressionTask)
):
out = self._pair_sentence_forward(batch, task, predict)
elif isinstance(task, LanguageModelingTask):
if isinstance(self.sent_encoder._phrase_layer, ONLSTMStack) or isinstance(
self.sent_encoder._phrase_layer, PRPN
):
out = self._lm_only_lr_forward(batch, task)
else:
out = self._lm_forward(batch, task, predict)
elif isinstance(task, TaggingTask):
out = self._tagger_forward(batch, task, predict)
elif isinstance(task, MultipleChoiceTask):
out = self._mc_forward(batch, task, predict)
elif isinstance(task, EdgeProbingTask):
# Just get embeddings and invoke task module.
word_embs_in_context, sent_mask = self.sent_encoder(batch["input1"], task)
module = getattr(self, "%s_mdl" % task.name)
out = module.forward(batch, word_embs_in_context, sent_mask, task, predict)
elif isinstance(task, SequenceGenerationTask):
out = self._seq_gen_forward(batch, task, predict)
elif isinstance(task, (MultiRCTask, ReCoRDTask)):
out = self._multiple_choice_reading_comprehension_forward(batch, task, predict)
elif isinstance(task, SpanClassificationTask):
out = self._span_forward(batch, task, predict)
else:
raise ValueError("Task-specific components not found!")
return out
def _get_task_params(self, task_name):
""" Get task-specific Params, as set in build_module(). """
return getattr(self, "%s_task_params" % task_name)
def _get_classifier(self, task):
""" Get task-specific classifier, as set in build_module(). """
# TODO: replace this logic with task._classifier_name?
task_params = self._get_task_params(task.name)
use_clf = task_params["use_classifier"]
if use_clf in [None, "", "none"]:
use_clf = task.name # default if not set
return getattr(self, "%s_mdl" % use_clf)
def _single_sentence_forward(self, batch, task, predict):
out = {}
# embed the sentence
word_embs_in_context, sent_mask = self.sent_encoder(batch["input1"], task)
#XXX Dylan's code
if self.shared_proj != None:
if task.subspace == 'sem':
sem_in_context = self.sem_proj(word_embs_in_context)
if not 'adv' in task.name:
shared_in_context = self.shared_proj(word_embs_in_context)
word_embs_in_context = torch.cat([sem_in_context, shared_in_context], dim=-1)
shared_private = self.ortho(sem_in_context, shared_in_context, sent_mask)
else:
word_embs_in_context = sem_in_context
elif task.subspace == 'syn':
syn_in_context = self.syn_proj(word_embs_in_context) # Since CoLA is primarily syntactic
if not 'adv' in task.name:
shared_in_context = self.shared_proj(word_embs_in_context)
word_embs_in_context = torch.cat([syn_in_context, shared_in_context], dim=-1)
shared_private = self.ortho(syn_in_context, shared_in_context, sent_mask)
else:
word_embs_in_context = syn_in_context
#XXX
# pass to a task specific classifier
classifier = self._get_classifier(task)
logits = classifier(word_embs_in_context, sent_mask)
#XXX
if not 'adv' in task.name and self.adv_discriminator != None:
logits_shared = self.adv_discriminator(shared_in_context, sent_mask)
#XXX
out["logits"] = logits
out["n_exs"] = get_batch_size(batch)
if "labels" in batch: # means we should compute loss
if batch["labels"].dim() == 0:
labels = batch["labels"].unsqueeze(0)
elif batch["labels"].dim() == 1:
labels = batch["labels"]
else:
labels = batch["labels"].squeeze(-1)
#XXX
if not 'adv' in task.name and self.adv_discriminator != None:
task_id_labels = torch.zeros(labels.shape).to(labels.device).long()
out["loss_shared"] = F.cross_entropy(logits_shared, task_id_labels)
out["loss_orthogonality"] = shared_private.pow(2).sum()
out["loss"] = F.cross_entropy(logits, labels)
tagmask = batch.get("tagmask", None)
if not 'discriminator' in task.name:
task.update_metrics(logits, labels, tagmask=tagmask)
else:
task.update_metrics(logits_shared, task_id_labels, tagmask=tagmask)
#XXX
if predict:
if isinstance(task, RegressionTask):
if logits.ndimension() > 1:
assert (
logits.ndimension() == 2 and logits[-1] == 1
), "Invalid regression prediction dimensions!"
logits = logits.squeeze(-1)
out["preds"] = logits
else:
_, out["preds"] = logits.max(dim=1)
return out
def _nli_diagnostic_forward(self, batch, task, predict):
out = {}
# embed the sentence
classifier = self._get_classifier(task)
if self.use_bert:
sent, mask = self.sent_encoder(batch["inputs"], task)
logits = classifier(sent, mask)
else:
sent1, mask1 = self.sent_encoder(batch["input1"], task)
sent2, mask2 = self.sent_encoder(batch["input2"], task)
logits = classifier(sent1, sent2, mask1, mask2)
out["logits"] = logits
out["n_exs"] = get_batch_size(batch)
if "labels" in batch:
if batch["labels"].dim() == 0:
labels = batch["labels"].unsqueeze(0)
elif batch["labels"].dim() == 1:
labels = batch["labels"]
else:
labels = batch["labels"].squeeze(-1)
out["loss"] = F.cross_entropy(logits, labels)
# task.update_diagnostic_metrics(predicted, labels, batch)
task.update_diagnostic_metrics(logits, labels, batch)
if predict:
_, predicted = logits.max(dim=1)
out["preds"] = predicted
return out
def _span_forward(self, batch, task, predict):
sent_embs, sent_mask = self.sent_encoder(batch["input1"], task)
module = getattr(self, "%s_mdl" % task.name)
out = module.forward(batch, sent_embs, sent_mask, task, predict)
return out
def _pair_sentence_forward(self, batch, task, predict):
out = {}
# embed the sentence
classifier = self._get_classifier(task)
if self.use_bert:
sent, mask = self.sent_encoder(batch["inputs"], task)
#XXX Dylan's code
if self.shared_proj != None:
if task.subspace == 'sem':
sem_in_context = self.sem_proj(sent)
if not 'adv' in task.name:
shared_in_context = self.shared_proj(sent)
sent = torch.cat([sem_in_context, shared_in_context], dim=-1)
shared_private = self.ortho(sem_in_context, shared_in_context, mask)
else:
sent = sem_in_context
elif task.subspace == 'syn':
syn_in_context = self.syn_proj(sent) # Since CoLA is primarily syntactic
if not 'adv' in task.name:
shared_in_context = self.shared_proj(sent)
sent = torch.cat([syn_in_context, shared_in_context], dim=-1)
shared_private = self.ortho(syn_in_context, shared_in_context, mask)
else:
sent = syn_in_context
#XXX
# special case for WiC b/c we want to add representations of particular tokens
if isinstance(task, WiCTask):
logits = classifier(sent, mask, [batch["idx1"], batch["idx2"]])
else:
logits = classifier(sent, mask)
#XXX
if not 'adv' in task.name and self.adv_discriminator != None:
logits_shared = self.adv_discriminator(shared_in_context, mask)
#XXX
else:
sent1, mask1 = self.sent_encoder(batch["input1"], task)
sent2, mask2 = self.sent_encoder(batch["input2"], task)
if isinstance(task, WiCTask):
logits = classifier(sent1, sent2, mask1, mask2, [batch["idx1"]], [batch["idx2"]])
else:
logits = classifier(sent1, sent2, mask1, mask2)
out["logits"] = logits
out["n_exs"] = get_batch_size(batch)
tagmask = batch.get("tagmask", None)
if "labels" in batch:
labels = batch["labels"]
labels = labels.squeeze(-1) if len(labels.size()) > 1 else labels
if isinstance(task, RegressionTask):
logits = logits.squeeze(-1) if len(logits.size()) > 1 else logits
out["loss"] = F.mse_loss(logits, labels)
logits_np = logits.data.cpu().numpy()
labels_np = labels.data.cpu().numpy()
task.update_metrics(logits_np, labels_np, tagmask=tagmask)
else:
#XXX
if not 'adv' in task.name and self.adv_discriminator != None:
task_id_labels = torch.ones(labels.shape).to(labels.device).long()
out["loss_shared"] = F.cross_entropy(logits_shared, task_id_labels)
out["loss_orthogonality"] = shared_private.pow(2).sum()
#XXX
out["loss"] = F.cross_entropy(logits, labels)
if not 'discriminator' in task.name:
task.update_metrics(logits, labels, tagmask=tagmask)
else:
task.update_metrics(logits_shared, task_id_labels, tagmask=tagmask)
if predict:
if isinstance(task, RegressionTask):
if logits.ndimension() > 1:
assert (
                        logits.ndimension() == 2 and logits.size(-1) == 1
), "Invalid regression prediction dimensions!"
logits = logits.squeeze(-1)
out["preds"] = logits
else:
_, out["preds"] = logits.max(dim=1)
return out
def _seq_gen_forward(self, batch, task, predict):
""" For variational autoencoder """
out = {}
sent, sent_mask = self.sent_encoder(batch["inputs"], task)
out["n_exs"] = get_batch_size(batch)
if "targs" in batch:
pass
if predict:
pass
return out
def _tagger_forward(self, batch: dict, task: TaggingTask, predict: bool) -> dict:
"""
This function is for sequence tagging (one-to-one mapping between words and tags).
Args:
batch: a dict of inputs and target tags
task: TaggingTask
predict: (boolean) predict mode (not supported)
Returns
out: (dict)
- 'logits': output layer, dimension: [batchSize * task.max_seq_len, task.num_tags]
- 'loss': size average CE loss
"""
out = {}
# batch[inputs] only has one item
b_size, seq_len = list(batch["inputs"].values())[0].size()
seq_len -= 2
sent_encoder = self.sent_encoder
out["n_exs"] = get_batch_size(batch)
if not isinstance(sent_encoder, BiLMEncoder):
sent, mask = sent_encoder(batch["inputs"], task)
sent = sent.masked_fill(1 - mask.byte(), 0) # avoid NaNs
sent = sent[:, 1:-1, :]
hid2tag = self._get_classifier(task)
logits = hid2tag(sent)
logits = logits.view(b_size * seq_len, -1)
out["logits"] = logits
targs = batch["targs"]["words"][:, :seq_len].contiguous().view(-1)
if "mask" in batch:
# prevent backprop for tags generated for tokenization-introduced tokens
# such as word boundaries
mask = batch["mask"]
batch_mask = [mask[i][:seq_len] for i in range(b_size)]
batch_mask = torch.stack(batch_mask)
keep_idxs = torch.nonzero(batch_mask.view(-1).data).squeeze()
logits = logits.index_select(0, keep_idxs)
targs = targs.index_select(0, keep_idxs)
pad_idx = self.vocab.get_token_index(self.vocab._padding_token)
out["loss"] = F.cross_entropy(logits, targs, ignore_index=pad_idx)
task.scorer1(logits, targs)
return out
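    # --- Illustrative note (added comment, not in the original jiant source) ---
    # seq_len is reduced by 2 and sent[:, 1:-1, :] drops the first and last encoder
    # positions (presumably the boundary tokens added during indexing), so the
    # remaining positions line up one-to-one with the word-level tags in batch["targs"].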
def _lm_forward(self, batch, task, predict):
"""Forward pass for LM model
Args:
batch: indexed input data
            task: (Task object)
predict: (boolean) predict mode (not supported)
return:
out: (dict)
- 'logits': output layer, dimension: [batchSize * timeSteps * 2, outputDim]
first half: [:batchSize*timeSteps, outputDim] is output layer from
forward layer
second half: [batchSize*timeSteps:, outputDim] is output layer from
backward layer
- 'loss': size average CE loss
"""
out = {}
sent_encoder = self.sent_encoder
assert_for_log(
isinstance(sent_encoder._phrase_layer, BiLMEncoder),
"Not using LM for language modeling task!",
)
assert_for_log(
"targs" in batch and "words" in batch["targs"], "Batch missing target words!"
)
pad_idx = self.vocab.get_token_index(self.vocab._padding_token, "tokens")
b_size, seq_len = batch["targs"]["words"].size()
n_pad = batch["targs"]["words"].eq(pad_idx).sum().item()
out["n_exs"] = (b_size * seq_len - n_pad) * 2
sent, mask = sent_encoder(batch["input"], task)
sent = sent.masked_fill(1 - mask.byte(), 0) # avoid NaNs
# Split encoder outputs by direction
split = int(self.sent_encoder._phrase_layer.get_output_dim() / 2)
fwd, bwd = sent[:, :, :split], sent[:, :, split : split * 2]
if split * 2 < sent.size(2): # skip embeddings
out_embs = sent[:, :, split * 2 :]
fwd = torch.cat([fwd, out_embs], dim=2)
bwd = torch.cat([bwd, out_embs], dim=2)
# Forward and backward logits and targs
hid2voc = getattr(self, "%s_hid2voc" % task.name)
logits_fwd = hid2voc(fwd).view(b_size * seq_len, -1)
logits_bwd = hid2voc(bwd).view(b_size * seq_len, -1)
logits = torch.cat([logits_fwd, logits_bwd], dim=0)
out["logits"] = logits
trg_fwd = batch["targs"]["words"].view(-1)
trg_bwd = batch["targs_b"]["words"].view(-1)
targs = torch.cat([trg_fwd, trg_bwd], dim=0)
assert logits.size(0) == targs.size(0), "Number of logits and targets differ!"
out["loss"] = F.cross_entropy(logits, targs, ignore_index=pad_idx)
task.scorer1(out["loss"].item())
if predict:
pass
return out
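    # --- Illustrative note (added comment, not in the original jiant source) ---
    # Shape sketch: with b_size=2 and seq_len=3, hid2voc maps fwd and bwd to logits of
    # shape (2 * 3, vocab_size) each; torch.cat along dim 0 stacks them, so rows
    # [0, 6) score the forward targets (targs) and rows [6, 12) score the backward
    # targets (targs_b), matching the layout described in the docstring above.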
def _mc_forward(self, batch, task, predict):
""" Forward for a multiple choice question answering task """
out = {}
logits = []
module = self._get_classifier(task)
if self.use_bert:
for choice_idx in range(task.n_choices):
sent, mask = self.sent_encoder(batch["choice%d" % choice_idx], task)
logit = module(sent, mask)
logits.append(logit)
out["n_exs"] = batch["choice0"]["bert_wpm_pretokenized"].size(0)
else:
ctx, ctx_mask = self.sent_encoder(batch["question"], task)
for choice_idx in range(task.n_choices):
sent, mask = self.sent_encoder(batch["choice%d" % choice_idx], task)
inp = torch.cat([ctx, sent], dim=1)
inp_mask = torch.cat([ctx_mask, mask], dim=1)
logit = module(inp, inp_mask)
logits.append(logit)
out["n_exs"] = batch["choice0"]["words"].size(0)
logits = torch.cat(logits, dim=1)
out["logits"] = logits
if "label" in batch:
labels = batch["label"]
out["loss"] = F.cross_entropy(logits, labels)
task.update_metrics(logits, labels)
if predict:
out["preds"] = logits.argmax(dim=-1)
return out
def _lm_only_lr_forward(self, batch, task):
"""Only left to right pass for LM model - non-bidirectional models.
Used for language modeling training only in one direction.
Args:
batch: indexed input data
            task: (Task object)
return:
out: (dict)
- 'logits': output layer, dimension: [batchSize * timeSteps, outputDim]
is output layer from forward layer
- 'loss': size average CE loss
"""
out = {}
assert_for_log(
"targs" in batch and "words" in batch["targs"], "Batch missing target words!"
)
pad_idx = self.vocab.get_token_index(self.vocab._padding_token, "tokens")
b_size, seq_len = batch["targs"]["words"].size()
# pad_idx is the token used to pad till max_seq_len
n_pad = batch["targs"]["words"].eq(pad_idx).sum().item()
# No of examples: only left to right, every unit in the sequence length is
# a training example only once.
out["n_exs"] = b_size * seq_len - n_pad
sent, mask = self.sent_encoder(batch["input"], task)
sent = sent.masked_fill(1 - mask.byte(), 0)
hid2voc = getattr(self, "%s_hid2voc" % task.name)
logits = hid2voc(sent).view(b_size * seq_len, -1)
out["logits"] = logits
trg_fwd = batch["targs"]["words"].view(-1)
assert logits.size(0) == trg_fwd.size(0), "Number of logits and targets differ!"
out["loss"] = F.cross_entropy(logits, trg_fwd, ignore_index=pad_idx)
task.scorer1(out["loss"].item())
return out
def _multiple_choice_reading_comprehension_forward(self, batch, task, predict):
""" Forward call for multiple choice (selecting from a fixed set of answers)
reading comprehension (have a supporting paragraph).
Batch has a tensor of shape (n_questions, n_answers, n_tokens)
"""
out = {}
classifier = self._get_classifier(task)
if self.use_bert:
# if using BERT, we concatenate the passage, question, and answer
inp = batch["psg_qst_ans"]
ex_embs, ex_mask = self.sent_encoder(inp, task)
logits = classifier(ex_embs, ex_mask)
out["n_exs"] = inp["bert_wpm_pretokenized"].size(0)
else:
# else, we embed each independently and concat them
psg_emb, psg_mask = self.sent_encoder(batch["psg"], task)
qst_emb, qst_mask = self.sent_encoder(batch["qst"], task)
if "ans" in batch: # most QA tasks, e.g. MultiRC have explicit answer fields
ans_emb, ans_mask = self.sent_encoder(batch["ans"], task)
inp = torch.cat([psg_emb, qst_emb, ans_emb], dim=1)
inp_mask = torch.cat([psg_mask, qst_mask, ans_mask], dim=1)
out["n_exs"] = batch["ans"]["words"].size(0)
else: # ReCoRD inserts answer into the query
inp = torch.cat([psg_emb, qst_emb], dim=1)
inp_mask = torch.cat([psg_mask, qst_mask], dim=1)
out["n_exs"] = batch["qst"]["words"].size(0)
logits = classifier(inp, inp_mask)
out["logits"] = logits
if "label" in batch:
idxs = [(p, q) for p, q in zip(batch["psg_idx"], batch["qst_idx"])]
labels = batch["label"]
out["loss"] = F.cross_entropy(logits, labels)
if isinstance(task, ReCoRDTask):
# ReCoRD needs the answer string to compute F1
task.update_metrics(logits, batch["ans_str"], idxs)
else:
task.update_metrics(logits, labels, idxs)
if predict:
if isinstance(task, ReCoRDTask):
# for ReCoRD, we want the logits to make
# predictions across answer choices
# (which are spread across batches)
out["preds"] = logits
else:
out["preds"] = logits.argmax(dim=-1)
return out
def get_elmo_mixing_weights(self, tasks=[]):
""" Get elmo mixing weights from text_field_embedder. Gives warning when fails.
args:
- tasks (List[Task]): list of tasks that we want to get ELMo scalars for.
returns:
- params Dict[str:float]: dictionary maybe layers to scalar params
"""
params = {}
if self.elmo:
if not self.sep_embs_for_skip:
tasks = [None]
else:
tasks = [None] + tasks
for task in tasks:
if task:
params[task._classifier_name] = get_elmo_mixing_weights(
self.sent_encoder._text_field_embedder, task=task
)
else:
params["@pretrain@"] = get_elmo_mixing_weights(
self.sent_encoder._text_field_embedder, task=None
)
return params
| [] |
2024-01-10 | axgpt/mindsdb | mindsdb~integrations~handlers~file_handler~file_handler.py | import codecs
import csv
import json
import os
import tempfile
import traceback
from io import BytesIO, StringIO
from pathlib import Path
from urllib.parse import urlparse
import magic
import pandas as pd
import requests
from charset_normalizer import from_bytes
from mindsdb_sql import parse_sql
from mindsdb_sql.parser.ast import DropTables, Select
from mindsdb_sql.parser.ast.base import ASTNode
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import TextLoader, PyPDFLoader
from mindsdb.api.mysql.mysql_proxy.utilities.sql import query_df
from mindsdb.integrations.libs.base import DatabaseHandler
from mindsdb.integrations.libs.response import RESPONSE_TYPE
from mindsdb.integrations.libs.response import HandlerResponse as Response
from mindsdb.integrations.libs.response import HandlerStatusResponse as StatusResponse
from mindsdb.utilities import log
logger = log.getLogger(__name__)
DEFAULT_CHUNK_SIZE = 200
DEFAULT_CHUNK_OVERLAP = 50
def clean_cell(val):
if str(val) in ["", " ", " ", "NaN", "nan", "NA"]:
return None
return val
class FileHandler(DatabaseHandler):
"""
Handler for files
"""
name = "files"
def __init__(
self,
name=None,
file_storage=None,
connection_data={},
file_controller=None,
**kwargs,
):
super().__init__(name)
self.parser = parse_sql
self.fs_store = file_storage
self.custom_parser = connection_data.get("custom_parser", None)
self.clean_rows = connection_data.get("clean_rows", True)
self.chunk_size = connection_data.get("chunk_size", DEFAULT_CHUNK_SIZE)
self.chunk_overlap = connection_data.get("chunk_overlap", DEFAULT_CHUNK_OVERLAP)
self.file_controller = file_controller
def connect(self, **kwargs):
return
def disconnect(self, **kwargs):
return
def check_connection(self) -> StatusResponse:
return StatusResponse(True)
def query(self, query: ASTNode) -> Response:
if type(query) == DropTables:
for table_identifier in query.tables:
if (
len(table_identifier.parts) == 2
and table_identifier.parts[0] != self.name
):
return Response(
RESPONSE_TYPE.ERROR,
error_message=f"Can't delete table from database '{table_identifier.parts[0]}'",
)
table_name = table_identifier.parts[-1]
try:
self.file_controller.delete_file(table_name)
except Exception as e:
return Response(
RESPONSE_TYPE.ERROR,
error_message=f"Can't delete table '{table_name}': {e}",
)
return Response(RESPONSE_TYPE.OK)
elif type(query) == Select:
table_name = query.from_table.parts[-1]
file_path = self.file_controller.get_file_path(table_name)
df, _columns = self._handle_source(
file_path,
self.clean_rows,
self.custom_parser,
self.chunk_size,
self.chunk_overlap,
)
result_df = query_df(df, query)
return Response(RESPONSE_TYPE.TABLE, data_frame=result_df)
else:
return Response(
RESPONSE_TYPE.ERROR,
error_message="Only 'select' and 'drop' queries allowed for files",
)
def native_query(self, query: str) -> Response:
ast = self.parser(query, dialect="mindsdb")
return self.query(ast)
@staticmethod
def _handle_source(
file_path,
clean_rows=True,
custom_parser=None,
chunk_size=DEFAULT_CHUNK_SIZE,
chunk_overlap=DEFAULT_CHUNK_OVERLAP,
):
"""
        This function takes a file path and returns a tuple of (pandas DataFrame, column map)
"""
# get file data io, format and dialect
data, fmt, dialect = FileHandler._get_data_io(file_path)
data.seek(0) # make sure we are at 0 in file pointer
if custom_parser:
header, file_data = custom_parser(data, fmt)
df = pd.DataFrame(file_data, columns=header)
elif fmt == "parquet":
df = pd.read_parquet(data)
elif fmt == "csv":
df = pd.read_csv(data, sep=dialect.delimiter, index_col=False)
elif fmt in ["xlsx", "xls"]:
data.seek(0)
df = pd.read_excel(data)
elif fmt == "json":
data.seek(0)
json_doc = json.loads(data.read())
df = pd.json_normalize(json_doc, max_level=0)
elif fmt == "txt" or fmt == "pdf":
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
if fmt == "txt":
loader = TextLoader(file_path, encoding="utf8")
docs = text_splitter.split_documents(loader.load())
df = pd.DataFrame(
[
{"content": doc.page_content, "metadata": doc.metadata}
for doc in docs
]
)
elif fmt == "pdf":
loader = PyPDFLoader(file_path)
docs = text_splitter.split_documents(loader.load_and_split())
df = pd.DataFrame(
[
{"content": doc.page_content, "metadata": doc.metadata}
for doc in docs
]
)
else:
raise ValueError(
"Could not load file into any format, supported formats are csv, json, xls, xlsx, pdf, txt"
)
header = df.columns.values.tolist()
df = df.rename(columns={key: key.strip() for key in header})
df = df.applymap(clean_cell)
header = [x.strip() for x in header]
col_map = dict((col, col) for col in header)
return df, col_map
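    # --- Illustrative note (added comment, not part of the original handler) ---
    # The DataFrame returned by _handle_source is format-dependent: tabular inputs
    # (csv, xlsx, json, parquet) keep their own columns, while txt and pdf files are
    # split into langchain chunks and come back with two columns, roughly
    #   {"content": "<chunk of up to chunk_size characters>", "metadata": {"source": "<file path>"}}
    # col_map simply maps each stripped header name to itself.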
@staticmethod
def is_it_parquet(data: BytesIO) -> bool:
# Check first and last 4 bytes equal to PAR1.
# Refer: https://parquet.apache.org/docs/file-format/
parquet_sig = b"PAR1"
data.seek(0, 0)
start_meta = data.read(4)
data.seek(-4, 2)
end_meta = data.read()
data.seek(0)
if start_meta == parquet_sig and end_meta == parquet_sig:
return True
return False
@staticmethod
def is_it_xlsx(file_path: str) -> bool:
file_type = magic.from_file(file_path, mime=True)
if file_type in [
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"application/vnd.ms-excel",
]:
return True
return False
@staticmethod
def is_it_json(data_str: StringIO) -> bool:
# see if its JSON
text = data_str.read(100).strip()
data_str.seek(0)
if len(text) > 0:
            # if it looks like JSON, then try to parse it
if text.startswith("{") or text.startswith("["):
try:
json.loads(data_str.read())
return True
except Exception:
return False
finally:
data_str.seek(0)
return False
@staticmethod
def is_it_csv(data_str: StringIO) -> bool:
sample = data_str.readline() # trying to get dialect from header
data_str.seek(0)
try:
csv.Sniffer().sniff(sample)
# Avoid a false-positive for json files
try:
json.loads(data_str.read())
data_str.seek(0)
return False
except json.decoder.JSONDecodeError:
data_str.seek(0)
return True
except Exception:
return False
@staticmethod
def _get_data_io(file_path):
"""
@TODO: Use python-magic to simplify the function and detect the file types as the xlsx example
        This takes a file path (a local file, or a URL already fetched to a local path) and determines its format as well as the CSV dialect
        :param file_path: path to the file
:return: data_io, format, dialect
"""
data = BytesIO()
data_str = None
dialect = None
try:
with open(file_path, "rb") as fp:
data = BytesIO(fp.read())
except Exception as e:
error = "Could not load file, possible exception : {exception}".format(
exception=e
)
logger.error(error)
raise ValueError(error)
suffix = Path(file_path).suffix.strip(".").lower()
if suffix not in ("csv", "json", "xlsx", "parquet"):
if FileHandler.is_it_parquet(data):
suffix = "parquet"
elif FileHandler.is_it_xlsx(file_path):
suffix = "xlsx"
if suffix == "parquet":
return data, "parquet", dialect
if suffix == "xlsx":
return data, "xlsx", dialect
if suffix == "txt":
return data, "txt", dialect
if suffix == "pdf":
return data, "pdf", dialect
byte_str = data.read()
# Move it to StringIO
try:
# Handle Microsoft's BOM "special" UTF-8 encoding
if byte_str.startswith(codecs.BOM_UTF8):
data_str = StringIO(byte_str.decode("utf-8-sig"))
else:
file_encoding_meta = from_bytes(
byte_str[: 32 * 1024],
                    steps=32,  # Number of steps/blocks to extract from byte_str
                    chunk_size=1024,  # Set block size of each extraction
explain=False,
)
best_meta = file_encoding_meta.best()
errors = "strict"
if best_meta is not None:
encoding = file_encoding_meta.best().encoding
try:
data_str = StringIO(byte_str.decode(encoding, errors))
except UnicodeDecodeError:
encoding = "utf-8"
errors = "replace"
data_str = StringIO(byte_str.decode(encoding, errors))
else:
encoding = "utf-8"
errors = "replace"
data_str = StringIO(byte_str.decode(encoding, errors))
except Exception:
logger.error(traceback.format_exc())
logger.error("Could not load into string")
if suffix not in ("csv", "json"):
if FileHandler.is_it_json(data_str):
suffix = "json"
elif FileHandler.is_it_csv(data_str):
suffix = "csv"
if suffix == "json":
return data_str, suffix, dialect
if suffix == "csv":
try:
dialect = FileHandler._get_csv_dialect(data_str)
if dialect:
return data_str, "csv", dialect
except Exception:
logger.error("Could not detect format for this file")
logger.error(traceback.format_exc())
data_str.seek(0)
data.seek(0)
# No file type identified
return data, None, dialect
@staticmethod
def _get_file_path(path) -> str:
try:
is_url = urlparse(path).scheme in ("http", "https")
except Exception:
is_url = False
if is_url:
path = FileHandler._fetch_url(path)
return path
@staticmethod
def _get_csv_dialect(buffer) -> csv.Dialect:
sample = buffer.readline() # trying to get dialect from header
buffer.seek(0)
try:
if isinstance(sample, bytes):
sample = sample.decode()
accepted_csv_delimiters = [",", "\t", ";"]
try:
dialect = csv.Sniffer().sniff(
sample, delimiters=accepted_csv_delimiters
)
dialect.doublequote = (
True # assume that all csvs have " as string escape
)
except Exception:
dialect = csv.reader(sample).dialect
if dialect.delimiter not in accepted_csv_delimiters:
raise Exception(
                        f"CSV delimiter '{dialect.delimiter}' is not supported"
)
except csv.Error:
dialect = None
return dialect
@staticmethod
def _fetch_url(url: str) -> str:
temp_dir = tempfile.mkdtemp(prefix="mindsdb_file_url_")
try:
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(os.path.join(temp_dir, "file"), "wb") as f:
for chunk in r:
f.write(chunk)
else:
raise Exception(f"Response status code is {r.status_code}")
except Exception as e:
logger.error(f"Error during getting {url}")
logger.error(e)
raise
return os.path.join(temp_dir, "file")
def get_tables(self) -> Response:
"""
List all files
"""
files_meta = self.file_controller.get_files()
data = [
{
"TABLE_NAME": x["name"],
"TABLE_ROWS": x["row_count"],
"TABLE_TYPE": "BASE TABLE",
}
for x in files_meta
]
return Response(RESPONSE_TYPE.TABLE, data_frame=pd.DataFrame(data))
def get_columns(self, table_name) -> Response:
file_meta = self.file_controller.get_file_meta(table_name)
result = Response(
RESPONSE_TYPE.TABLE,
data_frame=pd.DataFrame(
[
{
"Field": x["name"].strip()
if isinstance(x, dict)
else x.strip(),
"Type": "str",
}
for x in file_meta["columns"]
]
),
)
return result
| [] |
2024-01-10 | Alsace08/SumCoT | api_request.py | # -!- coding: utf-8 -!-
import openai
class Decoder:
def __init__(self, api_key):
self.api_key = api_key
def decode(self, input, model, max_length):
response = self.decoder_for_gpt3(model, input, max_length)
return response
def decoder_for_gpt3(self, model, input, max_length):
openai.api_key = self.api_key
if model == "gpt3":
engine = "text-ada-001"
elif model == "gpt3-medium":
engine = "text-babbage-001"
elif model == "gpt3-large":
engine = "text-curie-001"
elif model == "gpt3-xl":
engine = "text-davinci-002"
else:
raise ValueError("model is not properly defined ...")
response = openai.Completion.create(
engine=engine,
prompt=input,
max_tokens=max_length,
temperature=0,
stop=None
)
return response["choices"][0]["text"] | [
"INPUT"
] |
2024-01-10 | Alsace08/SumCoT | evaluation~metric.py | # -!- coding: utf-8 -!-
import json
import openai
import argparse
from rouge import Rouge
from bert_score import score
def rouge_score(ref, pred):
rouge = Rouge()
rs = rouge.get_scores(pred, ref)
rouge1 = rs[0]["rouge-1"]["f"] * 100
rouge2 = rs[0]["rouge-2"]["f"] * 100
rougel = rs[0]["rouge-l"]["f"] * 100
return rouge1, rouge2, rougel
def bs_score(ref, pred):
_, _, F1 = score([pred], [ref], lang="en", verbose=True)
bs = F1.mean()
return bs
class BatchEvaluation:
def __init__(self, total_r1=0, total_r2=0, total_rl=0, total_bs=0,
call_time_rs=0, call_time_bs=0):
self.ref = ""
self.pred = ""
self.total_r1 = total_r1
self.total_r2 = total_r2
self.total_rl = total_rl
self.total_bs = total_bs
self.call_time_rs = call_time_rs
self.call_time_bs = call_time_bs
def set_text(self, ref, pred):
self.ref = ref
self.pred = pred
return self
def get_rouge_score(self):
r1, r2, rl = rouge_score(self.ref, self.pred)
self.total_r1 += r1
self.total_r2 += r2
self.total_rl += rl
self.call_time_rs += 1
def get_bs_score(self):
bs = bs_score(self.ref, self.pred)
self.total_bs += bs
self.call_time_bs += 1
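# --- Usage sketch (added comment, not part of the original file) ---
# The class only accumulates totals and call counts, so averaging is left to the
# caller; a minimal loop over (reference, prediction) pairs might look like:
#   evaluator = BatchEvaluation()
#   for ref, pred in pairs:  # `pairs` is a placeholder iterable of string pairs
#       evaluator.set_text(ref, pred)
#       evaluator.get_rouge_score()
#       evaluator.get_bs_score()
#   avg_rouge1 = evaluator.total_r1 / evaluator.call_time_rs
#   avg_bertscore = evaluator.total_bs / evaluator.call_time_bs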
| [] |
2024-01-10 | noteable-io/origami | origami~models~rtu~channels~kernels.py | """
The kernels channel in RTU is primarily used for runtime updates like kernel and cell status,
variable explorer, and outputs, rather than document model changes on the files channel (adding cells,
updating content, etc.)
"""
import uuid
from typing import Annotated, List, Literal, Optional, Union
from pydantic import BaseModel, Field
from origami.models.kernels import CellState, KernelStatusUpdate
from origami.models.rtu.base import BaseRTURequest, BaseRTUResponse, BooleanReplyData
class KernelsRequest(BaseRTURequest):
channel_prefix: Literal["kernels"] = "kernels"
class KernelsResponse(BaseRTUResponse):
channel_prefix: Literal["kernels"] = "kernels"
class KernelSubscribeRequestData(BaseModel):
file_id: uuid.UUID
class KernelSubscribeRequest(KernelsRequest):
event: Literal["subscribe_request"] = "subscribe_request"
data: KernelSubscribeRequestData
# Kernel status is returned on subscribe and also updated through kernel status updates
class KernelSubscribeReplyData(BaseModel):
success: bool
kernel_session: Optional[KernelStatusUpdate] = None # None if no Kernel is alive for a file
class KernelSubscribeReply(KernelsResponse):
event: Literal["subscribe_reply"] = "subscribe_reply"
data: KernelSubscribeReplyData
class KernelStatusUpdateResponse(KernelsResponse):
event: Literal["kernel_status_update_event"] = "kernel_status_update_event"
data: KernelStatusUpdate
# Cell State
class BulkCellStateUpdateData(BaseModel):
cell_states: List[CellState]
class BulkCellStateUpdateResponse(KernelsResponse):
event: Literal["bulk_cell_state_update_event"] = "bulk_cell_state_update_event"
data: BulkCellStateUpdateData
# Variable explorer updates return a list of current variables in the kernel
# On connect to a new Kernel, Clients can send a request to trigger an event. Otherwise events occur
# after cell execution automatically.
class VariableExplorerUpdateRequest(KernelsRequest):
event: Literal["variable_explorer_update_request"] = "variable_explorer_update_request"
# It is confusing but variable_explorer_update_request can either be an RTU client to Gate server
# (RTURequest) or also be propagated out by Gate from another client, meaning it comes in as a
# server-to-client (RTUResponse) so we need to model it just to avoid warning about unmodeled msgs
class VariableExplorerUpdateRequestPropogated(KernelsResponse):
event: Literal["variable_explorer_update_request"] = "variable_explorer_update_request"
data: dict = Field(default_factory=dict)
class VariableExplorerResponse(KernelsResponse):
event: Literal["variable_explorer_event"] = "variable_explorer_event"
class IntegratedAIRequestData(BaseModel):
prompt: str
# this may not be called on a specific cell, but at a specific point in time at a generic
# "document" level, so we don't require a cell_id
cell_id: Optional[str] = None
# if a cell_id is provided and this is True, the result will be added to the cell's output
# instead of just sent back as an RTU reply
output_for_response: bool = False
class IntegratedAIRequest(KernelsRequest):
event: Literal["integrated_ai_request"] = "integrated_ai_request"
data: IntegratedAIRequestData
class IntegratedAIReply(KernelsResponse):
event: Literal["integrated_ai_reply"] = "integrated_ai_reply"
data: BooleanReplyData
class IntegratedAIEvent(KernelsResponse):
event: Literal["integrated_ai_event"] = "integrated_ai_event"
# same data as the IntegratedAIRequest, just echoed back out
data: IntegratedAIRequestData
class IntegratedAIResultData(BaseModel):
# the full response from OpenAI; in most cases, sidecar will have either created a new cell
# or an output, so this result should really only be used when the RTU client needs it to exist
# outside of the cell/output structure
result: str
# this is sidecar to gate as a result of calling the OpenAIHandler method (OpenAI response,
# error, etc); after that, Gate propagates the data out as an IntegratedAIEvent
class IntegratedAIResult(KernelsRequest):
event: Literal["integrated_ai_result"] = "integrated_ai_result"
data: IntegratedAIResultData
class IntegratedAIResultReply(KernelsResponse):
event: Literal["integrated_ai_result_reply"] = "integrated_ai_result_reply"
data: BooleanReplyData
class IntegratedAIResultEvent(KernelsResponse):
event: Literal["integrated_ai_result_event"] = "integrated_ai_result_event"
data: IntegratedAIResultData
KernelRequests = Annotated[
Union[
KernelSubscribeRequest,
VariableExplorerUpdateRequest,
IntegratedAIRequest,
IntegratedAIResult,
],
Field(discriminator="event"),
]
KernelResponses = Annotated[
Union[
KernelSubscribeReply,
KernelStatusUpdateResponse,
BulkCellStateUpdateResponse,
VariableExplorerUpdateRequestPropogated,
VariableExplorerResponse,
IntegratedAIReply,
IntegratedAIResultReply,
IntegratedAIEvent,
IntegratedAIResultEvent,
],
Field(discriminator="event"),
]
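# --- Illustrative sketch (added comment, not part of the original module) ---
# Both unions discriminate on the `event` field, so an incoming payload can be routed
# to the right model from that field alone. Assuming pydantic v2's TypeAdapter (the
# required envelope fields besides `event` and `data` come from BaseRTUResponse and
# are not shown here):
#   from pydantic import TypeAdapter
#   msg = TypeAdapter(KernelResponses).validate_python(raw_payload)  # raw_payload: decoded dict
#   if isinstance(msg, BulkCellStateUpdateResponse):
#       ...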
| [] |
2024-01-10 | zundel48/guidance | guidance~llms~caches~_diskcache.py | import os
import diskcache
import platformdirs
from guidance.llms.caches import Cache
class DiskCache(Cache):
"""DiskCache is a cache that uses diskcache lib."""
def __init__(self, llm_name: str):
self._diskcache = diskcache.Cache(
os.path.join(
platformdirs.user_cache_dir("guidance"), f"_{llm_name}.diskcache"
)
)
def __getitem__(self, key: str) -> str:
return self._diskcache[key]
def __setitem__(self, key: str, value: str) -> None:
self._diskcache[key] = value
def __contains__(self, key: str) -> bool:
return key in self._diskcache
def clear(self):
self._diskcache.clear()
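# --- Usage sketch (added comment, not part of the original file) ---
# Callers typically key the cache with a hash of the prompt plus generation settings;
# `llm_name` only namespaces the on-disk cache file. The names below are placeholders:
#   cache = DiskCache("example-llm")
#   cache["prompt-hash"] = "cached completion"
#   if "prompt-hash" in cache:
#       result = cache["prompt-hash"]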
| [] |
2024-01-10 | Saptak625/Investify | text_simplifier.py | import openai
import os
import time
import json
from dotenv import load_dotenv
load_dotenv()
OPENAI_API_KEY = os.getenv('OPENAI_KEY')
openai.api_key = OPENAI_API_KEY
def summarize(prompt):
reduced_prompt = ' '.join(prompt.replace('\n', ' ').split(' ')[:1600])
augmented_prompt = "summarize this text to 500 words: " + reduced_prompt
messages=[
{"role": "system", "content": "You are a helpful assistant that summarizes and simplifies Investopedia articles through your complete knowledge of finance and investing. You will also assist the user by answering questions about the article. If the user asks a question that is not relevant to the article or finance in general, you are REJECT THE REQUEST and state `As a personal financial educator, I cannot answer that question.`."},
{"role": "user", "content": augmented_prompt},
]
return ask(messages)
def ask(messages):
### STREAM CHATGPT API RESPONSES
delay_time = 0.01 # faster
max_response_length = 1500
start_time = time.time()
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
max_tokens=1500,
stream=True
)
whole_answer = ''
for event in response:
# RETRIEVE THE TEXT FROM THE RESPONSE
event_time = time.time() - start_time # CALCULATE TIME DELAY BY THE EVENT
event_text = event['choices'][0]['delta'] # type: ignore # EVENT DELTA RESPONSE
answer = event_text.get('content', '') # RETRIEVE CONTENT
# STREAM THE ANSWER
if answer:
whole_answer += answer
# Convert string to byte string
answer = answer.encode('utf-8')
yield answer # Yield the response
time.sleep(delay_time)
yield json.dumps(messages + [{"role": "system", "content": whole_answer}])
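    # Note (added comment, not in the original source): every chunk above is yielded as
    # UTF-8 bytes for streaming, while this final yield is a plain str containing the
    # JSON-serialized message history with the assistant's full answer appended, so
    # consumers must special-case the last item.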
if __name__ == '__main__':
text = '''Pete Rathburn is a copy editor and fact-checker with expertise in economics and personal finance and over twenty years of experience in the classroom. Investopedia / Laura Porter
The Altman Z-score is the output of a credit-strength test that gauges a publicly traded manufacturing company's likelihood of bankruptcy. The Altman Z-score, a variation of the traditional z-score in statistics, is based on five financial ratios that can be calculated from data found on a company's annual 10-K report. It uses profitability, leverage, liquidity, solvency, and activity to predict whether a company has a high probability of becoming insolvent.
NYU Stern Finance Professor Edward Altman developed the Altman Z-score formula in 1967, and it was published in 1968. Over the years, Altman has continued to reevaluate his Z-score. From 1969 until 1975, Altman looked at 86 companies in distress, then 110 from 1976 to 1995, and finally 120 from 1996 to 1999, finding that the Z-score had an accuracy of between 82% and 94%.
In 2012, he released an updated version called the Altman Z-score Plus that one can use to evaluate public and private companies, manufacturing and non-manufacturing companies, and U.S. and non-U.S. companies. One can use Altman Z-score Plus to evaluate corporate credit risk. The Altman Z-score has become a reliable measure of calculating credit risk.
One can calculate the Altman Z-score as follows:
Altman Z-Score = 1.2A + 1.4B + 3.3C + 0.6D + 1.0E
A score below 1.8 means it's likely the company is headed for bankruptcy, while companies with scores above 3 are not likely to go bankrupt. Investors can use Altman Z-scores to determine whether they should buy or sell a stock if they're concerned about the company's underlying financial strength. Investors may consider purchasing a stock if its Altman Z-Score value is closer to 3 and selling or shorting a stock if the value is closer to 1.8.
In more recent years, however, a Z-Score closer to 0 indicates a company may be in financial trouble. In a lecture given in 2019 titled "50 Years of the Altman Score," Professor Altman himself noted that recent data has shown that 0—not 1.8—is the figure at which investors should worry about a company's financial strength. The two-hour lecture is available to view for free on YouTube.
In 2007, the credit ratings of specific asset-related securities had been rated higher than they should have been. The Altman Z-score indicated that the companies' risks were increasing significantly and may have been heading for bankruptcy.
Altman calculated that the median Altman Z-score of companies in 2007 was 1.81. These companies' credit ratings were equivalent to a B. This indicated that 50% of the firms should have had lower ratings, were highly distressed and had a high probability of becoming bankrupt.
Altman's calculations led him to believe a crisis would occur and there would be a meltdown in the credit market. He believed the crisis would stem from corporate defaults, but the meltdown, which brought about the 2008 financial crisis, began with mortgage-backed securities (MBS). However, corporations soon defaulted in 2009 at the second-highest rate in history.
The Altman Z-score, a variation of the traditional z-score in statistics, is based on five financial ratios that can be calculated from data found on a company's annual 10-K report. The formula for Altman Z-Score is 1.2*(working capital / total assets) + 1.4*(retained earnings / total assets) + 3.3*(earnings before interest and tax / total assets) + 0.6*(market value of equity / total liabilities) + 1.0*(sales / total assets). Investors can use Altman Z-score Plus to evaluate corporate credit risk. A score below 1.8 signals the company is likely headed for bankruptcy, while companies with scores above 3 are not likely to go bankrupt. Investors may consider purchasing a stock if its Altman Z-Score value is closer to 3 and selling, or shorting, a stock if the value is closer to 1.8. In more recent years, Altman has stated a score closer to 0 rather than 1.8 indicates a company is closer to bankruptcy. In 2007, Altman's Z-score indicated that the companies' risks were increasing significantly. The median Altman Z-score of companies in 2007 was 1.81, which is very close to the threshold that would indicate a high probability of bankruptcy. Altman's calculations led him to believe a crisis would occur that would stem from corporate defaults, but the meltdown, which brought about the 2008 financial crisis, began with mortgage-backed securities (MBS); however, corporations soon defaulted in 2009 at the second-highest rate in history. NYU Stern. "Predicting Financial Distress of Companies: Revisiting the Z-Score and Zeta Models," Page 18. Accessed Nov. 19, 2021. NYU Stern. "Professor Edward Altman Launches Digital App for Renowned Z-Score, "Altman Z-Score Plus." Accessed Nov. 19, 2021. NYU Stern. "Predicting Financial Distress of Companies: Revisiting the Z-Score and Zeta Models," Page 26. Accessed Nov. 19, 2021. NYU Stern. "A 50-Year Retrospective on Credit Risk Models, the Altman Z-Score Family of Models and Their Applications to Financial Markets and Managerial Strategies," Page 20. Accessed Nov. 19, 2021. NYU Stern. "Special Report on Defaults and Returns in the High-Yield Bond Market: The Year 2007 in Review and Outlook," Pages 9-13 and 27. Accessed Nov. 19, 2021 NYU Stern. "Special Report on Defaults and Returns in the High-Yield Bond Market: The Year 2007 in Review and Outlook," Pages 9-13 and 26. Accessed Nov. 19, 2021. NYU Stern. "Special Report On Defaults and Returns in the High-Yield Bond and Distressed Debt Market: The Year 2009 in Review and Outlook," Page 3. Accessed Nov. 19, 2021. By clicking “Accept All Cookies”, you agree to the storing of cookies on your device to enhance site navigation, analyze site usage, and assist in our marketing efforts.'''
# summary = summarize(text)
# print(summary)
reduced_prompt = ' '.join(text.replace('\n', ' ').split(' ')[:1600])
messages=[
{"role": "system", "content": "You are a helpful assistant that summarizes and simplifies Investopedia articles through your complete knowledge of finance and investing. You will also assist the user by answering questions about the article."},
{"role": "user", "content": "summarize this text to 500 words: " + reduced_prompt},
{"role": "system", "content": """The Altman Z-score is a credit-strength test that measures a publicly traded manufacturing company's likelihood of bankruptcy. It is based on five financial ratios that can be calculated from data found on a company's annual 10-K report, and uses profitability, leverage, liquidity, solvency, and activity to predict whether a company has a high probability of becoming insolvent. The Altman Z-score was created by NYU Stern Finance Professor Edward Altman in 1967, and its accuracy was found to be between 82% and 94% when it was originally researched. The formula for the Altman Z-score is 1.2A + 1.4B + 3.3C + 0.6D + 1.0E, and a score below 1.8 means that the company is at risk of bankruptcy, while companies with a score above 3 are not likely to go bankrupt.
The Altman Z-Score Plus is an updated version of the Altman Z-Score and was released in 2012. It allows investors to evaluate both public and private manufacturing and non-manufacturing companies, as well as U.S. and non-U.S. companies, and can be used to evaluate corporate credit risk. If an investor is concerned about a company's underlying financial strength, they can use the Altman Z-score to determine whether they should buy or sell a stock. In more recent years, Professor Altman has stated that a score closer to 0 rather than 1.8 indicates a company is closer to bankruptcy.
In 2007, the credit ratings of certain asset-related securities were rated higher than they should have been. The Altman Z-score indicated that these companies' risks were significantly increasing and that they may have been heading for bankruptcy. The median Altman Z-score of companies in 2007 was 1.81, which is very close to the threshold that would indicate a high probability of bankruptcy. Altman's calculations led him to believe that a crisis would occur, stemming from corporate defaults, but the 2008 financial crisis began with mortgage-backed securities (MBS). However, corporations soon defaulted in 2009 at the second-highest rate in history.
Investors may consider purchasing a stock if its Altman Z-Score value is closer to 3 and selling, or shorting, a stock if the value is closer to 1.8. The Altman Z-Score has become a reliable measure of calculating credit risk, and the Altman Z-Score Plus provides investors with a more inclusive analysis."""},
{"role": "user", "content": "How can the Altman Z-Score be used to assess a company's financial health and predict its risk of bankruptcy?"},
]
g = ask(messages)
for m in g:
print(m, end='', flush=True) | [
"\n",
"You are a helpful assistant that summarizes and simplifies Investopedia articles through your complete knowledge of finance and investing. You will also assist the user by answering questions about the article.",
"How can the Altman Z-Score be used to assess a company's financial health and predict its risk of bankruptcy?",
"You are a helpful assistant that summarizes and simplifies Investopedia articles through your complete knowledge of finance and investing. You will also assist the user by answering questions about the article. If the user asks a question that is not relevant to the article or finance in general, you are REJECT THE REQUEST and state `As a personal financial educator, I cannot answer that question.`.",
"Pete Rathburn is a copy editor and fact-checker with expertise in economics and personal finance and over twenty years of experience in the classroom. Investopedia / Laura Porter The Altman Z-score is the output of a credit-strength test that gauges a publicly traded manufacturing company's likelihood of bankruptcy. The Altman Z-score, a variation of the traditional z-score in statistics, is based on five financial ratios that can be calculated from data found on a company's annual 10-K report. It uses profitability, leverage, liquidity, solvency, and activity to predict whether a company has a high probability of becoming insolvent. NYU Stern Finance Professor Edward Altman developed the Altman Z-score formula in 1967, and it was published in 1968. Over the years, Altman has continued to reevaluate his Z-score. From 1969 until 1975, Altman looked at 86 companies in distress, then 110 from 1976 to 1995, and finally 120 from 1996 to 1999, finding that the Z-score had an accuracy of between 82% and 94%. In 2012, he released an updated version called the Altman Z-score Plus that one can use to evaluate public and private companies, manufacturing and non-manufacturing companies, and U.S. and non-U.S. companies. One can use Altman Z-score Plus to evaluate corporate credit risk. The Altman Z-score has become a reliable measure of calculating credit risk. One can calculate the Altman Z-score as follows: Altman Z-Score = 1.2A + 1.4B + 3.3C + 0.6D + 1.0E A score below 1.8 means it's likely the company is headed for bankruptcy, while companies with scores above 3 are not likely to go bankrupt. Investors can use Altman Z-scores to determine whether they should buy or sell a stock if they're concerned about the company's underlying financial strength. Investors may consider purchasing a stock if its Altman Z-Score value is closer to 3 and selling or shorting a stock if the value is closer to 1.8. In more recent years, however, a Z-Score closer to 0 indicates a company may be in financial trouble. In a lecture given in 2019 titled \"50 Years of the Altman Score,\" Professor Altman himself noted that recent data has shown that 0—not 1.8—is the figure at which investors should worry about a company's financial strength. The two-hour lecture is available to view for free on YouTube. In 2007, the credit ratings of specific asset-related securities had been rated higher than they should have been. The Altman Z-score indicated that the companies' risks were increasing significantly and may have been heading for bankruptcy. Altman calculated that the median Altman Z-score of companies in 2007 was 1.81. These companies' credit ratings were equivalent to a B. This indicated that 50% of the firms should have had lower ratings, were highly distressed and had a high probability of becoming bankrupt. Altman's calculations led him to believe a crisis would occur and there would be a meltdown in the credit market. He believed the crisis would stem from corporate defaults, but the meltdown, which brought about the 2008 financial crisis, began with mortgage-backed securities (MBS). However, corporations soon defaulted in 2009 at the second-highest rate in history. The Altman Z-score, a variation of the traditional z-score in statistics, is based on five financial ratios that can be calculated from data found on a company's annual 10-K report. 
The formula for Altman Z-Score is 1.2*(working capital / total assets) + 1.4*(retained earnings / total assets) + 3.3*(earnings before interest and tax / total assets) + 0.6*(market value of equity / total liabilities) + 1.0*(sales / total assets). Investors can use Altman Z-score Plus to evaluate corporate credit risk. A score below 1.8 signals the company is likely headed for bankruptcy, while companies with scores above 3 are not likely to go bankrupt. Investors may consider purchasing a stock if its Altman Z-Score value is closer to 3 and selling, or shorting, a stock if the value is closer to 1.8. In more recent years, Altman has stated a score closer to 0 rather than 1.8 indicates a company is closer to bankruptcy. In 2007, Altman's Z-score indicated that the companies' risks were increasing significantly. The median Altman Z-score of companies in 2007 was 1.81, which is very close to the threshold that would indicate a high probability of bankruptcy. Altman's calculations led him to believe a crisis would occur that would stem from corporate defaults, but the meltdown, which brought about the 2008 financial crisis, began with mortgage-backed securities (MBS); however, corporations soon defaulted in 2009 at the second-highest rate in history. NYU Stern. \"Predicting Financial Distress of Companies: Revisiting the Z-Score and Zeta Models,\" Page 18. Accessed Nov. 19, 2021. NYU Stern. \"Professor Edward Altman Launches Digital App for Renowned Z-Score, \"Altman Z-Score Plus.\" Accessed Nov. 19, 2021. NYU Stern. \"Predicting Financial Distress of Companies: Revisiting the Z-Score and Zeta Models,\" Page 26. Accessed Nov. 19, 2021. NYU Stern. \"A 50-Year Retrospective on Credit Risk Models, the Altman Z-Score Family of Models and Their Applications to Financial Markets and Managerial Strategies,\" Page 20. Accessed Nov. 19, 2021. NYU Stern. \"Special Report on Defaults and Returns in the High-Yield Bond Market: The Year 2007 in Review and Outlook,\" Pages 9-13 and 27. Accessed Nov. 19, 2021 NYU Stern. \"Special Report on Defaults and Returns in the High-Yield Bond Market: The Year 2007 in Review and Outlook,\" Pages 9-13 and 26. Accessed Nov. 19, 2021. NYU Stern. \"Special Report On Defaults and Returns in the High-Yield Bond and Distressed Debt Market: The Year 2009 in Review and Outlook,\" Page 3. Accessed Nov. 19, 2021. By clicking “Accept All Cookies”, you agree to the storing of cookies on your device to enhance site navigation, analyze site usage, and assist in our marketing efforts.",
" ",
"The Altman Z-score is a credit-strength test that measures a publicly traded manufacturing company's likelihood of bankruptcy. It is based on five financial ratios that can be calculated from data found on a company's annual 10-K report, and uses profitability, leverage, liquidity, solvency, and activity to predict whether a company has a high probability of becoming insolvent. The Altman Z-score was created by NYU Stern Finance Professor Edward Altman in 1967, and its accuracy was found to be between 82% and 94% when it was originally researched. The formula for the Altman Z-score is 1.2A + 1.4B + 3.3C + 0.6D + 1.0E, and a score below 1.8 means that the company is at risk of bankruptcy, while companies with a score above 3 are not likely to go bankrupt.\n\nThe Altman Z-Score Plus is an updated version of the Altman Z-Score and was released in 2012. It allows investors to evaluate both public and private manufacturing and non-manufacturing companies, as well as U.S. and non-U.S. companies, and can be used to evaluate corporate credit risk. If an investor is concerned about a company's underlying financial strength, they can use the Altman Z-score to determine whether they should buy or sell a stock. In more recent years, Professor Altman has stated that a score closer to 0 rather than 1.8 indicates a company is closer to bankruptcy.\n\nIn 2007, the credit ratings of certain asset-related securities were rated higher than they should have been. The Altman Z-score indicated that these companies' risks were significantly increasing and that they may have been heading for bankruptcy. The median Altman Z-score of companies in 2007 was 1.81, which is very close to the threshold that would indicate a high probability of bankruptcy. Altman's calculations led him to believe that a crisis would occur, stemming from corporate defaults, but the 2008 financial crisis began with mortgage-backed securities (MBS). However, corporations soon defaulted in 2009 at the second-highest rate in history.\n\nInvestors may consider purchasing a stock if its Altman Z-Score value is closer to 3 and selling, or shorting, a stock if the value is closer to 1.8. The Altman Z-Score has become a reliable measure of calculating credit risk, and the Altman Z-Score Plus provides investors with a more inclusive analysis.",
"summarize this text to 500 words: PLACEHOLDER"
] |
2024-01-10 | mivanovitch/embedchain | embedchain~chunkers~youtube_video.py | from embedchain.chunkers.base_chunker import BaseChunker
from langchain.text_splitter import RecursiveCharacterTextSplitter
TEXT_SPLITTER_CHUNK_PARAMS = {
"chunk_size": 2000,
"chunk_overlap": 0,
"length_function": len,
}
class YoutubeVideoChunker(BaseChunker):
def __init__(self):
text_splitter = RecursiveCharacterTextSplitter(**TEXT_SPLITTER_CHUNK_PARAMS)
super().__init__(text_splitter) | [] |
2024-01-10 | mivanovitch/embedchain | embedchain~chunkers~qna_pair.py | from embedchain.chunkers.base_chunker import BaseChunker
from langchain.text_splitter import RecursiveCharacterTextSplitter
TEXT_SPLITTER_CHUNK_PARAMS = {
"chunk_size": 300,
"chunk_overlap": 0,
"length_function": len,
}
class QnaPairChunker(BaseChunker):
def __init__(self):
text_splitter = RecursiveCharacterTextSplitter(**TEXT_SPLITTER_CHUNK_PARAMS)
super().__init__(text_splitter)
| [] |
2024-01-10 | mivanovitch/embedchain | embedchain~loaders~pdf_file.py | from langchain.document_loaders import PyPDFLoader
from embedchain.utils import clean_string
class PdfFileLoader:
def load_data(self, url):
loader = PyPDFLoader(url)
output = []
pages = loader.load_and_split()
if not len(pages):
raise ValueError("No data found")
for page in pages:
content = page.page_content
content = clean_string(content)
meta_data = page.metadata
meta_data["url"] = url
output.append({
"content": content,
"meta_data": meta_data,
})
return output | [] |
2024-01-10 | mivanovitch/embedchain | embedchain~embedchain.py | import openai
import os
from dotenv import load_dotenv
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from embedchain.loaders.youtube_video import YoutubeVideoLoader
from embedchain.loaders.pdf_file import PdfFileLoader
from embedchain.loaders.web_page import WebPageLoader
from embedchain.loaders.local_qna_pair import LocalQnaPairLoader
from embedchain.chunkers.youtube_video import YoutubeVideoChunker
from embedchain.chunkers.pdf_file import PdfFileChunker
from embedchain.chunkers.web_page import WebPageChunker
from embedchain.chunkers.qna_pair import QnaPairChunker
from embedchain.vectordb.chroma_db import ChromaDB
load_dotenv()
embeddings = OpenAIEmbeddings()
ABS_PATH = os.getcwd()
DB_DIR = os.path.join(ABS_PATH, "db")
class EmbedChain:
def __init__(self, db=None):
"""
Initializes the EmbedChain instance, sets up a vector DB client and
creates a collection.
:param db: The instance of the VectorDB subclass.
"""
if db is None:
db = ChromaDB()
self.db_client = db.client
self.collection = db.collection
self.user_asks = []
def _get_loader(self, data_type):
"""
Returns the appropriate data loader for the given data type.
:param data_type: The type of the data to load.
:return: The loader for the given data type.
:raises ValueError: If an unsupported data type is provided.
"""
loaders = {
'youtube_video': YoutubeVideoLoader(),
'pdf_file': PdfFileLoader(),
'web_page': WebPageLoader(),
'qna_pair': LocalQnaPairLoader()
}
if data_type in loaders:
return loaders[data_type]
else:
raise ValueError(f"Unsupported data type: {data_type}")
def _get_chunker(self, data_type):
"""
Returns the appropriate chunker for the given data type.
:param data_type: The type of the data to chunk.
:return: The chunker for the given data type.
:raises ValueError: If an unsupported data type is provided.
"""
chunkers = {
'youtube_video': YoutubeVideoChunker(),
'pdf_file': PdfFileChunker(),
'web_page': WebPageChunker(),
'qna_pair': QnaPairChunker(),
}
if data_type in chunkers:
return chunkers[data_type]
else:
raise ValueError(f"Unsupported data type: {data_type}")
def add(self, data_type, url):
"""
Adds the data from the given URL to the vector db.
Loads the data, chunks it, create embedding for each chunk
and then stores the embedding to vector database.
:param data_type: The type of the data to add.
:param url: The URL where the data is located.
"""
loader = self._get_loader(data_type)
chunker = self._get_chunker(data_type)
self.user_asks.append([data_type, url])
self.load_and_embed(loader, chunker, url)
def add_local(self, data_type, content):
"""
Adds the data you supply to the vector db.
Loads the data, chunks it, create embedding for each chunk
and then stores the embedding to vector database.
:param data_type: The type of the data to add.
:param content: The local data. Refer to the `README` for formatting.
"""
loader = self._get_loader(data_type)
chunker = self._get_chunker(data_type)
self.user_asks.append([data_type, content])
self.load_and_embed(loader, chunker, content)
def load_and_embed(self, loader, chunker, url):
"""
Loads the data from the given URL, chunks it, and adds it to the database.
:param loader: The loader to use to load the data.
:param chunker: The chunker to use to chunk the data.
:param url: The URL where the data is located.
"""
embeddings_data = chunker.create_chunks(loader, url)
documents = embeddings_data["documents"]
metadatas = embeddings_data["metadatas"]
ids = embeddings_data["ids"]
# get existing ids, and discard doc if any common id exist.
existing_docs = self.collection.get(
ids=ids,
# where={"url": url}
)
existing_ids = set(existing_docs["ids"])
if len(existing_ids):
data_dict = {id: (doc, meta) for id, doc, meta in zip(ids, documents, metadatas)}
data_dict = {id: value for id, value in data_dict.items() if id not in existing_ids}
if not data_dict:
print(f"All data from {url} already exists in the database.")
return
ids = list(data_dict.keys())
documents, metadatas = zip(*data_dict.values())
self.collection.add(
documents=documents,
metadatas=metadatas,
ids=ids
)
print(f"Successfully saved {url}. Total chunks count: {self.collection.count()}")
def _format_result(self, results):
return [
(Document(page_content=result[0], metadata=result[1] or {}), result[2])
for result in zip(
results["documents"][0],
results["metadatas"][0],
results["distances"][0],
)
]
def get_openai_answer(self, prompt):
messages = []
messages.append({
"role": "user", "content": prompt
})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=messages,
temperature=0,
max_tokens=1000,
top_p=1,
)
return response["choices"][0]["message"]["content"]
def retrieve_from_database(self, input_query):
"""
Queries the vector database based on the given input query.
Gets relevant doc based on the query
:param input_query: The query to use.
:return: The content of the document that matched your query.
"""
result = self.collection.query(
query_texts=[input_query,],
n_results=1,
)
result_formatted = self._format_result(result)
content = result_formatted[0][0].page_content
return content
def generate_prompt(self, input_query, context):
"""
Generates a prompt based on the given query and context, ready to be passed to an LLM
:param input_query: The query to use.
:param context: Similar documents to the query used as context.
:return: The prompt
"""
prompt = f"""Use the following pieces of context to answer the query at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Query: {input_query}
Helpful Answer:
"""
return prompt
def get_answer_from_llm(self, prompt):
"""
Gets an answer based on the given query and context by passing it
to an LLM.
        :param prompt: The prompt (query plus retrieved context) to send to the LLM.
:return: The answer.
"""
answer = self.get_openai_answer(prompt)
return answer
def query(self, input_query):
"""
Queries the vector database based on the given input query.
Gets relevant doc based on the query and then passes it to an
LLM as context to get the answer.
:param input_query: The query to use.
:return: The answer to the query.
"""
context = self.retrieve_from_database(input_query)
prompt = self.generate_prompt(input_query, context)
answer = self.get_answer_from_llm(prompt)
return answer
class App(EmbedChain):
"""
The EmbedChain app.
Has two functions: add and query.
    add(data_type, url): adds the data from the given URL to the vector db.
query(query): finds answer to the given query using vector database and LLM.
"""
pass
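# Illustrative usage sketch (not part of the original file); the URLs and question are
# placeholders and an OpenAI API key is assumed to be configured for the embedding/answer calls:
#
#   app = App()
#   app.add("web_page", "https://example.com/docs")
#   app.add("pdf_file", "https://example.com/manual.pdf")
#   answer = app.query("How do I install the tool?")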
| [
"Use the following pieces of context to answer the query at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n PLACEHOLDER\n Query: PLACEHOLDER\n Helpful Answer:\n "
] |
2024-01-10 | mivanovitch/embedchain | embedchain~chunkers~web_page.py | from embedchain.chunkers.base_chunker import BaseChunker
from langchain.text_splitter import RecursiveCharacterTextSplitter
TEXT_SPLITTER_CHUNK_PARAMS = {
"chunk_size": 500,
"chunk_overlap": 0,
"length_function": len,
}
class WebPageChunker(BaseChunker):
def __init__(self):
text_splitter = RecursiveCharacterTextSplitter(**TEXT_SPLITTER_CHUNK_PARAMS)
super().__init__(text_splitter)
| [] |
2024-01-10 | mivanovitch/embedchain | embedchain~loaders~youtube_video.py | from langchain.document_loaders import YoutubeLoader
from embedchain.utils import clean_string
class YoutubeVideoLoader:
def load_data(self, url):
loader = YoutubeLoader.from_youtube_url(url, add_video_info=True)
doc = loader.load()
output = []
if not len(doc):
raise ValueError("No data found")
content = doc[0].page_content
content = clean_string(content)
meta_data = doc[0].metadata
meta_data["url"] = url
output.append({
"content": content,
"meta_data": meta_data,
})
return output
| [] |
2024-01-10 | mivanovitch/embedchain | embedchain~chunkers~pdf_file.py | from embedchain.chunkers.base_chunker import BaseChunker
from langchain.text_splitter import RecursiveCharacterTextSplitter
TEXT_SPLITTER_CHUNK_PARAMS = {
"chunk_size": 1000,
"chunk_overlap": 0,
"length_function": len,
}
class PdfFileChunker(BaseChunker):
def __init__(self):
text_splitter = RecursiveCharacterTextSplitter(**TEXT_SPLITTER_CHUNK_PARAMS)
super().__init__(text_splitter) | [] |
2024-01-10 | lvwerra/datasets | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
  howpublished={\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
"""Yields examples."""
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
| [] |
2024-01-10 | cliff-rosen/chatter | backend~chatter~data_processor~util_embed_chunks.py | import openai
from openai.embeddings_utils import get_embedding, cosine_similarity
from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
import pinecone
import json
import re
import os
import sys
sys.path.append('db')
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import db
import local_secrets as secrets
"""
TO DO:
log error if upsert response not {'upserted_count': 1}
upsert in batches
"""
PINECONE_API_KEY = secrets.PINECONE_API_KEY
INDEX_NAME = "main-index"
OPENAI_API_KEY = secrets.OPENAI_API_KEY
openai.api_key = OPENAI_API_KEY
pinecone.init(api_key=PINECONE_API_KEY, environment="us-east1-gcp")
index = pinecone.Index(INDEX_NAME)
def get_openai_embedding(text):
embedding_model = "text-embedding-ada-002"
return get_embedding(
text,
engine="text-embedding-ada-002"
)
def run():
print("Starting embedding update for domain", domain_id)
conn = db.get_connection()
rows = db.get_document_chunks(conn, domain_id)
cur_count = 1
tot_count = len(rows)
print("Total chunks to be updated", tot_count)
for row in rows:
doc_chunk_id = row['doc_chunk_id']
chunk_text = row['chunk_text']
embedding = get_openai_embedding(chunk_text)
print("Processing ", cur_count, " of ", tot_count)
print(" Data: ", doc_chunk_id, embedding[:10])
db.update_document_chunk_embedding(conn, doc_chunk_id, embedding)
cur_count = cur_count + 1
db.close_connection(conn)
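# Sketch for the "upsert in batches" TO DO above (illustrative only, not part of the original
# script); it assumes the pinecone-client v2 API used elsewhere in this file, where upsert()
# accepts a list of (id, vector, metadata) tuples and reports an 'upserted_count':
#
#   def upsert_in_batches(vectors, batch_size=100):
#       for start in range(0, len(vectors), batch_size):
#           batch = vectors[start:start + batch_size]
#           response = index.upsert(vectors=batch)
#           if response.get("upserted_count") != len(batch):
#               print("Unexpected upsert response for batch starting at", start, response)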
def fetch():
res = index.fetch(ids=['3'])
print(res['vectors']['3']['metadata'])
# runtime settings
domain_id = 27
print(index.describe_index_stats())
run()
| [] |
2024-01-10 | cliff-rosen/chatter | backend~chatter~utils~kb_service.py | from db import db
from utils.utils import num_tokens_from_string
from utils.logging import logging
import utils.openai_wrappers as model
import utils.pinecone_wrappers as vdb
from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
COMPLETION_MODEL = 'text-davinci-003'
TEMPERATURE = 0.0
logger = logging.getLogger()
'''
add_document(domain_id, uri, title, text, blob)
update_document(doc_id, uri, title, text, blob)
del_document(doc_id)
get_document(doc_id)
get_chunk(doc_chunk_id)
get_parent_document(doc_chunk_id)
get_chunks_from_query(domain_id, query)
chunks dict:
{
ID: {
"id": ID as int,
"score": score as float,
"metadata": {
"doc_chunk_id": 44743.0,
"doc_id": 20657.0,
"domain_id": 27.0
},
"uri": uri,
"text": text,
"used": isUsed
}
}
{
"27": {
"id": 27,
"score": 0.737494111,
"metadata": {
"doc_chunk_id": 27.0,
"doc_id": 15.0,
"domain_id": 1.0
},
"uri": "Changes in Drug Level Laboratory Results _ DoseMe Help Center.pdf",
"text": "different, DoseMeRx will tend to prefer the one most like the population model (as this is more \ncommon in the population). Therefore, it may recommend a different dose than what would be \ncustomary for a patient if only the most recent result was considered.\nHere are two approaches to consider when this is encountered:\nIf the accuracy of the outlier drug level is questionable:\n\u0000. Consider obtaining another level if possible to validate the accuracy of the most recent \nlevel.\n\u0000. If you cannot obtain a level, exclude the last level and DoseMeRx will calculate the dose \nbased on the prior existing levels.\nIf the most recent drug level value is considered to be correct:\n9/14/23, 4:09 PM Changes in Drug Level Laboratory Results | DoseMe Help Center\nhttps://help.doseme-rx.com/en/articles/3353676-changes-in-drug-level-laboratory-results 2/2doseme-rx.com\n\u0000. Exclude earlier drug levels (if the last result is considered correct and you think a change \nhas taken place).",
"used": true
}
}
'''
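# Illustrative usage of the module API documented above (not part of the original file);
# the domain id, file name and question are made-up placeholders:
#
#   doc_id = add_document(domain_id=1, uri="faq.pdf", title="FAQ", text=full_text, blob=None)
#   chunks = get_chunks_from_query(domain_id=1, user_message="How do I reset my password?")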
def _embed_and_add_document_chunk(doc_id, chunk_text):
emb = model.get_embedding(chunk_text)
doc_chunk_id = db.insert_document_chunk(doc_id, chunk_text, emb)
return (emb, doc_chunk_id)
def _make_chunks_from_text(text):
chunks_splitter = RecursiveCharacterTextSplitter(
chunk_size = 1000,
chunk_overlap = 0,
length_function = len,
)
chunks = chunks_splitter.split_text(text)
print('Chunks produced:', len(chunks))
return chunks
# mutate chunks by adding {"uri": uri, "text": text} to each value dict
# chunks is dict where
# key is chunk_id, and value is obj with score, text
def _set_chunk_text_from_ids(chunks):
ids = list(chunks.keys())
rows = db.get_document_chunks_from_ids(ids)
for row in rows:
doc_chunk_id = row["doc_chunk_id"]
chunk_text = row["chunk_text"]
doc_uri = row["doc_uri"]
print(f"id: {doc_chunk_id}, text: {chunk_text[:20]}")
chunks[str(doc_chunk_id)]["uri"] = doc_uri
chunks[str(doc_chunk_id)]["text"] = chunk_text
def add_document(domain_id, uri, title, text, blob):
doc_id = db.insert_document(domain_id, uri, title, text, text)
chunks = _make_chunks_from_text(text)
for chunk in chunks:
(emb, doc_chunk_id) = _embed_and_add_document_chunk(doc_id, chunk)
vdb.upsert_index(doc_id, doc_chunk_id, emb, domain_id)
print('added chunk ', doc_chunk_id)
return doc_id
def delete_document(doc_id):
vdb.delete(doc_id, {})
db.delete_document(doc_id)
def delete_documents(doc_ids):
for doc_id in doc_ids:
vdb.delete_all_for_doc_id(doc_id)
db.delete_document(doc_id)
def get_chunks_from_query(domain_id, user_message):
chunks = {}
print("getting query embedding")
query_embedding = model.get_embedding(user_message)
print("getting chunks ids")
chunks = vdb.get_matching_chunks(domain_id, query_embedding)
if not chunks:
raise Exception('No chunks found - check index')
print("getting chunk text from ids")
_set_chunk_text_from_ids(chunks)
#logger.info(chunks)
return chunks
| [] |
2024-01-10 | cliff-rosen/chatter | backend~chatter~data_processor~step_2_chunk.py | import openai
from openai.embeddings_utils import get_embedding, cosine_similarity
from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
import re
import os
import sys
#import sys
#sys.path.append('.\..')
from db import db
import local_secrets as secrets
"""
embedding length: 1536
Retrieve all documents for domain
For each document
break into chunks
for each chunk
get embedding
insert chunk with embedding into document_chunk table
"""
MIN_CHUNK_LENGTH = 20
MAX_CHUNK_LENGTH = 1500
OPENAI_API_KEY = secrets.OPENAI_API_KEY
openai.api_key = OPENAI_API_KEY
def get_openai_embedding(text):
embedding_model = "text-embedding-ada-002"
return get_embedding(
text,
engine="text-embedding-ada-002"
)
def get_all_docs_from_domain(conn, domain_id):
return db.get_all_docs_from_domain(conn, domain_id)
def get_docs_from_ids(conn, ids):
return db.get_docs_from_ids(conn, ids)
def get_chunks_from_text(text, maker_type):
if maker_type == "MAKER_2":
return get_chunks_from_text_maker_2(text)
if maker_type == "CHAR":
print('chunking with CharacterTextSplitter')
chunks_splitter = CharacterTextSplitter(
#separator = "\n\n",
separator = "\n",
chunk_size = 1000,
chunk_overlap = 200,
length_function = len,
)
else:
print('chunking with RecursiveCharacterTextSplitter')
#text = re.sub('\s+', ' ', text)
chunks_splitter = RecursiveCharacterTextSplitter(
chunk_size = 1000,
chunk_overlap = 0,
length_function = len,
)
chunks = chunks_splitter.split_text(text)
return chunks
# create fragments, which are chunks delimited by \n\n
# chunks are fragments concatenated until a fragment is min 20 words
def get_chunks_from_text_maker_2(text):
print("chunk maker 2")
chunks = []
fragments = []
# clean input
text = text.encode(encoding='ASCII',errors='ignore').decode()
    text = text.strip()
#text = re.sub('\s{3,}', '\n\n', text)
    # build array of fragments by splitting on "\n\n"
fragments = text.split('\n\n')
# add array elements until reaching an element with at least 20 words
cur_chunk = ""
for i, fragment in enumerate(fragments):
cur_chunk = cur_chunk + '\n' + fragment
if len(cur_chunk) > 1 and (len(fragment.split()) >= 20 or i + 1 == len(fragments)):
cur_chunk = cur_chunk.strip()
if len(cur_chunk) > MIN_CHUNK_LENGTH:
chunks.append(cur_chunk)
cur_chunk = ""
return chunks
# runtime settings
#chunk_maker = "MAKER_2"
#chunk_maker = "CHAR"
chunk_maker = "MAKER_1"
domain_id = 1
#doc_ids = None
doc_ids = [53, 54, 55, 56, 57]
def run():
# init
conn = db.get_connection()
# one to one creation of chunks with embeddings
# FIX ME: should be upsertChunk() and not insertChunk()
if not doc_ids:
print("Retrieve documents for domain", domain_id)
rows = get_all_docs_from_domain(conn, domain_id)
else:
print("Retrieving documents: ", doc_ids)
rows = get_docs_from_ids(conn, doc_ids)
print("Retrieved: ", len(rows))
for doc_id, _domain_id, uri, doc_title, doc_text in rows:
print("****************************")
chunks = get_chunks_from_text(doc_text, chunk_maker)
print(uri, len(chunks))
for chunk in chunks:
print(doc_id, chunk[:50])
print("----------------------")
embedding = get_openai_embedding(chunk[:MAX_CHUNK_LENGTH])
db.insert_document_chunk(conn, doc_id, chunk, embedding)
# cleanup
db.close_connection(conn)
def write_to_file(text):
directory = 'chatter\data_processor\outputs'
dest = 'chunks.txt'
with open(os.path.join(directory, dest), 'a') as new_file:
new_file.write(text)
def test_chunker():
print("TEST: Retrieve documents for domain", domain_id)
conn = db.get_connection()
rows = get_all_docs_from_domain(conn, domain_id)
db.close_connection(conn)
for _doc_id, _domain_id, uri, _doc_title, doc_text in rows:
print("********************************")
print(uri)
chunks = get_chunks_from_text(SAMPLE_DOC, chunk_maker)
write_to_file('****************************************************************\n')
for chunk in chunks:
write_to_file(chunk + '\n==============\n')
write_to_file('\n\n')
def test_chunker_single_doc():
chunks = get_chunks_from_text(SAMPLE_DOC, chunk_maker)
for chunk in chunks:
write_to_file(chunk + '\n==============\n')
SAMPLE_DOC = """
Data Processing Steps
TO DO:
- add logging to capture links missing content, etc.
- step 2a: look for and remove irrelevant chunks
- check for empty or small pages ie from SPAs
- PREPARE -
1. Review site using inspect and establish which tag, tag id or class to spider
2. Create domain record with spider_notes with that info
3. Update the get_page_contents() to retrieve proper target
4. Update domain name and domain_id in 3 processing scripts
- RUN -
1. Step 1: spider side and populate document table
Verify get_page_contents retrieval logic
Set single to true
Set domain to "https://domain.com", with no / at end
Run script and verify:
console shows content found in correct section (i.e. id=main)
content written to page.txt is correct
spider_log has no errors
Change single to False and run Step 1 fully
Check logfile
Check db document table sorted by doc_uri
find duplicate doc_text
SELECT *
FROM document
WHERE domain_id = 31
ORDER BY doc_uri
2. Step 2: populate document_chunk from document records
set chunk_maker
set g_domain_id
run script
check logfile
check db document_chunks table
search for long chunks
SELECT length(chunk_text), dc.doc_chunk_id, dc.chunk_text, d.doc_uri
FROM document_chunk dc
JOIN document d ON dc.doc_id = d.doc_id
WHERE domain_id = 25
ORDER BY LENGTH(chunk_text) desc
LIMIT 100
search for redundant and useless chunks
SELECT dc.*
FROM document_chunk dc
JOIN document d ON dc.doc_id = d.doc_id
WHERE domain_id = 28
ORDER BY chunk_text
3. Step 3: update Pinecone index with chunks
set domain_id
run script
check logfile
verify:
select count(*) from document_chunk where domain_id = 22
select count(*) from document_chunk
compare with index count in Pinecone console
4. Create user
- TEST -
1. Login as user and test
- domain defaults to correct value
- "what does this company do" ; check chunks and response
Other tests:
check for chunks that are very long
SELECT domain_id, LENGTH(chunk_text), doc_chunk_id
FROM document_chunk dc
JOIN document d ON dc.doc_id = d.doc_id
WHERE LENGTH(chunk_text) > 2000
ORDER BY domain_id
"""
#####################################################
#clean_chunk = re.sub('\s+', ' ', chunk) | [] |
2024-01-10 | cliff-rosen/chatter | backend~chatter~data_processor~util_chunks.py | from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
import re
import os
import sys
sys.path.append('db')
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import db
import local_secrets as secrets
MIN_CHUNK_LENGTH = 20
def get_chunks_from_text_2(text):
print("chunker 2")
chunks = []
fragments = []
# clean input
    text = text.strip()
text = re.sub('\s{3,}', '\n\n', text)
    # build array of fragments by splitting on "\n\n"
fragments = text.split('\n\n')
# add array elements until reaching an element with at least 20 words
cur_chunk = ""
for i, fragment in enumerate(fragments):
cur_chunk = cur_chunk + '\n' + fragment
if len(cur_chunk) > 1 and (len(fragment.split()) >= 20 or i + 1 == len(fragments)):
cur_chunk = cur_chunk.strip()
if len(cur_chunk) > MIN_CHUNK_LENGTH:
chunks.append(cur_chunk)
cur_chunk = ""
return chunks
def write_text_to_file(file_path, text):
with open(file_path, 'w') as new_file:
#clean_chunk = re.sub('\s+', ' ', chunk_text)
#clean_chunk = clean_chunk.encode(encoding='ASCII',errors='ignore').decode()
new_file.write(text)
def write_chunks_to_file(file_path, chunks):
with open(file_path, 'w') as new_file:
for chunk in chunks:
#clean_chunk = re.sub('\s+', ' ', chunk)
chunk = chunk.encode(encoding='ASCII',errors='ignore').decode()
new_file.write(chunk + "\n------------------\n")
def run():
# runtime settings
doc_id = 5758
text = db.get_document(doc_id)[0]["doc_text"]
text = text.strip()
write_text_to_file("p1.txt", text)
chunks = get_chunks_from_text_2(text)
write_chunks_to_file("p1-c1.txt", chunks)
text = ""
chunks = get_chunks_from_text_2(text)
for chunk in chunks:
print("-------------------------------")
print(chunk)
| [] |
2024-01-10 | cliff-rosen/chatter | backend~chatter~_archive~x.py | from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.llms import OpenAI
from typing import Generic, TypeVar
class P():
x = 5
@classmethod
def print_c(cls):
print(cls)
def print_x(self):
print('x', self.x)
class C(P):
def __init__(self, x):
self.x = x
P.print_c()
C.print_c()
"""
llm = OpenAI(temperature=0)
text = "What would be a good company name a company that makes colorful socks?"
tools = load_tools(["serpapi", "llm-math"], llm=llm)
agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
agent.run("What are the common adverse events in a hep A trial?")
class P():
def say_hello():
print('hello from P')
class C1(P):
def c1():
pass
class C2(P):
pass
class C3():
pass
#PX = Generic.__class_getitem__(TP)
TP = TypeVar('TP', bound=C1)
class X(Generic[TP]):
def __init__(self, p: TP):
self.p = p
def x1(self, p: TP):
print(p)
return p
#print(type(Generic[TP]))
#print("-----")
print(type(X))
print(type(X[int]))
class Y():
def __class_getitem__(cls, p):
return Y
#class X(Generic[TP]):
#class X(Generic.__class_getitem__(TP)):
class X(PX):
def __init__(self, p1: TP):
print("X type", type(p1))
#class Y(X[C1]):
class Y(X.__class_getitem__(C1)):
def __init__(self, p1: C1):
print("Y type", type(p1))
I = X.__class_getitem__(C1)
print(I)
#print(dir(Generic[TP]))
#print(dir(PX))
#print(dir(X))
#print(dir(Generic[TP]))
#if Generic[TP] == Generic.__class_getitem__(TP):
# print('yes')
T = TypeVar('T')
class A(Generic[T]):
def __init__(self):
self.items = []
def a(self, i: T):
print(i)
class B():
pass
def printt(x):
print(type(x))
class XC1(X[C1]):
pass
class XC2(X[C2]):
pass
class XC3(X[C3]):
pass
class A:
def __class_getitem__(cls, i):
pass
def x(a,b,c)
def x(a,b,c,d=10)
x(1,2,3,d=4)
class Hello():
def __call__(self, msg):
print('hello, ' + msg)
hello = Hello()
hello("there")
"""
| [] |
2024-01-10 | HavosAi/HavosAi | src~topic_modeling~visualize.py | import plotly.express as px
from sklearn.base import copy
from gensim.test.utils import common_dictionary
from gensim.models import CoherenceModel
import pandas as pd
import matplotlib.pyplot as plt
from tqdm.auto import tqdm
from IPython.display import display
class TopicVisualizer:
def __init__(self, topic_pipe, df_texts, text_col, date_col):
"""
Parameters:
----------
topic_pipe: sklearn.pipeline.Pipeline
Fitted topic pipeline containing steps: `preprocessor`, `vectorizer`, `model`.
        df_texts: pd.DataFrame
            DataFrame holding the documents; `text_col` and `date_col` name its text and date columns.
        """
self.pipe = topic_pipe
self.df_texts = df_texts
self.text_col = text_col
self.date_col = date_col
self.transform()
def transform(self):
"""Transforms nested `df_texts` storing all intermediate steps."""
self.texts_prep = self.pipe.named_steps.preprocessor.transform(self.df_texts[self.text_col])
self.feat = self.pipe.named_steps.vectorizer.transform(self.texts_prep)
self.data_topics = self.pipe.named_steps.model.transform(self.feat)
return self.df_topics
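    # Illustrative usage sketch (not part of the original class); the pipeline, DataFrame and
    # column names below are placeholders for a fitted sklearn Pipeline with "preprocessor",
    # "vectorizer" and "model" steps and a DataFrame with text/date columns:
    #
    #   viz = TopicVisualizer(topic_pipe=pipe, df_texts=df, text_col="abstract", date_col="published")
    #   viz.plot_top_keywords(n_words=10)
    #   viz.plot_topic_trend(min_score=0.2)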
@staticmethod
def _plot_top_words(model, feature_names, n_top_words, title):
n_components = len(model.components_)
fig, axes = plt.subplots(n_components // 5, 5, figsize=(30, 1.5 * n_components), sharex=True)
axes = axes.flatten()
for topic_idx, topic in enumerate(model.components_):
top_features_ind = topic.argsort()[: -n_top_words - 1 : -1]
top_features = [feature_names[i] for i in top_features_ind]
weights = topic[top_features_ind]
ax = axes[topic_idx]
ax.barh(top_features, weights, height=0.7)
ax.set_title(f"Topic {topic_idx +1}", fontdict={"fontsize": 30})
ax.invert_yaxis()
ax.tick_params(axis="both", which="major", labelsize=20)
for i in "top right left".split():
ax.spines[i].set_visible(False)
fig.suptitle(title, fontsize=40)
plt.subplots_adjust(top=0.90, bottom=0.05, wspace=0.90, hspace=0.3)
plt.show()
@staticmethod
def _get_top_words(model, feature_names, n_top_words, join=' | '):
for topic_idx, topic in enumerate(model.components_):
top_features_ind = topic.argsort()[: -n_top_words - 1 : -1]
top_features = [feature_names[i] for i in top_features_ind]
if join is None:
yield top_features
else:
yield join.join(top_features)
def calculate_coherence_from_n_topics(self, n_components_it=[2, 5, 10, 15, 20, 25, 30], coherence='c_v'):
dictionary = common_dictionary.from_documents(self.texts_prep)
display(dictionary)
scores = {}
for n_components in tqdm(n_components_it):
model = copy.copy(self.pipe.named_steps.model)
model.n_components = n_components
model.fit(self.feat)
topics = self._get_top_words(model, self.pipe.named_steps.vectorizer.get_feature_names(), 5, None)
cm = CoherenceModel(
topics=topics,
dictionary=dictionary,
texts=self.texts_prep,
coherence=coherence
)
scores[n_components] = cm.get_coherence()
return scores
def plot_coherence_from_n_topics(self, n_components_it=[2, 5, 10, 15, 20, 25, 30], coherence='c_v'):
scores_dct = self.calculate_coherence_from_n_topics(n_components_it, coherence)
scores_ser = pd.Series(scores_dct, name='coherence')
return px.line(scores_ser, title=f'Coherence "{coherence}" by number of topics')
def plot_top_keywords(self, n_words=20, title='Top words'):
return self._plot_top_words(
self.pipe.named_steps.model,
self.pipe.named_steps.vectorizer.get_feature_names(),
n_words,
title)
def get_top_words(self, n_words=5, join=' | '):
return list(self._get_top_words(
self.pipe.named_steps.model,
self.pipe.named_steps.vectorizer.get_feature_names(),
n_words,
join
))
@property
def df_topics(self):
return pd.DataFrame(
self.data_topics,
columns=self.get_top_words(n_words=3),
index=self.df_texts.index
)
@property
def df_top_topic_for_doc(self):
return self.df_topics.agg(['idxmax', 'max'], axis=1).sort_values('max').join(self.df_texts)
@property
def df_top_doc_for_topic(self):
return self.df_topics.agg(['max', 'idxmax']).T.merge(self.df_texts, left_on='idxmax', right_index=True).rename(columns={'max': 'weight'})
def plot_topic_trend(self, min_score=0.2):
df_topics_by_date_gr = (self.df_topics[self.df_topics > min_score]
.join(self.df_texts[self.date_col])
.rename_axis(columns='topic')
.groupby(
pd.Grouper(key=self.date_col, freq='m')
)
)
return px.line(
df_topics_by_date_gr.count().stack().to_frame('count').reset_index(),
x=self.date_col,
y='count',
facet_col='topic',
facet_col_wrap=3,
height=900,
)
def plot_doc_by_top_topic(self, text_col):
text_col = text_col or self.text_col
return px.box(
self.df_top_topic_for_doc,
facet_col='idxmax',
facet_col_wrap=3,
x='max',
points='all',
hover_data=[text_col],
height=800
)
def plot_topic_weight_distribution(self, **kwargs):
default_kwargs = dict(x='weight', log_y=True, facet_col='topic', height=900, facet_col_wrap=3)
default_kwargs.update(kwargs)
return px.histogram(self.df_topics.stack().to_frame('weight').reset_index().query('weight > 0').rename(columns={'level_1': 'topic'}), **default_kwargs) | [] |
2024-01-10 | aws-samples/conversational-ai-llms-with-amazon-lex-and-sagemaker | src~bot_dispatcher~sm_utils~sm_langchain_sample.py | """Summary
"""
from typing import List, Any, Dict
from langchain.memory import ConversationBufferMemory
from langchain import PromptTemplate, SagemakerEndpoint, ConversationChain
from langchain.llms.sagemaker_endpoint import LLMContentHandler
from langchain.schema import BaseMemory
from pydantic import BaseModel, Extra
import json
class SagemakerContentHandler(LLMContentHandler):
"""Helper class to parse Sagemaker input/output
"""
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
"""Parse input into required format for Sagemaker
Args:
prompt (str): LLM Prompt
            model_kwargs (Dict): model tuning parameters
        Returns:
            bytes: UTF-8 encoded JSON payload for the Sagemaker endpoint
"""
input_str = json.dumps({"text_inputs": prompt, **model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> str:
"""Parse sagemaker output. Return the first generated text as chatbot response
Args:
output (bytes): Bytes output from Sagemaker
Returns:
str: Chat response
"""
response_json = json.loads(output.read().decode("utf-8"))
print(response_json)
return response_json['generated_texts'][0]
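# Example of the response body shape this handler expects from the endpoint (illustrative values):
#   {"generated_texts": ["Hello! How can I help you today?"]}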
class LexConversationalMemory(BaseMemory, BaseModel):
"""Langchain Custom Memory class that uses Lex Conversation history
Attributes:
history (dict): Dict storing conversation history that acts as the Langchain memory
lex_conv_context (str): LexV2 sessions API that serves as input for convo history
Memory is loaded from here
        memory_key (str): key for the chat history Langchain memory variable - "chat_history"
"""
history = {}
memory_key = "chat_history" #pass into prompt with key
lex_conv_context = ""
def clear(self):
"""Clear chat history
"""
self.history = {}
@property
def memory_variables(self) -> List[str]:
"""Load memory variables
Returns:
List[str]: List of keys containing Langchain memory
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Load memory from lex into current Langchain session memory
Args:
inputs (Dict[str, Any]): User input for current Langchain session
Returns:
Dict[str, str]: Langchain memory object
"""
input_text = inputs[list(inputs.keys())[0]]
ccontext = json.loads(self.lex_conv_context)
memory = {
self.memory_key: ccontext[self.memory_key] + input_text + "\nAI: ",
}
return memory
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Load memory from lex + current input into Langchain session memory
Args:
inputs (Dict[str, Any]): User input
outputs (Dict[str, str]): Langchain response from calling LLM
"""
input_text = inputs[list(inputs.keys())[0]]
output_text = outputs[list(outputs.keys())[0]]
ccontext = json.loads(self.lex_conv_context)
self.history = {
self.memory_key: ccontext[self.memory_key] + input_text + f"\nAI: {output_text}",
}
class SagemakerLangchainBot():
"""Create a langchain.ConversationChain using a Sagemaker endpoint as the LLM
Attributes:
chain (langchain.ConversationChain): Langchain chain that invokes the Sagemaker endpoint hosting an LLM
"""
def __init__(self, prompt_template,
sm_endpoint_name,
lex_conv_history="",
region_name="" ):
"""Create a SagemakerLangchainBot client
Args:
prompt_template (str): Prompt template
sm_endpoint_name (str): Sagemaker endpoint name
lex_conv_history (str, optional): Lex convo history from LexV2 sessions API. Empty string for no history (first chat)
region_name (str, optional): region where Sagemaker endpoint is deployed
"""
prompt = PromptTemplate(
input_variables=["chat_history", "input"],
template=prompt_template
)
# Sagemaker endpoint for the LLM. Pass in arguments for tuning the model and
sm_flant5_llm = SagemakerEndpoint(
endpoint_name=sm_endpoint_name,
region_name=region_name,
content_handler=SagemakerContentHandler(),
model_kwargs={"temperature":2.0,"max_length":50, "num_return_sequences":3, "top_k":50, "top_p":0.95, "do_sample":True}
)
# Create a conversation chain using the prompt, llm hosted in Sagemaker, and custom memory class
self.chain = ConversationChain(
llm=sm_flant5_llm,
prompt=prompt,
memory=LexConversationalMemory(lex_conv_context=lex_conv_history),
verbose=True
)
def call_llm(self,user_input) -> str:
"""Call the Sagemaker endpoint hosting the LLM by calling ConversationChain.predict()
Args:
user_input (str): User chat input
Returns:
str: Sagemaker response to display as chat output
"""
output = self.chain.predict(
input=user_input
)
print("call_llm - input :: "+user_input)
print("call_llm - output :: "+output)
return output | [
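# Illustrative usage sketch (not part of the original file); the endpoint name, region and Lex
# session history are placeholders:
#
#   bot = SagemakerLangchainBot(
#       prompt_template="The following is a conversation.\n{chat_history}\nHuman: {input}\nAI:",
#       sm_endpoint_name="my-flan-t5-endpoint",
#       lex_conv_history='{"chat_history": ""}',
#       region_name="us-east-1",
#   )
#   reply = bot.call_llm("What are your opening hours?")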
"chat_history",
"input"
] |
2024-01-10 | DigData-ai/token-analyzer | spade~gradio_spade.py | import openai
import gradio as gr
from gradio.components import Radio
openai.api_key = "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"
messages = [{"role": "system", "content": "You are GPT-4, answer questions \
if only they are related to crypto currency else return 'it is out of my scope'."}]
def generate_response(prompt):
mode = "question-answer"
if mode == "question-answer":
result = openai.ChatCompletion.create(model="gpt-3.5-turbo-0301",
messages=messages + [{"role": "user", "content": prompt}])
return result['choices'][0]['message']['content']
elif mode == "prompt-to-sql":
openai.api_type = "azure"
openai.api_base = "https://test -digdata.openai.azure.com/"
openai.api_version = "2022-12-01"
openai.api_key = '1c60ef61808b4590b3c6c5d5c86be3ed'
response = openai.Completion.create(
engine="code-davinci-002",
prompt=f"### mySQL tables, with their properties:\n#\n# Employee(id, name, department_id)\n# Department(id, name, address)\n# Salary_Payments(id, employee_id, amount, date)\n#\n###\
{prompt}\n\nSELECT",
temperature=0,
max_tokens=150,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
best_of=1,
stop=["#",";"])
openai.api_key = "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"
return response.choices[0].text
inputs = [
gr.inputs.Textbox(lines=5, label="Question/Prompt", placeholder="Type a question or SQL prompt here..."),
]
outputs = gr.outputs.Textbox(label="Answer")
title = "GPT-4"
description = "GPT-4 is a question answering system that can answer questions related to crypto currency. \
It can also generate SQL queries from a prompt."
examples = [
["What is the price of Bitcoin?", "Question-Answer"],
["What is the price of Ethereum?", "Question-Answer"],
["What is the price of Dogecoin?", "Question-Answer"],
["What is the price of Cardano?", "Question-Answer"],
]
gr.Interface(generate_response, inputs, outputs, title=title, description=description, examples=examples).launch() | [
"You are GPT-4, answer questions if only they are related to crypto currency else return 'it is out of my scope'.",
"content",
"### mySQL tables, with their properties:\n#\n# Employee(id, name, department_id)\n# Department(id, name, address)\n# Salary_Payments(id, employee_id, amount, date)\n#\n### PLACEHOLDER\n\nSELECT"
] |
2024-01-10 | bjoernpl/GermanBenchmark | dataset_translation~translate_mmlu.py | """
Copyright 2023 Björn Plüster
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datasets import load_dataset
from tqdm import tqdm
import guidance
import json
from pathlib import Path
_SUBJECTS = [
"abstract_algebra",
"anatomy",
"astronomy",
"business_ethics",
"clinical_knowledge",
"college_biology",
"college_chemistry",
"college_computer_science",
"college_mathematics",
"college_medicine",
"college_physics",
"computer_security",
"conceptual_physics",
"econometrics",
"electrical_engineering",
"elementary_mathematics",
"formal_logic",
"global_facts",
"high_school_biology",
"high_school_chemistry",
"high_school_computer_science",
"high_school_european_history",
"high_school_geography",
"high_school_government_and_politics",
"high_school_macroeconomics",
"high_school_mathematics",
"high_school_microeconomics",
"high_school_physics",
"high_school_psychology",
"high_school_statistics",
"high_school_us_history",
"high_school_world_history",
"human_aging",
"human_sexuality",
"international_law",
"jurisprudence",
"logical_fallacies",
"machine_learning",
"management",
"marketing",
"medical_genetics",
"miscellaneous",
"moral_disputes",
"moral_scenarios",
"nutrition",
"philosophy",
"prehistory",
"professional_accounting",
"professional_law",
"professional_medicine",
"professional_psychology",
"public_relations",
"security_studies",
"sociology",
"us_foreign_policy",
"virology",
"world_religions",
]
# set the default language model used to execute guidance programs
guidance.llm = guidance.llms.OpenAI("gpt-3.5-turbo", max_calls_per_min=1000, api_key=input("API Key: "))
structure_program = guidance(
'''
{{#system~}}
You are a helpful assistant that translates questions and answers from English to German.
{{~/system}}
{{#user~}}
Translate the following question and each of the multiple choice answers into German. Be as precise as possible. Keep the exact json format.
Translate only the values and not the keys. "_________" indicate blanks that should be kept in the translation. Do not answer anything else than the json.
{
"question": "How many planets does our solar system have?",
"A": "8",
"B": "9",
"C": "10",
"D": "All of the above"
}
{{~/user}}
{{#assistant~}}
{
"question": "Wie viele Planeten hat unser Sonnensystem?",
"A": "8",
"B": "9",
"C": "10",
"D": "Alle der oben genannten"
}
{{~/assistant}}
{{#user~}}
{
"question": {{input}},
"A": "{{a}}",
"B": "{{b}}",
"C": "{{c}}",
"D": "{{d}}"
}
{{~/user}}
{{#assistant~}}
{{gen 'output' temperature=0 top_p=1 stop="\\n}" max_tokens=1500}}
{{~/assistant}}
''', stream=False)
json_format = """
"question": "{question}",
"A": "{A}",
"B": "{B}",
"C": "{C}",
"D": "{D}"
"""
def contains_json(string):
return "{" in string and "}" in string and "\"question\"" in string and "\"A\"" in string and "\"B\"" in string and "\"C\"" in string and "\"D\"" in string
def fix_quotes(string):
if string[0] == "\"":
string = string[1:]
if string[-1] == "\"":
string = string[:-1]
string = string.replace("\"", "\\\"")
string = string.replace("\n", "\\n")
return string
def fix_parentheses(string):
string = string.replace("{", "\\{")
string = string.replace("}", "\\}")
return string
def get_question(string):
post_question = string.split("\"question\":")[1]
question = post_question.split("\"A\"")[0].strip()
if question[0] == "\"":
question = question[1:]
if question[-2:] == "\",":
question = question[:-2]
question = question.replace("\"", "\\\"")
question = question.replace("\n", "\\n")
question = question.replace("\\\",\\n\\\"", "\\n")
question = fix_parentheses(question)
return question
def get_choices(string):
choice_A = string.split("\"A\":")[1].split("\"B\"")[0].strip()[:-1]
choice_B = string.split("\"B\":")[1].split("\"C\"")[0].strip()[:-1]
choice_C = string.split("\"C\":")[1].split("\"D\"")[0].strip()[:-1]
choice_D = string.split("\"D\":")[1].split("}")[0].strip()
if choice_D.endswith(","):
choice_D = choice_D[:-1]
fix = lambda x: fix_quotes(fix_parentheses(x))
return [fix(choice_A), fix(choice_B), fix(choice_C), fix(choice_D)]
def is_valid_json(string):
try:
json.loads(string)
return True
except:
return False
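# Rebuilds a clean JSON string from the raw model output and validates it; returns None when the
# output cannot be repaired into the expected {"question", "A".."D"} structure.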
def get_json(string):
if contains_json(string):
question = get_question(string)
choices = get_choices(string)
json_string = json_format.format(question=question, A=choices[0], B=choices[1], C=choices[2], D=choices[3])
json_string = "{" + json_string + "}"
if is_valid_json(json_string):
return json_string
else:
return None
else:
return None
mmlu = {name: load_dataset("tasksource/mmlu", name, split="validation") for name in _SUBJECTS}
total_len = sum(len(mmlu[name]) for name in _SUBJECTS)
print(f"Total length: {total_len} examples")
def translate_example(example):
question = example["question"]
try:
out = structure_program(
input=question,
a=example["choices"][0],
b=example["choices"][1],
c=example["choices"][2],
d=example["choices"][3]
)
except:
example["answer_de"] = out["output"]
example["question_de"] = ""
example["choices_de"] = ["", "", "", ""]
try:
translated = json.loads(get_json(out["output"]+"\n}"))
example["question_de"] = translated["question"]
example["choices_de"] = [translated["A"], translated["B"], translated["C"], translated["D"]]
example["answer_de"] = out["output"]+"\n}"
except:
if "{" in out["output"]:
output = "{" + out["output"].split("{")[1]
try:
translated = json.loads(output)
example["question_de"] = translated["question"]
example["choices_de"] = [translated["A"], translated["B"], translated["C"], translated["D"]]
example["answer_de"] = out["output"]+"\n}"
except:
example["answer_de"] = out["output"]
example["question_de"] = ""
example["choices_de"] = ["", "", "", ""]
else:
example["answer_de"] = out["output"]
example["question_de"] = ""
example["choices_de"] = ["", "", "", ""]
return example
Path("outputs_val_mmlu").mkdir(exist_ok=True)
# Translate the parts
for i, name in tqdm(enumerate(_SUBJECTS), total=len(_SUBJECTS)):
print(f"Translating {name} ({i+1}/{len(_SUBJECTS)})")
part = mmlu[name]
part = part.select(range(15)) if len(part) > 15 else part
p = part.map(translate_example, num_proc=8)
p = p.filter(lambda x: x["question_de"] != "")
print(len(p) > 6)
p.to_parquet(f"outputs_val_mmlu/{name}.parquet")
| [] |
2024-01-10 | bjoernpl/GermanBenchmark | dataset_translation~translate_truthfulqa.py | """
Copyright 2023 Björn Plüster
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datasets import load_dataset
from tqdm import tqdm, trange
import guidance
import json
from pathlib import Path
from datasets.utils.logging import disable_progress_bar
#disable_progress_bar()
# set the default language model used to execute guidance programs
guidance.llm = guidance.llms.OpenAI("gpt-3.5-turbo-0301", max_calls_per_min=5000, api_key="./openai_key.txt")
structure_program = guidance(
'''
{{#system~}}
You are a helpful assistant that translates json from English to German.
{{~/system}}
{{#user~}}
Translate the following json to german.
It consists of a question and multiple possible answers.
Be as precise as possible. Keep the exact json format and do not translate the keys.
{{input}}
{{~/user}}
{{#assistant~}}
{{gen 'output' temperature=1 top_p=1}}
{{~/assistant}}
''', stream=False)
question_options = ["question", "Frage", "frage"]
choices_options = ["choices", "Antworten", "Antwortmöglichkeiten", "Auswahlmöglichkeiten", "Möglichkeiten", "Optionen", "Aussagen", "Auswahlen", "möglichkeiten", "optionen", "aussagen", "auswahlen", "antworten", "antwortmöglichkeiten", "auswahlmöglichkeiten", "Auswahl", "auswahl"]
def get_question_and_choices(example):
question = None
choices = None
for q in question_options:
if q in example:
question = example[q]
break
for c in choices_options:
if c in example:
choices = example[c]
break
return question, choices
def manual_fix(translation):
print(translation)
print("Please enter the correct translation:")
new = input()
try:
json.loads(new)
return new
except Exception as e:
print("Invalid json, please try again")
return manual_fix(translation)
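# Translates either the mc1 or mc2 answer set of one example; on any failure the German fields are
# left as empty strings so the affected rows can be inspected or re-run afterwards.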
def translate_example(example, mc1=True):
targets = "mc1_targets" if mc1 else "mc2_targets"
other = "mc2_targets" if mc1 else "mc1_targets"
ex = {
"question": example["question"],
"choices": example[targets]["choices"]
}
try:
json_input = json.dumps(ex)
out = structure_program(
input=json_input
)
except Exception as e:
example["question_de"] = example.get("question_de", "")
example[targets+"_de"] = {"choices": [""]*len(example[targets]["choices"]), "labels": example[targets]["labels"]}
example[other+"_de"] = example.get(other+"_de", {"choices": [""]*len(example[other]["choices"]), "labels": example[other]["labels"]})
example["translation_de"+ ("1" if mc1 else "2")] = ""
example["translation_de"+ ("2" if mc1 else "1")] = example.get("translation_de"+ ("2" if mc1 else "1"), "")
print("first exception")
return example
try:
try:
translated = json.loads(out["output"])
except Exception as e:
translated = json.loads(manual_fix(out["output"]))
question, choices = get_question_and_choices(translated)
if question is None or choices is None:
print(translated.keys())
if question is None:
question = ""
if choices is None:
choices = [""]*len(example[targets]["choices"])
example["question_de"] = question
example[targets+"_de"] = {"choices": choices, "labels": example[targets]["labels"]}
example[other+"_de"] = example.get(other+"_de", {"choices": [""]*len(example[other]["choices"]), "labels": example[other]["labels"]})
example["translation_de"+ ("1" if mc1 else "2")] = out["output"]
example["translation_de"+ ("2" if mc1 else "1")] = example.get("translation_de"+ ("2" if mc1 else "1"), "")
except Exception as e:
example["question_de"] = example.get("question_de", "")
example[targets+"_de"] = {"choices": [""]*len(example[targets]["choices"]), "labels": example[targets]["labels"]}
example[other+"_de"] = example.get(other+"_de", {"choices": [""]*len(example[other]["choices"]), "labels": example[other]["labels"]})
example["translation_de"+ ("1" if mc1 else "2")] = out["output"] if "output" in out else ""
example["translation_de"+ ("2" if mc1 else "1")] = example.get("translation_de"+ ("2" if mc1 else "1"), "")
return example
dataset = load_dataset("truthful_qa", "multiple_choice", split="validation")
output_dir = Path("outputs_truthfulqa_de")
output_dir.mkdir(exist_ok=True)
num_shards = 10
for i in trange(num_shards, desc=f"Translating shards"):
shard = dataset.shard(num_shards=num_shards, index=i)
shard = shard.map(translate_example, num_proc=1)
shard = shard.map(translate_example, num_proc=1, fn_kwargs={"mc1": False})
shard.to_json(output_dir / f"{i:03d}.json")
# Combine shards
json_files = {
"validation": [str(x) for x in output_dir.glob(f"*.json")]
}
dataset = load_dataset("json", data_files=json_files)["validation"]
dataset.push_to_hub("bjoernp/truthful_qa_de")
# count examples with empty translation
empty = dataset.filter(lambda x: x["translation_de1"] == "")
print(f"Empty translations1 in dataset: {len(empty)}")
empty = dataset.filter(lambda x: x["translation_de2"] == "")
print(f"Empty translations2 in dataset: {len(empty)}")
# count examples with question translation
empty = dataset.filter(lambda x: x["question_de"] == "")
print(f"Empty question translations in dataset: {len(empty)}")
empty = dataset.filter(lambda x: x["mc1_targets_de"]["choices"]==None)#["choices"][0] == "")
print(f"Empty mc1 translations in dataset: {len(empty)}")
empty = dataset.filter(lambda x: x["mc1_targets_de"]["choices"]!=None and x["mc1_targets_de"]["choices"][0] == "")
print(f"Empty mc1 translations in dataset: {len(empty)}")
empty = dataset.filter(lambda x: x["mc2_targets_de"]["choices"]==None)#["choices"][0] == "")
print(f"Empty mc2 translations in dataset: {len(empty)}")
empty = dataset.filter(lambda x: x["mc2_targets_de"]["choices"]!=None and x["mc2_targets_de"]["choices"][0] == "")
print(f"Empty mc2 translations in dataset: {len(empty)}")
| [] |
2024-01-10 | bjoernpl/GermanBenchmark | dataset_translation~translate_arc.py | """
Copyright 2023 Björn Plüster
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datasets import load_dataset
from tqdm import tqdm, trange
import guidance
import json
from pathlib import Path
from datasets.utils.logging import disable_progress_bar
#disable_progress_bar()
# set the default language model used to execute guidance programs
guidance.llm = guidance.llms.OpenAI("gpt-3.5-turbo-0301", max_calls_per_min=5000, api_key="./openai_key.txt")
structure_program = guidance(
'''
{{#system~}}
You are a helpful assistant that translates json from English to German.
{{~/system}}
{{#user~}}
Translate the following json to german.
It consists of a question and four possible answer choices.
Be as precise as possible. Keep the exact json format and do not translate the keys.
{{input}}
{{~/user}}
{{#assistant~}}
{{gen 'output' temperature=0.5 top_p=1}}
{{~/assistant}}
''', stream=False)
labels = ["A", "B", "C", "D"]
def translate_example(example, depth=0):
ex = {
"question": example["question"],
"choices": example["choices"]["text"]
}
try:
json_input = json.dumps(ex)
if depth > 0:
out = structure_program(
input=json_input,
cache_seed=depth
)
else:
out = structure_program(
input=json_input
)
except Exception as e:
example["question_de"] = ""
example["choices_de"] = {"text": ["", "", "", ""], "label": labels}
example["translation_de"] = ""
return example
try:
translated = json.loads(out["output"])
example["question_de"] = translated["question"]
example["choices_de"] = {"text": translated["choices"], "label": labels}
example["translation_de"] = out.get("output", "")
except Exception as e:
if depth < 5:
return translate_example(example, depth=depth+1)
example["question_de"] = ""
example["choices_de"] = {"text": ["", "", "", ""], "label": labels}
example["translation_de"] = out.get("output", "")
return example
dataset = load_dataset("ai2_arc", "ARC-Challenge", split={"test": "test", "validation": "validation"})
output_dir = Path("outputs_arc_challenge_de")
output_dir.mkdir(exist_ok=True)
num_shards = 5
for split in ["test", "validation"]:
ds = dataset[split]
for i in trange(num_shards, desc=f"Translating {split} shards"):
shard = ds.shard(num_shards=num_shards, index=i)
shard = shard.map(translate_example, num_proc=16)
shard.to_json(output_dir / f"{split}-{i:03d}.json")
# Combine shards
json_files = {
"test": [str(x) for x in output_dir.glob(f"test-*.json")],
"validation": [str(x) for x in output_dir.glob(f"validation-*.json")]
}
dataset = load_dataset("json", data_files=json_files)
dataset.push_to_hub("bjoernp/arc_challenge_de")
for split in ["test", "validation"]:
ds = dataset[split]
# count examples with empty translation
empty = ds.filter(lambda x: x["translation_de"] == "")
print(f"Empty translations in {split}: {len(empty)}")
# count examples with question translation
empty = ds.filter(lambda x: x["question_de"] == "")
print(f"Empty question translations in {split}: {len(empty)}")
| [] |
2024-01-10 | bjoernpl/GermanBenchmark | dataset_translation~translate_hellaswag.py | """
Copyright 2023 Björn Plüster
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datasets import load_dataset
from tqdm import tqdm, trange
import guidance
import json
from pathlib import Path
from datasets.utils.logging import disable_progress_bar
import random
#disable_progress_bar()
# set the default language model used to execute guidance programs
guidance.llm = guidance.llms.OpenAI("gpt-3.5-turbo-0301", max_calls_per_min=5000, api_key="./openai_key.txt")
structure_program = guidance(
'''
{{#system~}}
You are a helpful assistant that translates json from English to German.
{{~/system}}
{{#user~}}
Translate the following json to german.
It consists of a context and four possible continuations. Make sure that the translation of each continuation is coherent with the context.
Be as precise as possible. Keep the exact json format and do not translate the keys.
{{input}}
{{~/user}}
{{#assistant~}}
{{gen 'output' temperature=0.5 top_p=1}}
{{~/assistant}}
''', stream=False)
def fix1(example):
translation = example["translation_de"] + "}"
try:
json.loads(translation)
return translation
except Exception as e:
raise e
def fix2(example):
translation = example["translation_de"].replace('"endings":', '"endings": [')
try:
json.loads(translation)
return translation
except Exception as e:
raise e
def fix3(example):
translation = example["translation_de"]
if "}" in translation and len(translation.split("}")[1]) > 0:
translation = translation.split("}")[0] + "}"
try:
json.loads(translation)
return translation
except Exception as e:
raise e
def attempt_fix(example):
try:
return fix1(example)
except Exception as e:
try:
return fix2(example)
except Exception as e:
try:
return fix3(example)
except Exception as e:
raise e
def translate_example(example, random_seed=False, depth=0):
ex = {
"activity_label": example["activity_label"],
"context": example["ctx"],
"endings": example["endings"]
}
try:
json_input = json.dumps(ex)
if random_seed:
out = structure_program(
input=json_input,
cache_seed=random.randint(0, 100000)
)
else:
out = structure_program(
input=json_input
)
except Exception as e:
example["activity_label_de"] = ""
example["ctx_de"] = ""
example["endings_de"] = ["", "", "", ""]
example["translation_de"] = ""
return example
try:
translated = json.loads(out["output"])
example["activity_label_de"] = translated["activity_label"]
example["ctx_de"] = translated["context"]
example["endings_de"] = translated["endings"]
example["translation_de"] = out["output"]
except Exception as e:
try:
            translated = json.loads(attempt_fix({"translation_de": out["output"]}))
example["activity_label_de"] = translated["activity_label"]
example["ctx_de"] = translated["context"]
example["endings_de"] = translated["endings"]
example["translation_de"] = out["output"]
except Exception as e:
if depth < 5:
return translate_example(example, random_seed=True, depth=depth+1)
example["activity_label_de"] = ""
example["ctx_de"] = ""
example["endings_de"] = ["", "", "", ""]
example["translation_de"] = out["output"] if "output" in out else ""
return example
dataset = load_dataset("hellaswag", split={"train": "train[:1000]", "validation": "validation"})
output_dir = Path("outputs_hellaswag_de")
output_dir.mkdir(exist_ok=True)
num_shards = 100
for split in ["train", "validation"]:
ds = dataset[split]
for i in trange(num_shards, desc=f"Translating {split} shards"):
shard = ds.shard(num_shards=num_shards, index=i)
shard = shard.map(translate_example, num_proc=32)
shard.to_json(output_dir / f"{split}-{i:03d}.json")
# Combine shards
json_files = {
"train": [str(x) for x in output_dir.glob(f"train-*.json")],
"validation": [str(x) for x in output_dir.glob(f"validation-*.json")]
}
dataset = load_dataset("json", data_files=json_files)
# dataset.to_json(output_dir / "hellaswag_de.json")
dataset.push_to_hub("bjoernp/hellaswag_de")
for split in ["train", "validation"]:
ds = dataset[split]
# count examples with empty translation
empty = ds.filter(lambda x: x["translation_de"] == "")
print(f"Empty translations in {split}: {len(empty)}")
# count examples with context translation
empty = ds.filter(lambda x: x["ctx_de"] == "")
print(f"Empty context translations in {split}: {len(empty)}")
| [] |
2024-01-10 | bachvarv/NLP_Test | corpus_prep~corpus_gen.py | import locale
import os.path
import sys
import openai
from decouple import config
import csv
openai.api_key = config('OPEN_API_KEY')
csv_file = os.path.join(os.pardir, os.path.join('simple_language', os.path.join('corpus', 'wikitext_simple.csv')))
def get_text_from_json(json_obj):
return json_obj.get('choices')[0]['text']
def summarize_2nd_grader(text):
response_en = openai.Completion.create(engine="text-davinci-001",
prompt=f'Summarize this for a second-grade student:\n\n{text}',
temperature=0.7,
max_tokens=300,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
return get_text_from_json(response_en)
def translate_to_de(text):
response_de = openai.Completion.create(engine="text-davinci-001",
prompt=f'Translate this into German:\n\n{text}',
temperature=0.7,
max_tokens=300,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0)
return get_text_from_json(response_de)
def convert_text(text):
response_en = summarize_2nd_grader(text)
response_de = translate_to_de(response_en)
return response_de
def read_text(arg):
simple_text = ""
if not os.path.isfile(arg):
simple_text = convert_text(arg)
# save_text = input("Write a file path if you want to write the simplified text to a file: ")
# csv_file = os.path.join(os.pardir, os.path.join('simple_language', os.path.join('corpus', 'simple_language_openAI.csv')))
# csv_file = 'C:\MA\NLP_Test\simple_language\corpus\simple_language_openAI.csv'
arg = arg.replace('\n', '')
simple_text = simple_text.replace('\n', '')
if os.path.isfile(csv_file):
with open(csv_file, 'a', encoding='utf-8') as f:
s = [arg, simple_text]
print(s)
writer = csv.writer(f, delimiter='\t')
writer.writerow(s)
return simple_text
def create_csv(inp_arr):
for i in inp_arr:
for j in i:
read_text(j)
print('Written to the csv File')
# if __name__ == '__main__':
# argument = sys.argv[1]
# # print(argument)
# read_text(argument)
| [
"Translate this into German:\n\nPLACEHOLDER",
"Summarize this for a second-grade student:\n\nPLACEHOLDER"
] |
2024-01-10 | wissam-sib/dilbert | modeling_auto_dil.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Model class. """
from dilbert import DilBert
from dilalbert import DilAlbert
import logging
from collections import OrderedDict
from transformers.configuration_auto import (
AlbertConfig,
AutoConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
#ElectraConfig,
FlaubertConfig,
GPT2Config,
OpenAIGPTConfig,
RobertaConfig,
T5Config,
TransfoXLConfig,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
)
from transformers.configuration_utils import PretrainedConfig
from transformers.modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
AlbertForMaskedLM,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.modeling_bart import (
BART_PRETRAINED_MODEL_ARCHIVE_MAP,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
)
from transformers.modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertModel,
)
from transformers.modeling_camembert import (
CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
CamembertForMaskedLM,
CamembertForSequenceClassification,
CamembertForTokenClassification,
CamembertModel,
)
from transformers.modeling_ctrl import CTRL_PRETRAINED_MODEL_ARCHIVE_MAP, CTRLLMHeadModel, CTRLModel
from transformers.modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
"""
from transformers.modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP,
ElectraForMaskedLM,
ElectraForPreTraining,
ElectraForTokenClassification,
ElectraModel,
)"""
from transformers.modeling_flaubert import (
FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.modeling_gpt2 import GPT2_PRETRAINED_MODEL_ARCHIVE_MAP, GPT2LMHeadModel, GPT2Model
from transformers.modeling_openai import OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP, OpenAIGPTLMHeadModel, OpenAIGPTModel
from transformers.modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
RobertaForMaskedLM,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
)
from transformers.modeling_t5 import T5_PRETRAINED_MODEL_ARCHIVE_MAP, T5ForConditionalGeneration, T5Model
from transformers.modeling_transfo_xl import TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP, TransfoXLLMHeadModel, TransfoXLModel
from transformers.modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_MAP,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
XLMRobertaForMaskedLM,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
)
from transformers.modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_MAP,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
)
logger = logging.getLogger(__name__)
ALL_PRETRAINED_MODEL_ARCHIVE_MAP = dict(
(key, value)
for pretrained_map in [
BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP,
GPT2_PRETRAINED_MODEL_ARCHIVE_MAP,
CTRL_PRETRAINED_MODEL_ARCHIVE_MAP,
XLNET_PRETRAINED_MODEL_ARCHIVE_MAP,
XLM_PRETRAINED_MODEL_ARCHIVE_MAP,
ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
T5_PRETRAINED_MODEL_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
#ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP,
]
for key, value, in pretrained_map.items()
)
MODEL_MAPPING = OrderedDict(
[
(T5Config, T5Model),
(DistilBertConfig, DistilBertModel),
(AlbertConfig, AlbertModel),
(CamembertConfig, CamembertModel),
(XLMRobertaConfig, XLMRobertaModel),
(BartConfig, BartModel),
(RobertaConfig, RobertaModel),
(BertConfig, BertModel),
(OpenAIGPTConfig, OpenAIGPTModel),
(GPT2Config, GPT2Model),
(TransfoXLConfig, TransfoXLModel),
(XLNetConfig, XLNetModel),
(FlaubertConfig, FlaubertModel),
(XLMConfig, XLMModel),
(CTRLConfig, CTRLModel),
#(ElectraConfig, ElectraModel),
]
)
MODEL_FOR_PRETRAINING_MAPPING = OrderedDict(
[
(T5Config, T5ForConditionalGeneration),
(DistilBertConfig, DistilBertForMaskedLM),
(AlbertConfig, AlbertForMaskedLM),
(CamembertConfig, CamembertForMaskedLM),
(XLMRobertaConfig, XLMRobertaForMaskedLM),
(BartConfig, BartForConditionalGeneration),
(RobertaConfig, RobertaForMaskedLM),
(BertConfig, BertForPreTraining),
(OpenAIGPTConfig, OpenAIGPTLMHeadModel),
(GPT2Config, GPT2LMHeadModel),
(TransfoXLConfig, TransfoXLLMHeadModel),
(XLNetConfig, XLNetLMHeadModel),
(FlaubertConfig, FlaubertWithLMHeadModel),
(XLMConfig, XLMWithLMHeadModel),
(CTRLConfig, CTRLLMHeadModel),
#(ElectraConfig, ElectraForPreTraining),
]
)
MODEL_WITH_LM_HEAD_MAPPING = OrderedDict(
[
(T5Config, T5ForConditionalGeneration),
(DistilBertConfig, DistilBertForMaskedLM),
(AlbertConfig, AlbertForMaskedLM),
(CamembertConfig, CamembertForMaskedLM),
(XLMRobertaConfig, XLMRobertaForMaskedLM),
(BartConfig, BartForConditionalGeneration),
(RobertaConfig, RobertaForMaskedLM),
(BertConfig, BertForMaskedLM),
(OpenAIGPTConfig, OpenAIGPTLMHeadModel),
(GPT2Config, GPT2LMHeadModel),
(TransfoXLConfig, TransfoXLLMHeadModel),
(XLNetConfig, XLNetLMHeadModel),
(FlaubertConfig, FlaubertWithLMHeadModel),
(XLMConfig, XLMWithLMHeadModel),
(CTRLConfig, CTRLLMHeadModel),
#(ElectraConfig, ElectraForMaskedLM),
]
)
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = OrderedDict(
[
(DistilBertConfig, DistilBertForSequenceClassification),
(AlbertConfig, AlbertForSequenceClassification),
(CamembertConfig, CamembertForSequenceClassification),
(XLMRobertaConfig, XLMRobertaForSequenceClassification),
(BartConfig, BartForSequenceClassification),
(RobertaConfig, RobertaForSequenceClassification),
(BertConfig, BertForSequenceClassification),
(XLNetConfig, XLNetForSequenceClassification),
(FlaubertConfig, FlaubertForSequenceClassification),
(XLMConfig, XLMForSequenceClassification),
]
)
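# Unlike the stock transformers mapping, question answering routes BERT and ALBERT
# configurations to the custom DilBert / DilAlbert heads imported at the top of this file.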
MODEL_FOR_QUESTION_ANSWERING_MAPPING = OrderedDict(
[
(DistilBertConfig, DistilBertForQuestionAnswering),
(AlbertConfig, DilAlbert),
(RobertaConfig, RobertaForQuestionAnswering),
(BertConfig, DilBert),
(XLNetConfig, XLNetForQuestionAnsweringSimple),
(FlaubertConfig, FlaubertForQuestionAnsweringSimple),
(XLMConfig, XLMForQuestionAnsweringSimple),
]
)
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = OrderedDict(
[
(DistilBertConfig, DistilBertForTokenClassification),
(CamembertConfig, CamembertForTokenClassification),
(XLMConfig, XLMForTokenClassification),
(XLMRobertaConfig, XLMRobertaForTokenClassification),
(RobertaConfig, RobertaForTokenClassification),
(BertConfig, BertForTokenClassification),
(XLNetConfig, XLNetForTokenClassification),
(AlbertConfig, AlbertForTokenClassification),
#(ElectraConfig, ElectraForTokenClassification),
]
)
class AutoModel(object):
r"""
:class:`~transformers.AutoModel` is a generic model class
that will be instantiated as one of the base model classes of the library
when created with the `AutoModel.from_pretrained(pretrained_model_name_or_path)`
or the `AutoModel.from_config(config)` class methods.
This class cannot be instantiated using `__init__()` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModel is designed to be instantiated "
"using the `AutoModel.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModel.from_config(config)` methods."
)
@classmethod
def from_config(cls, config):
r""" Instantiates one of the base model classes of the library
from a configuration.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
- isInstance of `distilbert` configuration class: :class:`~transformers.DistilBertModel` (DistilBERT model)
- isInstance of `roberta` configuration class: :class:`~transformers.RobertaModel` (RoBERTa model)
- isInstance of `bert` configuration class: :class:`~transformers.BertModel` (Bert model)
- isInstance of `openai-gpt` configuration class: :class:`~transformers.OpenAIGPTModel` (OpenAI GPT model)
- isInstance of `gpt2` configuration class: :class:`~transformers.GPT2Model` (OpenAI GPT-2 model)
- isInstance of `ctrl` configuration class: :class:`~transformers.CTRLModel` (Salesforce CTRL model)
- isInstance of `transfo-xl` configuration class: :class:`~transformers.TransfoXLModel` (Transformer-XL model)
- isInstance of `xlnet` configuration class: :class:`~transformers.XLNetModel` (XLNet model)
- isInstance of `xlm` configuration class: :class:`~transformers.XLMModel` (XLM model)
- isInstance of `flaubert` configuration class: :class:`~transformers.FlaubertModel` (Flaubert model)
- isInstance of `electra` configuration class: :class:`~transformers.ElectraModel` (Electra model)
Examples::
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
model = AutoModel.from_config(config) # E.g. model was saved using `save_pretrained('./test/saved_model/')`
"""
for config_class, model_class in MODEL_MAPPING.items():
if isinstance(config, config_class):
return model_class(config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_MAPPING.keys())
)
)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r""" Instantiates one of the base model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The base model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `t5`: :class:`~transformers.T5Model` (T5 model)
- contains `distilbert`: :class:`~transformers.DistilBertModel` (DistilBERT model)
- contains `albert`: :class:`~transformers.AlbertModel` (ALBERT model)
- contains `camembert`: :class:`~transformers.CamembertModel` (CamemBERT model)
- contains `xlm-roberta`: :class:`~transformers.XLMRobertaModel` (XLM-RoBERTa model)
- contains `roberta`: :class:`~transformers.RobertaModel` (RoBERTa model)
- contains `bert`: :class:`~transformers.BertModel` (Bert model)
- contains `openai-gpt`: :class:`~transformers.OpenAIGPTModel` (OpenAI GPT model)
- contains `gpt2`: :class:`~transformers.GPT2Model` (OpenAI GPT-2 model)
- contains `transfo-xl`: :class:`~transformers.TransfoXLModel` (Transformer-XL model)
- contains `xlnet`: :class:`~transformers.XLNetModel` (XLNet model)
- contains `xlm`: :class:`~transformers.XLMModel` (XLM model)
- contains `ctrl`: :class:`~transformers.CTRLModel` (Salesforce CTRL model)
- contains `flaubert`: :class:`~transformers.FlaubertModel` (Flaubert model)
- contains `electra`: :class:`~transformers.ElectraModel` (Electra model)
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Args:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
                - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
                an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
resume_download: (`optional`) boolean, default False:
                Do not delete incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
These arguments will be passed to the configuration and the model.
Examples::
model = AutoModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = AutoModel.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = AutoModel.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
for config_class, model_class in MODEL_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_MAPPING.keys())
)
)
class AutoModelForPreTraining(object):
r"""
:class:`~transformers.AutoModelForPreTraining` is a generic model class
    that will be instantiated as one of the model classes of the library (with the architecture used for pretraining this model) when created with the `AutoModelForPreTraining.from_pretrained(pretrained_model_name_or_path)`
class method.
This class cannot be instantiated using `__init__()` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModelForPreTraining is designed to be instantiated "
"using the `AutoModelForPreTraining.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForPreTraining.from_config(config)` methods."
)
@classmethod
def from_config(cls, config):
r""" Instantiates one of the base model classes of the library
from a configuration.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
- isInstance of `distilbert` configuration class: :class:`~transformers.DistilBertForMaskedLM` (DistilBERT model)
- isInstance of `roberta` configuration class: :class:`~transformers.RobertaForMaskedLM` (RoBERTa model)
- isInstance of `bert` configuration class: :class:`~transformers.BertForPreTraining` (Bert model)
- isInstance of `openai-gpt` configuration class: :class:`~transformers.OpenAIGPTLMHeadModel` (OpenAI GPT model)
- isInstance of `gpt2` configuration class: :class:`~transformers.GPT2LMHeadModel` (OpenAI GPT-2 model)
- isInstance of `ctrl` configuration class: :class:`~transformers.CTRLLMHeadModel` (Salesforce CTRL model)
- isInstance of `transfo-xl` configuration class: :class:`~transformers.TransfoXLLMHeadModel` (Transformer-XL model)
- isInstance of `xlnet` configuration class: :class:`~transformers.XLNetLMHeadModel` (XLNet model)
- isInstance of `xlm` configuration class: :class:`~transformers.XLMWithLMHeadModel` (XLM model)
- isInstance of `flaubert` configuration class: :class:`~transformers.FlaubertWithLMHeadModel` (Flaubert model)
- isInstance of `electra` configuration class: :class:`~transformers.ElectraForPreTraining` (Electra model)
Examples::
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
model = AutoModelForPreTraining.from_config(config) # E.g. model was saved using `save_pretrained('./test/saved_model/')`
"""
for config_class, model_class in MODEL_FOR_PRETRAINING_MAPPING.items():
if isinstance(config, config_class):
return model_class(config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_PRETRAINING_MAPPING.keys())
)
)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r""" Instantiates one of the model classes of the library -with the architecture used for pretraining this model– from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `t5`: :class:`~transformers.T5ModelWithLMHead` (T5 model)
- contains `distilbert`: :class:`~transformers.DistilBertForMaskedLM` (DistilBERT model)
- contains `albert`: :class:`~transformers.AlbertForMaskedLM` (ALBERT model)
- contains `camembert`: :class:`~transformers.CamembertForMaskedLM` (CamemBERT model)
- contains `xlm-roberta`: :class:`~transformers.XLMRobertaForMaskedLM` (XLM-RoBERTa model)
- contains `roberta`: :class:`~transformers.RobertaForMaskedLM` (RoBERTa model)
- contains `bert`: :class:`~transformers.BertForPreTraining` (Bert model)
- contains `openai-gpt`: :class:`~transformers.OpenAIGPTLMHeadModel` (OpenAI GPT model)
- contains `gpt2`: :class:`~transformers.GPT2LMHeadModel` (OpenAI GPT-2 model)
- contains `transfo-xl`: :class:`~transformers.TransfoXLLMHeadModel` (Transformer-XL model)
- contains `xlnet`: :class:`~transformers.XLNetLMHeadModel` (XLNet model)
- contains `xlm`: :class:`~transformers.XLMWithLMHeadModel` (XLM model)
- contains `ctrl`: :class:`~transformers.CTRLLMHeadModel` (Salesforce CTRL model)
- contains `flaubert`: :class:`~transformers.FlaubertWithLMHeadModel` (Flaubert model)
- contains `electra`: :class:`~transformers.ElectraForPreTraining` (Electra model)
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Args:
pretrained_model_name_or_path:
Either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
                - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
                an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
resume_download: (`optional`) boolean, default False:
Do not delete incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
These arguments will be passed to the configuration and the model.
Examples::
model = AutoModelForPreTraining.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = AutoModelForPreTraining.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = AutoModelForPreTraining.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
for config_class, model_class in MODEL_FOR_PRETRAINING_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_PRETRAINING_MAPPING.keys())
)
)
class AutoModelWithLMHead(object):
r"""
:class:`~transformers.AutoModelWithLMHead` is a generic model class
that will be instantiated as one of the language modeling model classes of the library
when created with the `AutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)`
class method.
This class cannot be instantiated using `__init__()` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModelWithLMHead is designed to be instantiated "
"using the `AutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelWithLMHead.from_config(config)` methods."
)
@classmethod
def from_config(cls, config):
r""" Instantiates one of the base model classes of the library
from a configuration.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
- isInstance of `distilbert` configuration class: :class:`~transformers.DistilBertForMaskedLM` (DistilBERT model)
- isInstance of `roberta` configuration class: :class:`~transformers.RobertaForMaskedLM` (RoBERTa model)
- isInstance of `bert` configuration class: :class:`~transformers.BertForMaskedLM` (Bert model)
- isInstance of `openai-gpt` configuration class: :class:`~transformers.OpenAIGPTLMHeadModel` (OpenAI GPT model)
- isInstance of `gpt2` configuration class: :class:`~transformers.GPT2LMHeadModel` (OpenAI GPT-2 model)
- isInstance of `ctrl` configuration class: :class:`~transformers.CTRLLMHeadModel` (Salesforce CTRL model)
- isInstance of `transfo-xl` configuration class: :class:`~transformers.TransfoXLLMHeadModel` (Transformer-XL model)
- isInstance of `xlnet` configuration class: :class:`~transformers.XLNetLMHeadModel` (XLNet model)
- isInstance of `xlm` configuration class: :class:`~transformers.XLMWithLMHeadModel` (XLM model)
- isInstance of `flaubert` configuration class: :class:`~transformers.FlaubertWithLMHeadModel` (Flaubert model)
- isInstance of `electra` configuration class: :class:`~transformers.ElectraForMaskedLM` (Electra model)
Examples::
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
model = AutoModelWithLMHead.from_config(config) # E.g. model was saved using `save_pretrained('./test/saved_model/')`
"""
for config_class, model_class in MODEL_WITH_LM_HEAD_MAPPING.items():
if isinstance(config, config_class):
return model_class(config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_WITH_LM_HEAD_MAPPING.keys())
)
)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r""" Instantiates one of the language modeling model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `t5`: :class:`~transformers.T5ModelWithLMHead` (T5 model)
- contains `distilbert`: :class:`~transformers.DistilBertForMaskedLM` (DistilBERT model)
- contains `albert`: :class:`~transformers.AlbertForMaskedLM` (ALBERT model)
- contains `camembert`: :class:`~transformers.CamembertForMaskedLM` (CamemBERT model)
- contains `xlm-roberta`: :class:`~transformers.XLMRobertaForMaskedLM` (XLM-RoBERTa model)
- contains `roberta`: :class:`~transformers.RobertaForMaskedLM` (RoBERTa model)
- contains `bert`: :class:`~transformers.BertForMaskedLM` (Bert model)
- contains `openai-gpt`: :class:`~transformers.OpenAIGPTLMHeadModel` (OpenAI GPT model)
- contains `gpt2`: :class:`~transformers.GPT2LMHeadModel` (OpenAI GPT-2 model)
- contains `transfo-xl`: :class:`~transformers.TransfoXLLMHeadModel` (Transformer-XL model)
- contains `xlnet`: :class:`~transformers.XLNetLMHeadModel` (XLNet model)
- contains `xlm`: :class:`~transformers.XLMWithLMHeadModel` (XLM model)
- contains `ctrl`: :class:`~transformers.CTRLLMHeadModel` (Salesforce CTRL model)
- contains `flaubert`: :class:`~transformers.FlaubertWithLMHeadModel` (Flaubert model)
- contains `electra`: :class:`~transformers.ElectraForMaskedLM` (Electra model)
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Args:
pretrained_model_name_or_path:
Either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
                - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
                an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
resume_download: (`optional`) boolean, default False:
Do not delete incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
These arguments will be passed to the configuration and the model.
Examples::
model = AutoModelWithLMHead.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = AutoModelWithLMHead.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = AutoModelWithLMHead.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
for config_class, model_class in MODEL_WITH_LM_HEAD_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_WITH_LM_HEAD_MAPPING.keys())
)
)
class AutoModelForSequenceClassification(object):
r"""
:class:`~transformers.AutoModelForSequenceClassification` is a generic model class
that will be instantiated as one of the sequence classification model classes of the library
when created with the `AutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path)`
class method.
This class cannot be instantiated using `__init__()` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModelForSequenceClassification is designed to be instantiated "
"using the `AutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForSequenceClassification.from_config(config)` methods."
)
@classmethod
def from_config(cls, config):
r""" Instantiates one of the base model classes of the library
from a configuration.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
- isInstance of `distilbert` configuration class: :class:`~transformers.DistilBertForSequenceClassification` (DistilBERT model)
- isInstance of `albert` configuration class: :class:`~transformers.AlbertForSequenceClassification` (ALBERT model)
- isInstance of `camembert` configuration class: :class:`~transformers.CamembertForSequenceClassification` (CamemBERT model)
- isInstance of `xlm roberta` configuration class: :class:`~transformers.XLMRobertaForSequenceClassification` (XLM-RoBERTa model)
- isInstance of `roberta` configuration class: :class:`~transformers.RobertaForSequenceClassification` (RoBERTa model)
- isInstance of `bert` configuration class: :class:`~transformers.BertForSequenceClassification` (Bert model)
- isInstance of `xlnet` configuration class: :class:`~transformers.XLNetForSequenceClassification` (XLNet model)
- isInstance of `xlm` configuration class: :class:`~transformers.XLMForSequenceClassification` (XLM model)
- isInstance of `flaubert` configuration class: :class:`~transformers.FlaubertForSequenceClassification` (Flaubert model)
Examples::
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
model = AutoModelForSequenceClassification.from_config(config) # E.g. model was saved using `save_pretrained('./test/saved_model/')`
"""
for config_class, model_class in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.items():
if isinstance(config, config_class):
return model_class(config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys()),
)
)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r""" Instantiates one of the sequence classification model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `distilbert`: :class:`~transformers.DistilBertForSequenceClassification` (DistilBERT model)
- contains `albert`: :class:`~transformers.AlbertForSequenceClassification` (ALBERT model)
- contains `camembert`: :class:`~transformers.CamembertForSequenceClassification` (CamemBERT model)
- contains `xlm-roberta`: :class:`~transformers.XLMRobertaForSequenceClassification` (XLM-RoBERTa model)
- contains `roberta`: :class:`~transformers.RobertaForSequenceClassification` (RoBERTa model)
- contains `bert`: :class:`~transformers.BertForSequenceClassification` (Bert model)
- contains `xlnet`: :class:`~transformers.XLNetForSequenceClassification` (XLNet model)
- contains `flaubert`: :class:`~transformers.FlaubertForSequenceClassification` (Flaubert model)
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Args:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
                - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
                an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
resume_download: (`optional`) boolean, default False:
                Do not delete incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
These arguments will be passed to the configuration and the model.
Examples::
model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = AutoModelForSequenceClassification.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = AutoModelForSequenceClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
for config_class, model_class in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys()),
)
)
class AutoModelForQuestionAnswering(object):
r"""
:class:`~transformers.AutoModelForQuestionAnswering` is a generic model class
that will be instantiated as one of the question answering model classes of the library
when created with the `AutoModelForQuestionAnswering.from_pretrained(pretrained_model_name_or_path)`
class method.
This class cannot be instantiated using `__init__()` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModelForQuestionAnswering is designed to be instantiated "
"using the `AutoModelForQuestionAnswering.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForQuestionAnswering.from_config(config)` methods."
)
@classmethod
def from_config(cls, config):
r""" Instantiates one of the base model classes of the library
from a configuration.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
- isInstance of `distilbert` configuration class: :class:`~transformers.DistilBertForQuestionAnswering` (DistilBERT model)
- isInstance of `albert` configuration class: :class:`~transformers.AlbertForQuestionAnswering` (ALBERT model)
- isInstance of `bert` configuration class: :class:`~transformers.BertModelForQuestionAnswering` (Bert model)
- isInstance of `xlnet` configuration class: :class:`~transformers.XLNetForQuestionAnswering` (XLNet model)
- isInstance of `xlm` configuration class: :class:`~transformers.XLMForQuestionAnswering` (XLM model)
                - isInstance of `flaubert` configuration class: :class:`~transformers.FlaubertForQuestionAnswering` (Flaubert model)
Examples::
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
model = AutoModelForSequenceClassification.from_config(config) # E.g. model was saved using `save_pretrained('./test/saved_model/')`
"""
for config_class, model_class in MODEL_FOR_QUESTION_ANSWERING_MAPPING.items():
if isinstance(config, config_class):
return model_class(config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()),
)
)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r""" Instantiates one of the question answering model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `distilbert`: :class:`~transformers.DistilBertForQuestionAnswering` (DistilBERT model)
- contains `albert`: :class:`~transformers.AlbertForQuestionAnswering` (ALBERT model)
- contains `bert`: :class:`~transformers.BertForQuestionAnswering` (Bert model)
- contains `xlnet`: :class:`~transformers.XLNetForQuestionAnswering` (XLNet model)
- contains `xlm`: :class:`~transformers.XLMForQuestionAnswering` (XLM model)
            - contains `flaubert`: :class:`~transformers.FlaubertForQuestionAnswering` (Flaubert model)
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Args:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
                - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
                an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
These arguments will be passed to the configuration and the model.
Examples::
model = AutoModelForQuestionAnswering.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = AutoModelForQuestionAnswering.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = AutoModelForQuestionAnswering.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
for config_class, model_class in MODEL_FOR_QUESTION_ANSWERING_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()),
)
)
class AutoModelForTokenClassification:
r"""
:class:`~transformers.AutoModelForTokenClassification` is a generic model class
that will be instantiated as one of the token classification model classes of the library
when created with the `AutoModelForTokenClassification.from_pretrained(pretrained_model_name_or_path)`
class method.
This class cannot be instantiated using `__init__()` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModelForTokenClassification is designed to be instantiated "
"using the `AutoModelForTokenClassification.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForTokenClassification.from_config(config)` methods."
)
@classmethod
def from_config(cls, config):
r""" Instantiates one of the base model classes of the library
from a configuration.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
- isInstance of `distilbert` configuration class: :class:`~transformers.DistilBertModelForTokenClassification` (DistilBERT model)
- isInstance of `xlm` configuration class: :class:`~transformers.XLMForTokenClassification` (XLM model)
- isInstance of `xlm roberta` configuration class: :class:`~transformers.XLMRobertaModelForTokenClassification` (XLMRoberta model)
- isInstance of `bert` configuration class: :class:`~transformers.BertModelForTokenClassification` (Bert model)
- isInstance of `albert` configuration class: :class:`~transformers.AlbertForTokenClassification` (AlBert model)
- isInstance of `xlnet` configuration class: :class:`~transformers.XLNetModelForTokenClassification` (XLNet model)
- isInstance of `camembert` configuration class: :class:`~transformers.CamembertModelForTokenClassification` (Camembert model)
- isInstance of `roberta` configuration class: :class:`~transformers.RobertaModelForTokenClassification` (Roberta model)
- isInstance of `electra` configuration class: :class:`~transformers.ElectraForTokenClassification` (Electra model)
Examples::
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
model = AutoModelForTokenClassification.from_config(config) # E.g. model was saved using `save_pretrained('./test/saved_model/')`
"""
for config_class, model_class in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items():
if isinstance(config, config_class):
return model_class(config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys()),
)
)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r""" Instantiates one of the question answering model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `distilbert`: :class:`~transformers.DistilBertForTokenClassification` (DistilBERT model)
- contains `xlm`: :class:`~transformers.XLMForTokenClassification` (XLM model)
            - contains `xlm-roberta`: :class:`~transformers.XLMRobertaForTokenClassification` (XLM-RoBERTa model)
- contains `camembert`: :class:`~transformers.CamembertForTokenClassification` (Camembert model)
- contains `bert`: :class:`~transformers.BertForTokenClassification` (Bert model)
- contains `xlnet`: :class:`~transformers.XLNetForTokenClassification` (XLNet model)
- contains `roberta`: :class:`~transformers.RobertaForTokenClassification` (Roberta model)
- contains `electra`: :class:`~transformers.ElectraForTokenClassification` (Electra model)
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Args:
pretrained_model_name_or_path:
Either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
                - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
                an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
These arguments will be passed to the configuration and the model.
Examples::
model = AutoModelForTokenClassification.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = AutoModelForTokenClassification.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = AutoModelForTokenClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
for config_class, model_class in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys()),
)
)
| [] |
2024-01-10 | arunrajanrv1996/voiceapp | application~controllers.py | from flask import current_app as app, render_template, jsonify, request,g
import os, json
import subprocess
from werkzeug.security import check_password_hash, generate_password_hash
from application.models import db, User, UserTranscription,UserRoles
from flask_security import current_user,auth_required
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import datetime
import spacy
from collections import Counter
from openai import OpenAI
from application.email import send_email_user
from jinja2 import Template
import random
from flask_jwt_extended import create_access_token, jwt_required, get_jwt_identity
# Set the OpenAI API key
api_key = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=api_key) # Initialize the OpenAI client
nlp = spacy.load("en_core_web_sm") # Load the spaCy model
# Define the home page route
@app.route('/')
def index():
return render_template('index.html')
# Define the dictionary of user information
def cuser_to_dict(user):
return {
'id': user.id,
'username': user.username,
'email': user.email,
}
# Define the dictionary of user information
def puser_to_dict(user):
return {
'id': user.id,
'username': user.username,
'email': user.email,
'image': user.image,
}
# Define the dictionary of usertranscription information
def transcript_to_dict(transcript):
return {
'id': transcript.id,
'text': transcript.transcription,
'language': transcript.language,
'user_id': transcript.user_id,
'created_on': transcript.time,
}
# Define the route for user deletion
@app.route('/deleteuser/', methods=['DELETE'])
@jwt_required()
def deleteuser():
id = get_jwt_identity()
user=User.query.filter_by(id=id).first()
if not user:
return jsonify({'message': 'No user found!'})
usertranscript=UserTranscription.query.filter_by(user_id=id).all()
role = UserRoles.query.filter_by(user_id=id).first()
db.session.delete(role)
db.session.commit()
for i in usertranscript:
db.session.delete(i)
db.session.commit()
db.session.delete(user)
db.session.commit()
return jsonify({'message': 'User deleted successfully!'})
# Define the route for user resetpassword
@app.route('/resetpassword', methods=['PUT','POST'])
def resetpassword():
if request.method=='POST':
post_data = request.get_json()
email = post_data.get('email')
user = User.query.filter_by(email=email).first()
genotp= random.randint(100000,999999)
if not user:
return jsonify({'message': 'No user found!'})
with open('templates/reset.html') as file_:
template = Template(file_.read())
message = template.render(otp=genotp)
send_email_user(
to=email,
sub="Password Reset",
message=message
)
return jsonify({'message': 'Password sent successfully!', 'otp': genotp, 'email': email})
if request.method=='PUT':
post_data = request.get_json()
email = post_data.get('email')
user = User.query.filter_by(email=email).first()
if not user:
return jsonify({'message': 'No user found!'})
password = generate_password_hash(post_data.get('password'))
user.password=password
db.session.commit()
return jsonify({'message': 'Password reset successfully!'})
# Define the route for user login
@app.route('/userlogin', methods=['POST'])
def userlogin():
post_data = request.get_json()
username = post_data.get('username')
password = post_data.get('password')
with app.app_context():
user_datastore = app.security.datastore
user = User.query.filter_by(username=username).first()
if not user:
app.logger.info(f"No user found for username: {username}")
return jsonify({'message': 'No user found!'})
if check_password_hash(user.password, password):
app.logger.info("Password validation successful")
access_token = create_access_token(identity=user.id)
return jsonify({"token": access_token})
else:
app.logger.warning("Password validation failed")
return jsonify({"message": "Wrong Password"})
# Define the route for user profile
@app.route("/userprofile/", methods=['POST','PUT','GET'])
@jwt_required()
def userprofile():
id = get_jwt_identity()
if request.method=='GET':
user=User.query.filter_by(id=id).first()
return jsonify(puser_to_dict(user))
if request.method=='PUT':
post_data = request.get_json()
image = post_data.get('image')
password = post_data.get('password')
user=User.query.filter_by(id=id).first()
if not user:
return jsonify({'message': 'No user logged in'})
if image:
user.image=image
db.session.commit()
if password:
user.password=generate_password_hash(password)
db.session.commit()
return jsonify({'message': 'User updated successfully!'})
# Define the route for currentuser
@app.route('/currentuser/')
@jwt_required()
def currentuser():
user=User.query.filter_by(id=get_jwt_identity()).first()
if not user:
return jsonify({'message': 'No user logged in'})
return jsonify(cuser_to_dict(user))
# Define the route for user creation and listing
@app.route('/createuser/')
def createuser():
user=User.query.all()
return jsonify([cuser_to_dict(user) for user in user])
# Define the route for user creation
@app.route('/registeruser/', methods=['POST'])
def registeruser():
post_data = request.get_json()
username = post_data.get('username')
email = post_data.get('email')
password = post_data.get('password')
image = post_data.get('image')
if not username:
return jsonify({'message': 'Username is required'})
if not email:
return jsonify({'message': 'Email is required'})
if not password:
return jsonify({'message': 'Password is required'})
user = User.query.filter_by(username=username,email=email).first()
if user:
return jsonify({'message': 'Username already exists'})
with app.app_context():
user_datastore = app.security.datastore
if not user_datastore.find_user(username=username) and not user_datastore.find_user(email=email):
user_datastore.create_user(username=username, email=email,image=image, password=generate_password_hash(password))
db.session.commit()
user = user_datastore.find_user(username=username)
role = user_datastore.find_role('user')
user_datastore.add_role_to_user(user, role)
db.session.commit()
return jsonify({'message': 'User created successfully!'})
# Define the route for usertanscription
@app.route('/usertranscript/')
@jwt_required()
def usertranscript():
user=UserTranscription.query.filter_by(user_id=get_jwt_identity()).order_by(UserTranscription.time.desc()).limit(30)
return jsonify([transcript_to_dict(user) for user in user])
# Define the route for usertanscriptionanalysis
@app.route('/usertranscriptanalysis/')
@jwt_required()
def compute_frequent_words_and_phrases():
user_id = get_jwt_identity()
# Calculate the most frequently used words for the current user
user_transcriptions = UserTranscription.query.filter_by(user_id=user_id).all()
all_transcriptions = " ".join([transcription.transcription for transcription in user_transcriptions])
doc = nlp(all_transcriptions)
frequent_words = [token.text for token in doc if token.is_alpha and not token.is_stop]
frequent_words_counter = Counter(frequent_words)
frequent_words_user = dict(frequent_words_counter.most_common(10)) # Adjust the number as needed
# Calculate the most frequently used words across all users
all_transcriptions = " ".join([transcription.transcription for transcription in UserTranscription.query.all()])
doc_all_users = nlp(all_transcriptions)
frequent_words_all_users = Counter([token.text for token in doc_all_users if token.is_alpha and not token.is_stop])
frequent_words_all_users = dict(frequent_words_all_users.most_common(10)) # Adjust the number as needed
return jsonify({'frequent_words_user': frequent_words_user, 'frequent_words_all_users': frequent_words_all_users})
# Define the route for useruniquephrases
@app.route('/useruniquephrases/')
@jwt_required()
def get_user_unique_phrases():
user_id = get_jwt_identity()
# Retrieve all transcriptions for the current user
user_transcriptions = UserTranscription.query.filter_by(user_id=user_id).all()
# Extract and count phrases from the transcriptions
all_phrases = []
for transcription in user_transcriptions:
phrases = extract_phrases(transcription.transcription)
all_phrases.extend(phrases)
# Count the frequency of each phrase
phrase_counts = Counter(all_phrases)
# Extract unique phrases used only once
unique_phrases = [phrase for phrase, count in phrase_counts.items() if count == 1]
# Return the first 3 unique phrases (or all if there are fewer than 3)
return jsonify({'user_unique_phrases': unique_phrases[:3]})
def extract_phrases(text):
# You can customize this function based on your requirements for extracting phrases
doc = nlp(text)
phrases = [chunk.text for chunk in doc.noun_chunks if len(chunk.text.split()) >= 2]
return phrases
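# Illustrative sketch (not part of the original file): extract_phrases keeps only
# multi-word noun chunks, so "the quick brown fox jumped over the lazy dog" would
# yield something like ["the quick brown fox", "the lazy dog"], while single-word
# chunks are dropped by the len(...) >= 2 filter. Exact chunking depends on the
# spaCy model loaded above.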
# Define the route for similarusers
@app.route('/similarusers/')
@jwt_required()
def find_similar_users():
current_user_id = get_jwt_identity()
# Retrieve transcriptions for the current user
current_user_transcriptions = UserTranscription.query.filter_by(user_id=current_user_id).all()
if len(current_user_transcriptions) == 0:
return jsonify({'similar_users': []})
# Extract text from transcriptions
current_user_text = " ".join([transcription.transcription for transcription in current_user_transcriptions])
# Retrieve transcriptions for all users (excluding the current user)
all_users_transcriptions = UserTranscription.query.filter(UserTranscription.user_id != current_user_id).all()
if len(all_users_transcriptions) == 0:
return jsonify({'similar_users': []})
# Create a list of user texts
all_users_texts = [" ".join([transcription.transcription for transcription in UserTranscription.query.filter_by(user_id=user_transcription.user_id).all()]) for user_transcription in all_users_transcriptions]
# Calculate TF-IDF vectors for the current user and all users
vectorizer = TfidfVectorizer()
current_user_vector = vectorizer.fit_transform([current_user_text])
all_users_vectors = vectorizer.transform(all_users_texts)
# Calculate cosine similarity between the current user and all users
similarities = cosine_similarity(current_user_vector, all_users_vectors)[0]
# Get the indices of users with the highest similarity
most_similar_user_indices = similarities.argsort()[:-4:-1] # Get top 3 most similar users
# Retrieve user information for the most similar users
most_similar_users = [User.query.get(all_users_transcriptions[i].user_id) for i in most_similar_user_indices]
# Convert user information to a dictionary format
similar_users_info = []
for i in range(len(most_similar_users)):
if len(similar_users_info)==5:
break
if most_similar_users[i].username != User.query.get(current_user_id).username:
similar_users_info.append(most_similar_users[i].username)
similar_users_info=list(set(similar_users_info))
return jsonify({'similar_users': similar_users_info})
# Define the route for speech to text conversion
@app.route('/speech/<lang>', methods=['POST'])
def speech(lang):
user_id = request.form.get('user_id')
audio_file = request.files['audio']
# Create the directory if it doesn't exist
audio_dir = os.path.join(app.root_path, 'static', 'js', 'audio')
os.makedirs(audio_dir, exist_ok=True)
# Save the audio file to a known location with Ogg extension
audio_file_path = os.path.join(audio_dir, 'audio.ogg')
audio_file.save(audio_file_path)
audio_file_size_bytes = os.path.getsize(audio_file_path)
# Convert the file size to MB
audio_file_size_mb = audio_file_size_bytes / (1024 * 1024)
# Check if the file size is larger than 25 MB
if audio_file_size_mb > 5:
return jsonify({'text': 'File size is larger than 5 MB'})
audio_file_open = open(audio_file_path, "rb")
try:
if lang=="English":
transcript = client.audio.translations.create(
model="whisper-1",
file=audio_file_open,
response_format="json",
                prompt="i am talking in " + lang
)
elif lang=="":
transcript = client.audio.translations.create(
model="whisper-1",
file=audio_file_open,
response_format="json"
)
else:
transcript = client.audio.translations.create(
model="whisper-1",
file=audio_file_open,
response_format="json",
                prompt="i am talking in " + lang
)
if user_id!='':
user_transcription = UserTranscription(user_id=user_id, transcription=transcript.text, language=lang, time=datetime.datetime.now())
db.session.add(user_transcription)
db.session.commit()
return jsonify({'text': transcript.text})
except Exception as e:
print(e)
return jsonify({'text': 'Error in transcription'})
finally:
audio_file_open.close()
os.remove(audio_file_path)
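# Hypothetical client-side sketch (not part of the original file): the route above
# expects a multipart file under "audio" plus an optional "user_id" form field, so a
# caller using the requests library might look like the following (host and port are
# placeholder assumptions):
#
#   import requests
#   with open("sample.ogg", "rb") as f:
#       r = requests.post("http://localhost:5000/speech/English",
#                         files={"audio": f}, data={"user_id": ""})
#   print(r.json()["text"])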
| [] |
2024-01-10 | HakierGrzonzo/ClipSelect3 | backend~app~chromadb.py | from collections.abc import Iterable
from logging import getLogger
import string
from chromadb import PersistentClient
from os import environ
from chromadb.utils.embedding_functions import OpenAIEmbeddingFunction
from app.models import Media, SearchResult, SearchResultMetaData
EMBEDDING_MODEL = "text-embedding-ada-002"
OPEN_AI_KEY = environ["OPENAI_KEY"]
logger = getLogger(__name__)
client = PersistentClient(path="./chroma")
embedding_function = OpenAIEmbeddingFunction(
api_key=OPEN_AI_KEY, model_name=EMBEDDING_MODEL
)
def get_id_for_series_name(name: str):
name_l = [x for x in name if x.lower() in string.ascii_lowercase]
return "".join(name_l)[:20]
def enroll_episode(episode: Media, series_name: str, season_ordinal: int):
collection_id = get_id_for_series_name(series_name)
logger.info(f"Enrolling {episode.name} in {collection_id}")
collection = client.get_or_create_collection(
collection_id, embedding_function=embedding_function
)
ids = [
f"{season_ordinal}-{episode.ordinal}-{i}"
for i, _ in enumerate(episode.captions)
]
documents = [caption.text for caption in episode.captions]
metadatas = [
SearchResultMetaData(
episode=episode.ordinal,
season=season_ordinal,
series_name=series_name,
caption=i,
start=caption.start,
stop=caption.stop,
).model_dump()
for i, caption in enumerate(episode.captions)
]
collection.add(documents=documents, ids=ids, metadatas=metadatas)
def query_series(series_name: str, query: str) -> Iterable[SearchResult]:
logger.info(f"Quering {series_name} with {query}")
collection = client.get_collection(
get_id_for_series_name(series_name),
embedding_function=embedding_function,
)
results = collection.query(query_texts=query)
return [
SearchResult(**meta, text=text)
for meta, text in zip(results["metadatas"][0], results["documents"][0])
]
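# Minimal usage sketch (not part of the original file), assuming the series was
# previously enrolled with enroll_episode(); the series name and query string below
# are placeholders:
#
#   results = query_series("Example Show", "the scene where they argue about dinner")
#   for r in results:
#       print(r.season, r.episode, r.start, r.stop, r.text)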
| [] |
2024-01-10 | plxgwalker/Suvorov.LNU.Twitter-Clone | Suvorov.LNU.TwitterClone.Script~script.py | import openai
import json
import sys
with open("D:\\twitter-clone-new\\config.json") as f:
config = json.load(f)
OPENAI_API_KEY = config["OPENAI_API_KEY"]
openai.api_key = OPENAI_API_KEY
user_response = " ".join(sys.argv[1:])
twitter_writer_prompt = (
"You are going to be Twitter writer. "
"Here is my idea, about which I would like to write. "
"Your main goal is to write me a tweet which is going to be viral. "
"Style of text should be polite. Max tweet characters is 100. "
"Do not write any comments to tweet, only tweet text. Idea: "
)
def create_tweet(text: str) -> str:
prompt = twitter_writer_prompt + text
openai_response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
max_tokens=100,
temperature=0
)
result = openai_response.choices[0].text
return result.strip()
result = create_tweet(user_response)
print(result)
| [
"You are going to be Twitter writer. Here is my idea, about which I would like to write. Your main goal is to write me a tweet which is going to be viral. Style of text should be polite. Max tweet characters is 100. Do not write any comments to tweet, only tweet text. Idea: ",
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | elichad/Caster | tests~testrunner.py | import os
import sys
import unittest
import six
if six.PY2:
import logging
logging.basicConfig()
if os.path.dirname(os.path.dirname(os.path.abspath(__file__))) not in sys.path:
sys.path.insert(0,os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from dragonfly import get_engine
from castervoice.lib.ctrl.mgr.errors.guidance_rejection import GuidanceRejectionException
from castervoice.lib.util import guidance
from tests.test_util import settings_mocking, utilities_mocking
def reject_file_writing():
raise GuidanceRejectionException()
def get_master_suite():
return unittest.defaultTestLoader.discover(os.path.dirname(__file__))
def run_tests():
get_engine("text")
settings_mocking.prevent_initialize()
utilities_mocking.mock_toml_files()
return unittest.TextTestRunner(verbosity=2).run(get_master_suite())
if __name__ == '__main__':
guidance.offer = reject_file_writing
result = run_tests()
sys.exit(len(result.failures) + len(result.errors) + len(result.unexpectedSuccesses))
| [] |
2024-01-10 | elichad/Caster | castervoice~lib~settings.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from builtins import str
import collections
import io
import os
import sys
import tomlkit
from past.builtins import xrange
from castervoice.lib import printer
from castervoice.lib import version
from castervoice.lib.util import guidance
from appdirs import *
import six
if six.PY2:
from castervoice.lib.util.pathlib import Path
else:
from pathlib import Path # pylint: disable=import-error
# consts: some of these can easily be moved out of this file
GENERIC_HELP_MESSAGE = """
If you continue having problems with this or any other issue you can contact
us through Gitter at <https://gitter.im/dictation-toolbox/Caster> or on our GitHub
issue tracker at <https://github.com/dictation-toolbox/Caster/issues>.
Thank you for using Caster!
"""
SOFTWARE_VERSION_NUMBER = version.__version__
SOFTWARE_NAME = "Caster v " + SOFTWARE_VERSION_NUMBER
HOMUNCULUS_VERSION = "HMC v " + SOFTWARE_VERSION_NUMBER
HMC_TITLE_RECORDING = " :: Recording Manager"
HMC_TITLE_DIRECTORY = " :: Directory Selector"
HMC_TITLE_CONFIRM = " :: Confirm"
LEGION_TITLE = "legiongrid"
RAINBOW_TITLE = "rainbowgrid"
DOUGLAS_TITLE = "douglasgrid"
SUDOKU_TITLE = "sudokugrid"
SETTINGS_WINDOW_TITLE = "Caster Settings Window v "
QTYPE_DEFAULT = "0"
QTYPE_INSTRUCTIONS = "3"
QTYPE_RECORDING = "4"
QTYPE_DIRECTORY = "5"
QTYPE_CONFIRM = "6"
WXTYPE_SETTINGS = "7"
HMC_SEPARATOR = "[hmc]"
# calculated fields
SETTINGS = None
SYSTEM_INFORMATION = None
WSR = False
_BASE_PATH = None
_USER_DIR = None
_SETTINGS_PATH = None
def _get_platform_information():
"""Return a dictionary containing platform-specific information."""
import sysconfig
system_information = {"platform": sysconfig.get_platform()}
system_information.update({"python version": sys.version_info})
if sys.platform == "win32":
system_information.update({"binary path": sys.exec_prefix})
system_information.update(
{"main binary": str(Path(sys.exec_prefix).joinpath("python.exe"))})
system_information.update(
{"hidden console binary": str(Path(sys.exec_prefix).joinpath("pythonw.exe"))})
else:
system_information.update({"binary path": str(Path(sys.exec_prefix).joinpath(sys.exec_prefix).joinpath("bin"))})
system_information.update(
{"main binary": str(Path(sys.exec_prefix).joinpath("bin", "python"))})
system_information.update(
{"hidden console binary": str(Path(sys.exec_prefix).joinpath("bin", "python"))})
return system_information
def get_filename():
return _SETTINGS_PATH
def _validate_engine_path():
'''
Validates path 'Engine Path' in settings.toml
'''
if not sys.platform.startswith('win'):
return ''
try:
import natlink # pylint: disable=import-error
except ImportError:
return ''
if os.path.isfile(_SETTINGS_PATH):
with io.open(_SETTINGS_PATH, "rt", encoding="utf-8") as toml_file:
data = tomlkit.loads(toml_file.read()).value
engine_path = data["paths"]["ENGINE_PATH"]
if os.path.isfile(engine_path):
return engine_path
else:
engine_path = _find_natspeak()
data["paths"]["ENGINE_PATH"] = engine_path
try:
formatted_data = str(tomlkit.dumps(data))
with io.open(_SETTINGS_PATH, "w", encoding="utf-8") as toml_file:
toml_file.write(formatted_data)
printer.out("Setting engine path to {}".format(engine_path))
except Exception as e:
printer.out("Error saving settings file {} {} ".format(e, _SETTINGS_PATH))
return engine_path
else:
return _find_natspeak()
def _find_natspeak():
'''
Finds engine 'natspeak.exe' path and verifies supported DNS versions via Windows Registry.
'''
try:
if six.PY2:
import _winreg as winreg
else:
import winreg
except ImportError:
printer.out("Could not import winreg")
return ""
printer.out("Searching Windows Registry For DNS...")
proc_arch = os.environ['PROCESSOR_ARCHITECTURE'].lower()
try:
proc_arch64 = os.environ['PROCESSOR_ARCHITEW6432'].lower()
except KeyError:
proc_arch64 = False
if proc_arch == 'x86' and not proc_arch64:
arch_keys = {0}
elif proc_arch == 'x86' or proc_arch == 'amd64':
arch_keys = {winreg.KEY_WOW64_32KEY, winreg.KEY_WOW64_64KEY}
else:
raise Exception("Unhandled arch: %s" % proc_arch)
for arch_key in arch_keys:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
"SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall",
0, winreg.KEY_READ | arch_key)
for i in xrange(0, winreg.QueryInfoKey(key)[0]):
skey_name = winreg.EnumKey(key, i)
skey = winreg.OpenKey(key, skey_name)
            DisplayName = Publisher = DisplayVersion = InstallLocation = 'null'
try:
DisplayName = winreg.QueryValueEx(skey, 'DisplayName')[0]
Publisher = winreg.QueryValueEx(skey, 'Publisher')[0]
DisplayVersion = winreg.QueryValueEx(skey, 'DisplayVersion')[0]
InstallLocation = winreg.QueryValueEx(skey, 'InstallLocation')[0]
except OSError as error:
if error.errno == 2: # Suppresses '[Error 2] The system cannot find the file specified'
pass
else:
printer.out(error)
finally:
skey.Close()
if Publisher == "Nuance Communications Inc." and "Dragon" in DisplayName:
DnsVersion = int(str(DisplayVersion)[:2])
if DnsVersion >= 13:
engine_path = str(Path(InstallLocation).joinpath("Program/natspeak.exe"))
if os.path.isfile(engine_path):
printer.out("Search Complete.")
return engine_path
else:
printer.out(
"Dragon Naturally Speaking {} is not supported by Caster. Only versions 13 and above are supported. Purchase Dragon Naturally Speaking 13 or above"
.format(DnsVersion))
printer.out("Cannot find dragon engine path")
return ""
def _save(data, path):
"""
Only to be used for settings file.
:param data:
:param path:
:return:
"""
guidance.offer()
try:
formatted_data = str(tomlkit.dumps(data))
with io.open(path, "wt", encoding="utf-8") as f:
f.write(formatted_data)
except Exception as e:
printer.out("Error saving toml file: {} {}".format(e, _SETTINGS_PATH))
def _init(path):
guidance.offer()
result = {}
try:
with io.open(path, "rt", encoding="utf-8") as f:
result = tomlkit.loads(f.read()).value
except ValueError as e:
printer.out("\n\n {} while loading settings file: {} \n\n".format(repr(e), path))
printer.out(sys.exc_info())
except IOError as e:
printer.out("\n\n {} while loading settings file: {} \nAttempting to recover...\n\n".format(repr(e), path))
default_settings = _get_defaults()
result, num_default_added = _deep_merge_defaults(result, default_settings)
if num_default_added > 0:
printer.out("Default settings values added: {} ".format(num_default_added))
_save(result, _SETTINGS_PATH)
return result
def _deep_merge_defaults(data, defaults):
"""
    Recursively merge data and defaults, preferring data.
Only handles nested dicts and scalar values.
Modifies `data` in place.
"""
changes = 0
for key, default_value in defaults.items():
        # If the key is in the data, use that, but call recursively if it's a dict.
if key in data:
if isinstance(data[key], collections.Mapping):
child_data, child_changes = _deep_merge_defaults(data[key], default_value)
data[key] = child_data
changes += child_changes
else:
data[key] = default_value
changes += 1
return data, changes
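# Illustrative sketch (not part of the original file): values already present in
# `data` win, missing keys are copied from `defaults`, and `changes` counts how many
# defaults were added.
#
#   merged, changes = _deep_merge_defaults(
#       {"paths": {"USER_DIR": "/tmp/caster"}},
#       {"paths": {"USER_DIR": "~", "LOG_PATH": "log.txt"}, "sikuli": {"enabled": False}})
#   # merged == {"paths": {"USER_DIR": "/tmp/caster", "LOG_PATH": "log.txt"},
#   #            "sikuli": {"enabled": False}} and changes == 2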
def _get_defaults():
terminal_path_default = "C:/Program Files/Git/git-bash.exe"
if not os.path.isfile(terminal_path_default):
terminal_path_default = ""
ahk_path_default = "C:/Program Files/AutoHotkey/AutoHotkey.exe"
if not os.path.isfile(ahk_path_default):
ahk_path_default = ""
return {
"paths": {
"BASE_PATH":
_BASE_PATH,
"USER_DIR":
_USER_DIR,
# pathlib string conversion can be removed once pathlib is utilized throughout Caster.
# DATA
"SM_BRINGME_PATH":
str(Path(_USER_DIR).joinpath("settings/sm_bringme.toml")),
"SM_ALIAS_PATH":
str(Path(_USER_DIR).joinpath("data/sm_aliases.toml")),
"SM_CHAIN_ALIAS_PATH":
str(Path(_USER_DIR).joinpath("data/sm_chain_aliases.toml")),
"SM_HISTORY_PATH":
str(Path(_USER_DIR).joinpath("data/sm_history.toml")),
"RULES_CONFIG_PATH":
str(Path(_USER_DIR).joinpath("settings/rules.toml")),
"TRANSFORMERS_CONFIG_PATH":
str(Path(_USER_DIR).joinpath("settings/transformers.toml")),
"HOOKS_CONFIG_PATH":
str(Path(_USER_DIR).joinpath("settings/hooks.toml")),
"COMPANION_CONFIG_PATH":
str(Path(_USER_DIR).joinpath("settings/companion_config.toml")),
"DLL_PATH":
str(Path(_BASE_PATH).joinpath("lib/dll/")),
"GDEF_FILE":
str(Path(_USER_DIR).joinpath("transformers/words.txt")),
"LOG_PATH":
str(Path(_USER_DIR).joinpath("log.txt")),
"SAVED_CLIPBOARD_PATH":
str(Path(_USER_DIR).joinpath("data/clipboard.json")),
"SIKULI_SCRIPTS_PATH":
str(Path(_USER_DIR).joinpath("sikuli")),
"GIT_REPO_LOCAL_REMOTE_PATH":
str(Path(_USER_DIR).joinpath("settings/git_repo_local_to_remote_match.toml")),
"GIT_REPO_LOCAL_REMOTE_DEFAULT_PATH":
str(Path(_BASE_PATH).joinpath("bin/share/git_repo_local_to_remote_match.toml.defaults")),
# REMOTE_DEBUGGER_PATH is the folder in which pydevd.py can be found
"REMOTE_DEBUGGER_PATH":
str(Path("")),
# SIKULIX EXECUTABLES
"SIKULI_IDE":
str(Path("")),
"SIKULI_RUNNER":
str(Path("")),
# EXECUTABLES
"AHK_PATH":
str(Path(_BASE_PATH).joinpath(ahk_path_default)),
"DOUGLAS_PATH":
str(Path(_BASE_PATH).joinpath("asynch/mouse/grids.py")),
"ENGINE_PATH":
_validate_engine_path(),
"HOMUNCULUS_PATH":
str(Path(_BASE_PATH).joinpath("asynch/hmc/h_launch.py")),
"LEGION_PATH":
str(Path(_BASE_PATH).joinpath("asynch/mouse/legion.py")),
"MEDIA_PATH":
str(Path(_BASE_PATH).joinpath("bin/media")),
"RAINBOW_PATH":
str(Path(_BASE_PATH).joinpath("asynch/mouse/grids.py")),
"REBOOT_PATH":
str(Path(_BASE_PATH).joinpath("bin/reboot.bat")),
"REBOOT_PATH_WSR":
str(Path(_BASE_PATH).joinpath("bin/reboot_wsr.bat")),
"SETTINGS_WINDOW_PATH":
str(Path(_BASE_PATH).joinpath("asynch/settingswindow.py")),
"SIKULI_SERVER_PATH":
str(Path(_BASE_PATH).joinpath("asynch/sikuli/server/xmlrpc_server.sikuli")),
"SUDOKU_PATH":
str(Path(_BASE_PATH).joinpath("asynch/mouse/grids.py")),
"WSR_PATH":
str(Path(_BASE_PATH).joinpath("C:/Windows/Speech/Common/sapisvr.exe")),
"TERMINAL_PATH":
str(Path(terminal_path_default)),
# CCR
"CONFIGDEBUGTXT_PATH":
str(Path(_USER_DIR).joinpath("data/configdebug.txt")),
# PYTHON
"PYTHONW":
SYSTEM_INFORMATION["hidden console binary"],
},
# Speech recognition engine settings
"engine": {
"default_engine_mode": False,
"engine_mode": "normal",
"default_mic": False,
"mic_mode": "on",
"mic_sleep_timer": 120, # Seconds before microphone goes to sleep after last successful recognition.
# Note: No greater than 5 minutes for DPI/DPI
},
# python settings
"python": {
"automatic_settings":
True, # Set to false to manually set "version" and "pip" below.
"version":
"python", # Depending Python setup (python, python2, python2.7, py, py -2)
"pip": "pip" # Depending on PIP setup (pip ,pip2, pip2.7)
},
# sikuli settings
"sikuli": {
"enabled": False,
"version": ""
},
# gitbash settings
"gitbash": {
"loading_time": 5, # the time to initialise the git bash window in seconds
"fetching_time": 3 # the time to fetch a github repository in seconds
},
# node rules path
"Tree_Node_Path": {
"SM_CSS_TREE_PATH": str(Path(_USER_DIR).joinpath("data/sm_css_tree.toml")),
},
"online": {
"online_mode": True, # False disables updates
"last_update_date": "None",
"update_interval": 7 # Days
},
# Default enabled hooks: Use hook class name
"hooks": {
"default_hooks": ['PrinterHook'],
},
# miscellaneous section
"miscellaneous": {
"dev_commands": True,
"keypress_wait": 50, # milliseconds
"max_ccr_repetitions": 16,
"atom_palette_wait": 30, # hundredths of a second
"integer_remap_opt_in": False,
"short_integer_opt_out": False,
"integer_remap_crash_fix": False,
"print_rdescripts": True,
"history_playback_delay_secs": 1.0,
"legion_vertical_columns": 30,
"legion_downscale_factor": "auto",
"use_aenea": False,
"hmc": True,
"ccr_on": True,
"dragonfly_pause_default": 0.003, # dragonfly _pause_default 0.02 is too slow! Caster default 0.003
},
# Grammar reloading section
"grammar_reloading": {
"reload_trigger": "timer", # manual or timer
"reload_timer_seconds": 5, # seconds
},
"formats": {
"_default": {
"text_format": [5, 0],
"secondary_format": [1, 0],
},
"C plus plus": {
"text_format": [3, 1],
"secondary_format": [2, 1],
},
"C sharp": {
"text_format": [3, 1],
"secondary_format": [2, 1],
},
"Dart": {
"text_format": [3, 1],
"secondary_format": [2, 1],
},
"HTML": {
"text_format": [5, 0],
"secondary_format": [5, 2],
},
"Java": {
"text_format": [3, 1],
"secondary_format": [2, 1],
},
"Javascript": {
"text_format": [3, 1],
"secondary_format": [2, 1],
},
"matlab": {
"text_format": [3, 1],
"secondary_format": [1, 3],
},
"Python": {
"text_format": [5, 3],
"secondary_format": [2, 1],
},
"Rust": {
"text_format": [5, 3],
"secondary_format": [2, 1],
},
"sequel": {
"text_format": [5, 3],
"secondary_format": [1, 3],
},
}
}
def settings(key_path, default_value=None):
"""
This should be the preferred way to use settings.SETTINGS,
a KeyError-safe function call to access the settings dict.
"""
dv = False if default_value is None else default_value
if SETTINGS is None:
return dv
value = SETTINGS
for k in key_path:
if k in value:
value = value[k]
else:
return dv
return value
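# Illustrative sketch (not part of the original file): the key path is walked one
# level at a time, and the default is returned if SETTINGS is uninitialized or any
# key along the path is missing, e.g.
#
#   log_path = settings(["paths", "LOG_PATH"], default_value="")
#   mic_mode = settings(["engine", "mic_mode"], default_value="on")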
def save_config():
"""
Save the current in-memory settings to disk
"""
_save(SETTINGS, _SETTINGS_PATH)
def initialize():
global SETTINGS, SYSTEM_INFORMATION
global _BASE_PATH, _USER_DIR, _SETTINGS_PATH
if SETTINGS is not None:
return
# calculate prerequisites
SYSTEM_INFORMATION = _get_platform_information()
_BASE_PATH = str(Path(__file__).resolve().parent.parent)
_USER_DIR = user_data_dir(appname="caster", appauthor=False)
_SETTINGS_PATH = str(Path(_USER_DIR).joinpath("settings/settings.toml"))
for directory in ["data", "rules", "transformers", "hooks", "sikuli", "settings"]:
d = Path(_USER_DIR).joinpath(directory)
d.mkdir(parents=True, exist_ok=True)
# Kick everything off.
SETTINGS = _init(_SETTINGS_PATH)
_debugger_path = SETTINGS["paths"]["REMOTE_DEBUGGER_PATH"] # pylint: disable=invalid-sequence-index
if _debugger_path not in sys.path and os.path.isdir(_debugger_path):
sys.path.append(_debugger_path)
printer.out("Caster User Directory: {}".format(_USER_DIR))
| [] |
2024-01-10 | V-Sense/Aesthetic-Image-Captioning-ICCVW-2019 | lda_with_n_grams.py | from __future__ import print_function
import json
import io
import pdb
from gensim import corpora
import itertools
from collections import Counter
from nltk.corpus import stopwords
import gensim
from gensim.models.coherencemodel import CoherenceModel
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import wordnet
import numpy as np
import visdom
import os
from random import shuffle
from torchvision import transforms
from PIL import Image
import torch
all_tokens = []
comment_file = 'CLEAN_AVA_FULL_AFTER_SUBJECTIVE_CLEANING.json'
img_src = '/home/koustav/Datasets/AVADataSet/'
db = json.load(io.open(comment_file, 'r', encoding = 'utf-8'))
imgs = db['images']
lda_vocab = io.open('LDA_VOCAB.txt','w', encoding = 'utf-8')
discarded_uni = io.open('Discarded_Unigrams.txt','w', encoding = 'utf-8')
discarded_bi = io.open('Discarded_Bigrams.txt','w', encoding = 'utf-8')
stop = (set(stopwords.words('english')) | set(['.','?', '!', ','])) - set(['above', 'below', 'before', 'after', 'too', 'very'])
discarded_uni_list = []
discarded_bi_list = []
lemmatizer = WordNetLemmatizer()
def prepare_visuals(image_list):
topic, image_list = image_list
def splitter(n, s):
pieces = s.split(' ')
return(" ".join(pieces[i:i+n]) for i in xrange(0, len(pieces), n))
global vis_1
transform_to_tensor = transforms.ToTensor()
T_tensor = []
caption = 'Topic Number : ' + unicode(topic) + ' '
for c, img in enumerate(image_list):
I = Image.open(img[0]).convert('RGB')
caption += '( ' + unicode(c) + ' ) ' + img[1] + ' '
T_tensor.append(transform_to_tensor(I.resize([128,128])))
vis_1.images(torch.stack(T_tensor, 0), opts = {'nrow' : len(image_list) + 2, 'caption' : caption})
def filter_tokens(poss):
global discarded_uni_list, discarded_bi_list
n_flag = False
if len(poss) == 1:
if poss[0][1] in ['NOUN']:
n_flag = True
if not n_flag:
discarded_uni_list+= [poss[0][0]]
return n_flag
else:
if poss[0][1] in ['NOUN', 'ADJ', 'ADV'] and poss[1][1] in ['NOUN', 'ADJ']:
n_flag = True
if not n_flag:
discarded_bi_list += [poss[0][0] + ' ' + poss[1][0]]
return n_flag
def lemmatize(pos):
global lemmatizer
if pos[1] == 'NOUN':
return (lemmatizer.lemmatize(pos[0], wordnet.NOUN), pos[1])
elif pos[1] == 'VERB':
return (lemmatizer.lemmatize(pos[0], wordnet.VERB), pos[1])
elif pos[1] == 'ADJ':
return (lemmatizer.lemmatize(pos[0], wordnet.ADJ), pos[1])
elif pos[1] == 'ADV':
return (lemmatizer.lemmatize(pos[0], wordnet.ADV), pos[1])
else:
return pos
def collect_tokens(comment):
unigrams = comment['unigrams']
bigrams = comment['bigrams']
filtered_tokens = [[unigram] for unigram in unigrams] + bigrams
all_tokens = [' '.join([i[0] for i in n_grams]) for n_grams in filtered_tokens]
return all_tokens
def filter_comments_on_count(comment):
global cw
new_comment = []
for c in comment:
if (len(c.split()) == 1 and cw[c] > 5) or (len(c.split()) == 2 and cw[c] > 5): # only if unigram count is more than 5 and bigram count is more than 3
new_comment.append(c)
return new_comment
# Use/Modify this function depending on the strategy of how the CNN is trained. Move this to a different script if more convenient
def find_topic(text_corpus_clean_new, ldamodel, num_topics, topics_each_image_1):
global topics_word_list, topic_probs, topic_words
topic_labels = []
topics_word_list = ldamodel.print_topics(num_topics = num_topics, num_words=25)
topic_words = [[i.split('*')[1].strip().strip('\"') for i in temp_topic_words[1].split('+')] for temp_topic_words in topics_word_list]
topic_probs = [[float(i.split('*')[0].strip()) for i in temp_topic_words[1].split('+')] for temp_topic_words in topics_word_list]
for tokens, topics in zip(text_corpus_clean_new, topics_each_image_1):
this_topic_words = [topic_words[i] for i in topics]
n_matches = [len(set(tokens) & set(words)) for words in this_topic_words]
labels = [topics[i] for i in np.where(np.array(n_matches) == np.max(n_matches))[0]]
if len(labels) > 1:
first_w_probs = []
for lab in labels:
first_w_probs.append(topic_probs[lab][0])
topic_labels.append(labels[np.argmax(first_w_probs)])
else:
topic_labels.append(labels[0])
return topic_labels
def filter_topics (img_topic):
global ldamodel, topics_word_list, topic_probs
img, topic = img_topic
if topic_probs[topic][0] > 0.05:
img['Label_LDA'] = topic
return True
else:
return False
text_corpus_clean = []
list_of_clean_tokens = []
for count, img in enumerate(imgs):
if count % 1000 == 0:
print ('%d / %d images processed'%(count, len(imgs)))
comments = img['sentences']
all_tokens = map(collect_tokens, comments)
text_corpus_clean.append(all_tokens)
list_of_clean_tokens += list(itertools.chain.from_iterable(all_tokens))
lda_vocab.write("\n".join([unicode(i) + ' '+ unicode(j) for i,j in Counter(list_of_clean_tokens).most_common()]))
cw = Counter(list_of_clean_tokens)
text_corpus_clean_new = []
for count_1, comments in enumerate(text_corpus_clean):
#text_corpus_clean_new.append(list(itertools.chain.from_iterable(map(filter_comments_on_count, comments))))
text_corpus_clean_new.append(list(itertools.chain.from_iterable(comments)))
dictionary = corpora.Dictionary(text_corpus_clean_new)
dictionary.filter_extremes(no_below=30, no_above=0.10)
doc_term_matrix = [dictionary.doc2bow(doc) for doc in text_corpus_clean_new]
print ('dictionary shape : %d \ndoc_term_matrix shape : %d'%(len(dictionary), len(doc_term_matrix)))
print('Starting LDA')
Lda = gensim.models.ldamulticore.LdaMulticore
for num_topics in range(200,201)[::20]:
vis_1 = visdom.Visdom( env = 'Topic-Visualization_After_New_Subjectivity_'+ str(num_topics))
vis_1.close()
ldamodel = Lda(doc_term_matrix, num_topics = num_topics, id2word = dictionary, passes = 200, workers = 15, iterations = 5000, chunksize = 20000 )
cm = CoherenceModel(model=ldamodel, corpus=doc_term_matrix, coherence='u_mass')
cm_score = cm.get_coherence()
#pdb.set_trace()
topics_each_image_1 = np.argsort(np.array(ldamodel.get_document_topics(doc_term_matrix, minimum_probability = 0))[:,:,1], axis = 1)[:,-10:]
topics_each_image_2 = find_topic(text_corpus_clean_new, ldamodel, num_topics, topics_each_image_1)
#pdb.set_trace()
im_info = [(os.path.join(img_src, img['filename']),' || '.join([sent['clean'] for sent in img['sentences']])) for img in imgs]
topic_counter = Counter(topics_each_image_2).most_common()
image_list = []
for topic, count in topic_counter:
#pdb.set_trace()
indices = np.where(topics_each_image_2 == topic)[0]
shuffle(indices)
indices = indices[0:16]
image_list.append((topic, [im_info[i] for i in indices]))
#pdb.set_trace()
#pdb.set_trace()
topics = ldamodel.print_topics(num_topics = num_topics, num_words=25)
topic_summaries = [unicode(t) + ' ' + unicode(c) + ' '+ unicode(topics[t][1]) for t,c in topic_counter]
print ('%d : %f'%(num_topics, cm_score))
with io.open('Iterative_LDA/_temp_topics_iteration_'+str(num_topics)+'.txt','w', encoding = 'utf-8') as f1:
print (unicode(cm_score), file = f1)
print('\n'.join(topic_summaries), file= f1)
map(prepare_visuals, image_list)
new_imgs = [img[0] for img in filter(filter_topics, zip(imgs, topics_each_image_2))]
labels = [img['Label_LDA'] for img in new_imgs]
label_hash = dict(zip(Counter(labels).keys(),range(len(Counter(labels).keys()))))
for img in new_imgs: img['Label_LDA'] = label_hash[img['Label_LDA']]
pdb.set_trace()
#with open('Iterative_LDA/_temp_topics_iteration_'+str(iteration)+'.txt','w') as f1:
# print('\n'.join(map(str,ldamodel.print_topics(num_topics = 50, num_words=20))), file= f1)
#print(Counter(np.argmax(prob_dist,axis = 1).tolist()), file= f1)
#pickle.dump([ldamodel,dictionary], open('Iterative_LDA/Models/LDA_AVA' + '50' + '_' + str(iteration) + '.p','w'))
#pdb.set_trace() | [] |
2024-01-10 | VAlduinV/portfolio | photoshare_api~src~repository~models.py | import openai
from fastapi import HTTPException, status
from src.conf.config import settings
from src.constants import *
openai.api_key = settings.openai_api_key
async def generate_image(description: str):
try:
url = openai.Image.create(
prompt=description,
n=1,
size="1024x1024"
)['data'][0]['url']
except Exception as err:
raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail=NO_TOKENS)
return url
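# Minimal usage sketch (not part of the original file): the coroutine must be awaited,
# e.g. from an async FastAPI route handler; the prompt text is a placeholder:
#
#   url = await generate_image("a watercolor painting of a lighthouse at dusk")
#
# On an OpenAI failure it raises HTTP 409 with the NO_TOKENS detail from src.constants.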
| [] |
2024-01-10 | SoulNaturalist/AutoModeratorTelegramChatGPT | examples~ban_with_counter.py | import openai
from googletrans import Translator
from aiogram import Bot, Dispatcher, executor, types
"""
Example with a counter instead of an instant ban
"""
class AutoModeration:
translator = Translator()
def __init__(self, openai_token: str, ban_words: list, ban: bool, language: str) -> None:
self.openai_token = openai_token
self.ban_words = ban_words
self.ban = ban
self.language = language
def gen_context_msg_gpt(self, msg: str, ban_words: str) -> str:
if self.ban_words:
return f"""Hi, read this message\n{msg} and if it contains at least one word of their list - {ban_words}\nAlso, do you think this message is spam?, say yes or no"""
else:
return """Determine whether this message is spam or not, if yes, write yes in the answer"""
def send_question_chatgpt(self, msg: str) -> bool:
if self.language == "ru":
content_to_chatgpt = self.translator.translate(self.gen_context_msg_gpt(msg, self.ban_words), src="ru", dest="en").text
else:
content_to_chatgpt = self.gen_context_msg_gpt(msg, self.ban_words)
openai.api_key = self.openai_token
messages = [{"role": "user", "content": content_to_chatgpt}]
chatgpt_response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
response_from_chatgpt = chatgpt_response["choices"][0]["message"]["content"]
        return "да" in response_from_chatgpt.lower() or "yes" in response_from_chatgpt.lower() or "contains" in response_from_chatgpt.lower()
class TelegramBot:
ban_counter = {}
def __init__(self, telegram_token:str, counter_ban:int) -> None:
self.telegram_token = telegram_token
self.counter_for_ban = counter_ban
def start(self):
bot = Bot(token=self.telegram_token)
dp = Dispatcher(bot)
@dp.message_handler(chat_type=types.ChatType.SUPERGROUP)
async def spam_handler_supergroup(msg: types.Message):
user_id = msg["from"]["id"]
chat_id = msg["chat"]["id"]
msg_id = msg["message_id"]
moderation_class = AutoModeration("open ai token", ["bruh"], True, "ru")
is_spam = moderation_class.send_question_chatgpt(msg.text)
if is_spam and moderation_class.ban:
await bot.delete_message(chat_id, msg_id)
if user_id not in self.ban_counter:
self.ban_counter[user_id] = 1
else:
self.ban_counter[user_id] = self.ban_counter[user_id] + 1
if self.ban_counter[user_id] == self.counter_for_ban:
await bot.ban_chat_member(chat_id, user_id)
elif is_spam:
await bot.delete_message(chat_id, msg_id)
executor.start_polling(dp)
if __name__ == "__main__":
TelegramBot("telegram bot token", 3).start() | [] |
2024-01-10 | JJJ000/ai-docsy-copy | ai_helper.py | import openai
from pptx import Presentation
import json
github_link = "https://github.com/jstockwin/py-pdf-parser"
qn1 = "provide 1-sentence description for this github repo https://github.com/jstockwin/py-pdf-parser "
qn2 = "provide categories for this github repo https://github.com/jstockwin/py-pdf-parser and return in array of string format, with double quote"
qn3 = "write me a tech doc for this github repo https://github.com/jstockwin/py-pdf-parser,including 1 intro paragraph and 2-4 H2 headers, in markdown format."
basic_format = """---
categories: {}
tags: {}
title: {}
linkTitle: {}
date: 2023-02-27
description: {}
---
{}
"""
def askGPT(text):
openai.api_key = ""
completion = openai.Completion.create(
engine="text-davinci-003",
prompt=text,
max_tokens=2048,
n=1,
stop=None,
temperature=0.5,
)
r = completion.choices[0].text
print(r)
print('\n')
return r
def main():
print("start")
des = askGPT(qn1)
categories = askGPT(qn2)
body = askGPT(qn3)
name = github_link.split("/")[-1]
title = "\"" + name + " Github Repo Technical Documentation\""
final = basic_format.format(categories.strip(), categories.strip(), title, title, "\"" + des.strip() + "\"", body.strip())
print("done with asking openAI.")
with open("/Users/mengting_li/Desktop/personal/ai-docsy/content/en/docs/Getting started/{}.md".format(name), "w") as f:
f.write(final)
main()
| [] |
2024-01-10 | morancium/webautomation | upsert.py | import chromadb
import json
import os,random
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma, Pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from dotenv import load_dotenv
load_dotenv("nlq to code\.env")
OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
persist_directory='db'
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=0)
embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
base_path="scraping\md"
dir_lst=os.listdir("scraping\md")
random.shuffle(dir_lst)
total_token=0
for dir in dir_lst:
all_text=""
for file in os.listdir(os.path.join(base_path,dir)):
with open(os.path.join(base_path,dir,file),'r',encoding="utf8") as f:
all_text += f.read()
total_token+=len(all_text)
texts=text_splitter.split_text(all_text)
print(len(all_text))
print(len(texts))
for t in texts:
vectordb=Chroma.from_texts([t], embedding=embeddings,persist_directory=persist_directory)
vectordb.persist()
vectordb = None
print(dir_lst)
print(total_token)
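# Hypothetical follow-up sketch (not part of the original file): once persisted, the
# same directory and embedding function can be reused for similarity search:
#
#   vectordb = Chroma(persist_directory=persist_directory, embedding_function=embeddings)
#   docs = vectordb.similarity_search("how do I locate an element by CSS selector?", k=4)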
| [] |
2024-01-10 | philmui/asdrp2023 | 05_embeddings~src~app-02-chroma-BEGIN.py | #
# Using Embeddings with Chroma and LlamaIndex
#
# Leaderboard for MTEB:
# https://huggingface.co/spaces/mteb/leaderboard
#
from pathlib import Path
import sys
from llama_index import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
load_index_from_storage
)
from llama_index.vector_stores import ChromaVectorStore
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index.embeddings import LangchainEmbedding
import chromadb
from chromadb.config import Settings
DATA_DIR = "../data"
STORAGE_DIR = "chroma_db"
# EMBEDDING_MODEL = "sentence-transformers/all-mpnet-base-v2"
EMBEDDING_MODEL = "intfloat/e5-large-v2"
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
import logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
def get_index():
index = None
embed_model = None # need to create our own embedding here
chroma_client = chromadb.Client(
Settings(chroma_db_impl="duckdb+parquet", persist_directory=STORAGE_DIR)
)
try:
# get an existing chroma collection
# instantiate a vector store for querying
# create an index from the vector store
pass
except ValueError as ve:
# did not get a valid chroma collection, let's create one
# get news articles from our local files
# create an index from the newly ingested docs
# save the index
pass
return index
def get_response(index, query):
query_engine = index.as_query_engine()
return query_engine.query(query)
if __name__ == '__main__':
index = get_index()
while True:
query = input("What question do you have? ('quit' to quit) ")
if "quit" in query: break
response = get_response(index, query)
print(response)
| [] |
2024-01-10 | philmui/asdrp2023 | 07_plugins~src~sk_fun.py | import os
import chainlit as cl
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv(".env"))
LLM_MODEL_NAME = "text-davinci-003" # OpenAI
SKILL_DIR = "../skills"
SKILL_COLLECTION = "FunSkill"
import semantic_kernel as sk
from semantic_kernel.connectors.ai.open_ai import OpenAITextCompletion
kernel = sk.Kernel()
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
OPENAI_ORG_ID = os.environ["OPENAI_ORG_ID"]
kernel.add_text_completion_service(
service_id="dv",
service=OpenAITextCompletion(
LLM_MODEL_NAME,
OPENAI_API_KEY,
OPENAI_ORG_ID
)
)
skill = kernel.import_semantic_skill_from_directory(SKILL_DIR, SKILL_COLLECTION)
joke_skill = skill.get("Joke")
excuse_skill = skill.get("Excuses")
poem_skill = skill.get("Limerick")
SOLICITATION = "Tell me a subject about a joke, an excuse, or a poem!"
def route_message(message: str):
if "joke" in message.lower():
return joke_skill(message)
elif "excuse" in message.lower():
return excuse_skill(message)
elif "poem" in message.lower():
return poem_skill(message)
else:
return SOLICITATION
@cl.on_message
async def main(message: str):
response = route_message(message)
await cl.Message(
content=f"{response}"
).send()
@cl.on_chat_start
async def start():
await cl.Message(
content=SOLICITATION
).send()
| [] |
2024-01-10 | philmui/asdrp2023 | 05_embeddings~src~app-02-chroma-LIVE.py | #
# Using Embeddings with Chroma and LlamaIndex
#
# Chroma DB: https://www.trychroma.com/
#
# Leaderboard for MTEB:
# https://huggingface.co/spaces/mteb/leaderboard
#
from pathlib import Path
import sys
from llama_index import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
load_index_from_storage
)
from llama_index.vector_stores import ChromaVectorStore
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index.embeddings import LangchainEmbedding
import chromadb
from chromadb.config import Settings
DATA_DIR = "../data"
STORAGE_DIR = "chroma_db"
# EMBEDDING_MODEL = "sentence-transformers/all-mpnet-base-v2"
EMBEDDING_MODEL = "intfloat/e5-large-v2"
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
import logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
def get_index():
index = None
embed_model = LangchainEmbedding(
HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL)
)
chroma_client = chromadb.Client(
Settings(chroma_db_impl="duckdb+parquet", persist_directory=STORAGE_DIR)
)
try:
chroma_collection = chroma_client.get_collection("news")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_vector_store(
vector_store=vector_store, storage_context=storage_context, embed_model=embed_model
)
except ValueError as ve:
chroma_collection = chroma_client.create_collection("news")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
docs = SimpleDirectoryReader(DATA_DIR).load_data()
index = VectorStoreIndex.from_documents(
docs, storage_context=storage_context, embed_model=embed_model
)
chroma_client.persist()
return index
def get_response(index, query):
query_engine = index.as_query_engine()
return query_engine.query(query)
if __name__ == '__main__':
index = get_index()
while True:
query = input("What question do you have? ('quit' to quit) ")
if "quit" in query: break
response = get_response(index, query)
print(response)
| [] |
2024-01-10 | philmui/asdrp2023 | 09_agents~src~ChatAgent-notools.py | import chainlit as cl
import os
from langchain.llms import OpenAI
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv(".env"))
MODEL_NAME = "text-davinci-003"
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
def chat(query: str):
llm = OpenAI(openai_api_key=OPENAI_API_KEY,
model=MODEL_NAME,
temperature=0)
return llm(query)
@cl.on_message # for every user message
async def main(query: str):
# final answer
await cl.Message(
content=chat(query)
).send()
@cl.on_chat_start
async def start():
await cl.Message(
content="Hello there!"
).send() | [] |
2024-01-10 | philmui/asdrp2023 | 03_chatbot~src~app-04-few-shots.py | import asyncio
import os
import chainlit as cl
from langchain.prompts import (
PromptTemplate,
FewShotPromptTemplate
)
from langchain.llms import OpenAI
from langchain.prompts.example_selector import SemanticSimilarityExampleSelector
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv(".env"))
MODEL_NAME = "text-davinci-003"
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
examples = [
{"word": "happy", "antonym": "sad"},
{"word": "content", "antonym": "dissatisfied"},
{"word": "peaceful", "antonym": "belligerent"},
{"word": "tall", "antonym": "short"},
{"word": "high", "antonym": "low"},
{"word": "energetic", "antonym": "lethargic"},
{"word": "fast", "antonym": "slow"},
{"word": "sunny", "antonym": "gloomy"},
{"word": "clear", "antonym": "cloudy"},
{"word": "windy", "antonym": "calm"},
]
example_formatter_template = \
"""
Word: {word}
Antonym: {antonym}
"""
example_prompt = PromptTemplate(
input_variables=["word", "antonym"],
template=example_formatter_template,
)
@cl.on_message # for every user message
def main(input_word: str):
example_selector = SemanticSimilarityExampleSelector.from_examples (
examples,
# Class to create embeddings
OpenAIEmbeddings(),
# VectorStore class to store embeddings and do similarity search
Chroma,
# Number of examples to produce
k=2
)
fewshot_prompt = FewShotPromptTemplate(
# We provide an ExampleSelector instead of examples.
example_selector=example_selector,
example_prompt=example_prompt,
prefix="Give the antonym of every input word",
suffix="Word: {word}\nAntonym:",
input_variables=["word"],
)
llm = OpenAI(openai_api_key=OPENAI_API_KEY,
model=MODEL_NAME)
response = llm(fewshot_prompt.format(word=input_word))
response += "\n\n=> Enter a word:"
# final answer
asyncio.run(
cl.Message(
content=response
).send()
)
@cl.on_chat_start
def start():
output = ""
for e in examples:
output += f"word: {e['word']} <=> "
output += f"antonym: {e['antonym']}\n"
output += "\n\n=> Enter a word:"
asyncio.run(
cl.Message(
content=output
).send()
)
| [
"\nWord: {word}\nAntonym: {antonym}\n",
"Give the antonym of every input word",
"Word: {word}\nAntonym:",
"antonym"
] |
2024-01-10 | philmui/asdrp2023 | 09_agents~src~ChatAgent-withsearch.py | import chainlit as cl
import os
from langchain.llms import OpenAI
from dotenv import load_dotenv, find_dotenv
from langchain.agents import AgentType, Tool, load_tools
from langchain.memory import ConversationBufferMemory
from langchain import OpenAI
from langchain.utilities import SerpAPIWrapper
from langchain.agents import initialize_agent
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv(".env"))
MODEL_NAME = "text-davinci-003"
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
search = SerpAPIWrapper()
tools = [
Tool(
name = "Current Search",
func=search.run,
description="useful for when you need to answer questions about current events or the current state of the world"
),
]
memory = ConversationBufferMemory(memory_key="chat_history")
def chat(query: str):
llm = OpenAI(openai_api_key=OPENAI_API_KEY,
model=MODEL_NAME,
temperature=0)
agent_chain = initialize_agent(
tools=tools,
llm=llm,
agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
verbose=True,
memory=memory
)
return agent_chain.run(input=query)
@cl.on_message # for every user message
async def main(query: str):
response_text = chat(query)
# final answer
await cl.Message(
content=response_text
).send()
@cl.on_chat_start
async def start():
await cl.Message(
content="Hello there!"
).send() | [] |
2024-01-10 | philmui/asdrp2023 | 07_plugins~src~sk_skills01_declarative.py | import os
import chainlit as cl
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv(".env"))
LLM_MODEL_NAME = "text-davinci-003" # OpenAI
SKILL_DIR = "../skills"
SKILL_COLLECTION = "FunSkill"
import semantic_kernel as sk
from semantic_kernel.connectors.ai.open_ai import OpenAITextCompletion
kernel = sk.Kernel()
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
OPENAI_ORG_ID = os.environ["OPENAI_ORG_ID"]
# add text completion service
# // TODO
SOLICITATION = "What do you want a joke to be about?"
@cl.on_message
async def main(message: str):
    response = ""  # // TODO
await cl.Message(
content=f"{response}"
).send()
@cl.on_chat_start
async def start():
await cl.Message(
content=SOLICITATION
).send()
| [] |
2024-01-10 | philmui/asdrp2023 | 03_chatbot~src~app-03-instruct-template.py | import asyncio
import os
import chainlit as cl
from langchain.prompts import (
PromptTemplate,
)
from langchain.llms import OpenAI
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv(".env"))
MODEL_NAME = "text-davinci-003"
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
prompt_template="""
You are a helpful assistant that truthfully respond to a user's query or question.
User's query: {query}
If you don't know the answer, simply answer: I don't know.
Most importantly, do not respond with false information.
"""
prompt=PromptTemplate(
input_variables=["query"],
template=prompt_template
)
@cl.on_message # for every user message
def main(message: str):
llm = OpenAI(openai_api_key=OPENAI_API_KEY,
model=MODEL_NAME)
response = llm(prompt.format(query=message))
# final answer
asyncio.run(
cl.Message(
content=response
).send()
)
@cl.on_chat_start
def start():
asyncio.run(
cl.Message(
content="Hello there!"
).send()
) | [
"\nYou are a helpful assistant that truthfully respond to a user's query or question.\n\nUser's query: {query}\n\nIf you don't know the answer, simply answer: I don't know. \nMost importantly, do not respond with false information.\n",
"t know the answer, simply answer: I don"
] |
2024-01-10 | philmui/asdrp2023 | 07_plugins~src~sk_skills02_inline.py | import os
import chainlit as cl
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv(".env"))
LLM_MODEL_NAME = "text-davinci-003" # OpenAI
SKILL_DIR = "../skills"
SKILL_COLLECTION = "FunSkill"
import semantic_kernel as sk
from semantic_kernel.connectors.ai.open_ai import OpenAITextCompletion
kernel = sk.Kernel()
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
OPENAI_ORG_ID = os.environ["OPENAI_ORG_ID"]
# add text completion service
kernel.add_text_completion_service(
service_id="dv",
service=OpenAITextCompletion(
LLM_MODEL_NAME,
OPENAI_API_KEY,
OPENAI_ORG_ID
)
)
# need to create an inline prompt
prompt = """Summarize the content below in less than 2 sentences:
{{$input}}
"""
SOLICITATION = "Type in some text for me to summarize!"
# add text completion service
summarize_skill = kernel.create_semantic_function(
prompt,
max_tokens=2000,
temperature=0.2,
top_p=0.5)
@cl.on_message
async def main(message: str):
response = await summarize_skill.invoke_async(message)
await cl.Message(
content=f"{response}"
).send()
@cl.on_chat_start
async def start():
await cl.Message(
content=SOLICITATION
).send()
| [
"Summarize the content below in less than 2 sentences:\n{{$input}}\n"
] |
2024-01-10 | philmui/asdrp2023 | 09_agents~src~ChatAgent-withtools.py | import chainlit as cl
import os
from langchain.llms import OpenAI
from dotenv import load_dotenv, find_dotenv
from langchain import OpenAI
from langchain.agents import AgentType, Tool, load_tools, initialize_agent
from langchain.memory import ConversationBufferMemory
from langchain.tools import DuckDuckGoSearchRun
from langchain.utilities import SerpAPIWrapper
load_dotenv(find_dotenv(".env"))
MODEL_NAME = "text-davinci-003"
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
memory = ConversationBufferMemory(memory_key="chat_history")
def chat(query: str):
llm = OpenAI(openai_api_key=OPENAI_API_KEY,
model=MODEL_NAME,
max_tokens=2048,
temperature=0)
toolkit = load_tools(
["serpapi", "open-meteo-api", "news-api",
"python_repl", "wolfram-alpha"],
llm=llm,
serpapi_api_key=os.getenv('SERPAPI_API_KEY'),
news_api_key=os.getenv('NEWS_API_KEY'),
tmdb_bearer_token=os.getenv('TMDB_BEARER_TOKEN')
)
## toolkit += [DuckDuckGoSearchRun()]
agent_chain = initialize_agent(
tools=toolkit,
llm=llm,
agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
verbose=True,
memory=memory
)
return agent_chain.run(input=query)
@cl.on_message # for every user message
async def main(query: str):
response_text = chat(query)
# final answer
await cl.Message(
content=response_text
).send()
@cl.on_chat_start
async def start():
await cl.Message(
content="Hello there!"
).send() | [] |
2024-01-10 | philmui/asdrp2023 | 07_plugins~src~sk_skills04_context_chat.py | import os
import chainlit as cl
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv(".env"))
LLM_MODEL_NAME = "gpt-3.5-turbo" # OpenAI
import semantic_kernel as sk
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion
kernel = sk.Kernel()
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
OPENAI_ORG_ID = os.environ["OPENAI_ORG_ID"]
kernel.add_chat_service(
service_id="chat-gpt",
service=OpenAIChatCompletion(
LLM_MODEL_NAME,
OPENAI_API_KEY,
OPENAI_ORG_ID
)
)
sk_prompt = """
ChatBot can have a conversation with you about any topic.
It can give explicit instructions or say 'I don't know' if it does not have an answer.
{{$history}}
User: {{$user_input}}
ChatBot: """
SOLICITATION = "Let's chat!"
chat_skill = kernel.create_semantic_function(
sk_prompt,
max_tokens=2000,
temperature=0.7,
top_p=0.5)
context = kernel.create_new_context()
context["history"] = ""
@cl.on_message
async def main(message: str) -> None:
context["user_input"] = message
response = await chat_skill.invoke_async(context=context)
await cl.Message(
content=f"{response}"
).send()
context["history"] += f"\nUser: {context['user_input']}\nChatBot: {response}\n"
print(f"=> history: {context['history']}")
@cl.on_chat_start
async def start() -> None:
await cl.Message(
content=SOLICITATION
).send()
| [
"\nChatBot can have a conversation with you about any topic.\nIt can give explicit instructions or say 'I don't know' if it does not have an answer.\n\n{{$history}}\nUser: {{$user_input}}\nChatBot: "
] |
2024-01-10 | philmui/asdrp2023 | 07_plugins~src~sk_plugin.py | import os
import chainlit as cl
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
LLM_MODEL_NAME = "text-davinci-003"
SKILL_DIR = "../skills"
SKILL_COLLECTION = "FunSkill"
import semantic_kernel as sk
from semantic_kernel.connectors.ai.open_ai import OpenAITextCompletion
kernel = sk.Kernel()
kernel.add_text_completion_service(
service_id="dv",
service=OpenAITextCompletion(
LLM_MODEL_NAME,
os.environ.get("OPENAI_API_KEY"),
os.environ.get("OPENAI_ORG_ID")
)
)
fun_skills = kernel.import_semantic_skill_from_directory(SKILL_DIR, SKILL_COLLECTION)
joke_skill = fun_skills.get("Joke")
@cl.on_message
async def main(message: str):
response = await joke_skill.invoke_async(message)
await cl.Message(
content=f"{response}"
).send()
@cl.on_chat_start
async def start():
await cl.Message(
content="Hello there!"
).send()
| [] |
2024-01-10 | philmui/asdrp2023 | 07_plugins~src~sk_skills03_chatsummary.py | import os
import chainlit as cl
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv(".env"))
LLM_MODEL_NAME = "gpt-3.5-turbo" # OpenAI
import semantic_kernel as sk
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion
kernel = sk.Kernel()
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
OPENAI_ORG_ID = os.environ["OPENAI_ORG_ID"]
# add text completion service
kernel.add_chat_service(
service_id="chat-gpt",
service=OpenAIChatCompletion(
LLM_MODEL_NAME,
OPENAI_API_KEY,
OPENAI_ORG_ID
)
)
SOLICITATION = "Type in some text for me to summarize!"
# key TODO completed below as a hedged sketch: a minimal summarization prompt
# (the exact wording is an assumption, mirroring sk_skills02_inline.py in this repo)
sk_prompt = """Summarize the content below in less than 2 sentences:
{{$input}}
"""
# create summarization skill from the prompt
summarize_skill = kernel.create_semantic_function(
sk_prompt,
max_tokens=2000,
temperature=0.2,
top_p=0.5)
@cl.on_message
async def main(message: str):
response = await summarize_skill.invoke_async(message)
await cl.Message(
content=f"{response}"
).send()
@cl.on_chat_start
async def start():
await cl.Message(
content=SOLICITATION
).send()
| [] |
2024-01-10 | philmui/asdrp2023 | 04_grounding~src~app-05-rag.py | import os
import asyncio
import chainlit as cl
from langchain.prompts import (
PromptTemplate,
)
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
from langchain.document_loaders import DirectoryLoader, TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
import chromadb
from chromadb.config import Settings
DB_DIR = "./db"
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv(".env"))
LLM_MODEL_NAME = "text-davinci-003" # OpenAI
EMBEDDING_MODEL_NAME = "text-embedding-ada-002" # OpenAI
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
# create a prompt template
prompt_template = """
You are a helpful assistant that truthfully respond to a user's query about
the books Art of War or the Prince.
User's query: {query}
If you don't know the answer, simply answer: I don't know.
Most importantly, do not respond with false information.
"""
prompt = PromptTemplate(
input_variables=['query'],
template=prompt_template
)
@cl.on_message
def main(query: str):
retriever = None
embeddings = OpenAIEmbeddings(openai_api_key = os.environ['OPENAI_API_KEY'],
model=EMBEDDING_MODEL_NAME)
if not os.path.exists(DB_DIR):
# digest the texts into chunks & store their embeddings
loader = DirectoryLoader(path="../data/", glob="**/*.txt")
docs = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=400, chunk_overlap=20)
text_chunks = text_splitter.split_documents(documents=docs)
vectordb = Chroma.from_documents(text_chunks, embeddings,
persist_directory="./db")
else:
# lookup from existing stored embeddings
vectordb = Chroma(persist_directory=DB_DIR,
embedding_function=embeddings)
retriever = vectordb.as_retriever(search_type="mmr") # maximal marginal relevance
qa = RetrievalQA.from_chain_type(llm = OpenAI(model=LLM_MODEL_NAME,
temperature=0.0),
chain_type="stuff",
retriever=retriever,
return_source_documents=True
)
try:
answer = qa({
"query": query
})
response = f"{answer['result']}\n"
for doc in answer['source_documents']:
tabbed_content = doc.page_content.replace("\n", "")
response += f"\n\t{doc.metadata['source']}: {tabbed_content[:60]}"
except Exception as e:
response = f"I don't know. Please ask another question. {e}"
asyncio.run(
cl.Message(
content=response
).send()
)
@cl.on_chat_start
def start():
asyncio.run(
cl.Message(
content="Ask me anything about The Prince or the Art of War!"
).send()
)
| [
"\nYou are a helpful assistant that truthfully respond to a user's query about \nthe books Art of War or the Prince.\n\nUser's query: {query}\n\nIf you don't know the answer, simply answer: I don't know. \nMost importantly, do not respond with false information.\n"
] |
2024-01-10 | philmui/asdrp2023 | 03_chatbot~src~app-02-chatbot.py | import asyncio
import chainlit as cl
import os
from langchain.chat_models import (
ChatOpenAI,
ChatGooglePalm,
ChatAnthropic
)
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv(".env"))
MODEL_NAME = "gpt-3.5-turbo"
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
system_prompt="""
You are a helpful assistant that truthfully respond to a user's query or question.
If you don't know the answer, simply answer: I don't know.
Most importantly, do not respond with false information.
"""
@cl.on_message # for every user message
def main(query: str):
messages = [
{'role':'system', 'content':system_prompt},
{'role':'user', 'content':query}
]
response_text=""
try:
chat = ChatOpenAI(temperature=0, model=MODEL_NAME)
response = chat.predict_messages(
[
SystemMessage(content=system_prompt),
HumanMessage(content=query)
]
)
response_text=response.content
except Exception as e:
response_text=f"no response: {e}"
# final answer
asyncio.run(
cl.Message(
content=response_text
).send()
)
@cl.on_chat_start
def start():
asyncio.run(
cl.Message(
content="Hello there!"
).send()
) | [
"\nYou are a helpful assistant that truthfully respond to a user's query or question.\n\nIf you don't know the answer, simply answer: I don't know. \nMost importantly, do not respond with false information.\n"
] |
2024-01-10 | philmui/asdrp2023 | 03_chatbot~src~app-01-simple.py | import asyncio
import chainlit as cl
import os
from langchain.llms import OpenAI
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv(".env"))
MODEL_NAME = "text-davinci-003"
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
@cl.on_message # for every user message
def main(message: str):
llm = OpenAI(openai_api_key=OPENAI_API_KEY,
model=MODEL_NAME)
response = llm(message)
# final answer
asyncio.run(
cl.Message(
content=response
).send()
)
@cl.on_chat_start
def start():
asyncio.run(
cl.Message(
content="Hello there!"
).send()
) | [] |
2024-01-10 | farizrahman4u/loopgpt | loopgpt~models~azure_openai.py | from typing import List, Dict, Optional
from loopgpt.models.openai_ import OpenAIModel
from loopgpt.utils.openai_key import get_openai_key
from loopgpt.logger import logger
import time
from openai.error import RateLimitError
import requests
import openai
def get_deployment_details(endpoint, deployment_id, api_version, api_key):
api_key = get_openai_key(api_key)
response = requests.get(
f"{endpoint}/openai/deployments/{deployment_id}?api-version={api_version}",
headers={"api-key": api_key},
)
return response.json()
def get_deployment_model(endpoint, deployment_id, api_version, api_key):
details = get_deployment_details(endpoint, deployment_id, api_version, api_key)
model = details["model"]
return {
"gpt-35-turbo": "gpt-3.5-turbo",
"gpt-4": "gpt-4",
"gpt-4-32k": "gpt-4-32k",
}[model]
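# A minimal usage sketch (the endpoint and deployment name below are hypothetical,
# not taken from this repo):
#
#   model_name = get_deployment_model(
#       "https://example.openai.azure.com",  # hypothetical Azure endpoint
#       "my-gpt4-deployment",                # hypothetical deployment id
#       "2023-03-15-preview",
#       None,                                # api_key=None falls back to the configured key
#   )
#   # -> "gpt-4" when the deployment serves the "gpt-4" model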
class AzureOpenAIModel(OpenAIModel):
"""Creates an Azure OpenAI model from a deployment ID. Can be created only when ``openai.api_type`` is set to ``azure``.
:param deployment_id: The deployment ID of the model.
:type deployment_id: str
:param api_key: The API key to use for the model.
If not specified, it is looked up from ``openai.api_key``, the ``.env`` file, or the ``OPENAI_API_KEY`` environment variable.
:type api_key: str, optional
:raises AssertionError: If ``openai.api_type`` is not set to ``azure``.
.. note::
You will also need an embedding provider deployed (e.g., text-embedding-ada-002) for creating an agent.
Example:
.. code-block:: python
import os
import openai
import loopgpt
from loopgpt.models import AzureOpenAIModel
from loopgpt.embeddings import AzureOpenAIEmbeddingProvider
openai.api_type = "azure"
openai.api_base = "https://<your deployment>.openai.azure.com/"
openai.api_version = "2023-03-15-preview"
openai.api_key = os.getenv("OPENAI_API_KEY")
model = AzureOpenAIModel("my-gpt4-deployment")
embedding_provider = AzureOpenAIEmbeddingProvider("my-embeddings-deployment")
agent = loopgpt.Agent(model=model, embedding_provider=embedding_provider)
agent.chat("Hello, how are you?")
"""
def __init__(self, deployment_id: str, api_key: Optional[str] = None):
# sanity check
assert (
openai.api_type == "azure"
), "AzureOpenAIModel can only be used with Azure API"
self.deployment_id = deployment_id
self.api_key = api_key
self.endpoint = openai.api_base
self.api_version = openai.api_version
self.model = get_deployment_model(
self.endpoint, self.deployment_id, self.api_version, self.api_key
)
def chat(
self,
messages: List[Dict[str, str]],
max_tokens: Optional[int] = None,
temperature: float = 0.8,
) -> str:
api_key = get_openai_key(self.api_key)
num_retries = 3
for _ in range(num_retries):
try:
resp = openai.ChatCompletion.create(
engine=self.deployment_id,
messages=messages,
api_key=api_key,
max_tokens=max_tokens,
temperature=temperature,
)["choices"][0]["message"]["content"]
return resp
except RateLimitError:
logger.warn("Rate limit exceeded. Retrying after 20 seconds.")
time.sleep(20)
continue
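# note: if every attempt raises RateLimitError, the loop finishes and the
# method implicitly returns None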
def config(self):
cfg = super().config()
cfg.update(
{
"deployment_id": self.deployment_id,
}
)
return cfg
@classmethod
def from_config(cls, config):
return cls(config["deployment_id"], config.get("api_key"))
| [] |
2024-01-10 | xorbitsai/inference | xinference~model~multimodal~tests~test_multimodal.py | # Copyright 2022-2023 XProbe Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import pytest
import requests
@pytest.mark.skip(reason="Cost too many resources.")
def test_restful_api_for_qwen_vl(setup):
endpoint, _ = setup
from ....client import Client
client = Client(endpoint)
model_uid = client.launch_model(
model_uid="my_controlnet",
model_name="qwen-vl-chat",
model_type="multimodal",
)
model = client.get_model(model_uid)
prompt = [
{"type": "text", "text": "What’s in this image?"},
{
"type": "image_url",
"image_url": {
"url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
},
},
]
response = model.chat(prompt=prompt)
assert "grass" in response["choices"][0]["message"]["content"]
assert "tree" in response["choices"][0]["message"]["content"]
assert "sky" in response["choices"][0]["message"]["content"]
# openai client
import openai
client = openai.Client(api_key="not empty", base_url=f"{endpoint}/v1")
completion = client.chat.completions.create(
model=model_uid,
messages=[
{
"role": "user",
"content": [
{"type": "text", "text": "What’s in this image?"},
{
"type": "image_url",
"image_url": {
"url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
},
},
],
}
],
)
assert "grass" in completion.choices[0].message.content
assert "tree" in completion.choices[0].message.content
assert "sky" in completion.choices[0].message.content
messages = [
{
"role": "user",
"content": [
{"type": "text", "text": "这是什么?"},
{
"type": "image_url",
"image_url": {
"url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg",
},
},
],
}
]
completion = client.chat.completions.create(model=model_uid, messages=messages)
assert "女" in completion.choices[0].message.content
assert "狗" in completion.choices[0].message.content
assert "沙滩" in completion.choices[0].message.content
messages.append(completion.choices[0].message.model_dump())
messages.append({"role": "user", "content": "框出图中击掌的位置"})
completion = client.chat.completions.create(model=model_uid, messages=messages)
assert "击掌" in completion.choices[0].message.content
assert "<ref>" in completion.choices[0].message.content
assert "<box>" in completion.choices[0].message.content
# Test base64 image
response = requests.get(
"http://i.epochtimes.com/assets/uploads/2020/07/shutterstock_675595789-600x400.jpg"
)
# https://platform.openai.com/docs/guides/vision/uploading-base-64-encoded-images
# Encode the downloaded image as base64 (see the OpenAI vision guide linked above)
b64_img = base64.b64encode(response.content).decode("utf-8")
completion = client.chat.completions.create(
model=model_uid,
messages=[
{
"role": "user",
"content": [
{"type": "text", "text": "图中有几条鱼?"},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{b64_img}",
},
},
],
}
],
)
assert "四条" in completion.choices[0].message.content
| [
"[{'type': 'text', 'text': '图中有几条鱼?'}, {'type': 'image_url', 'image_url': {'url': 'data:image/jpeg;base64,PLACEHOLDER'}}]",
"[{'type': 'text', 'text': '这是什么?'}, {'type': 'image_url', 'image_url': {'url': 'https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg'}}]",
"框出图中击掌的位置",
"[{'type': 'text', 'text': 'What’s in this image?'}, {'type': 'image_url', 'image_url': {'url': 'https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg'}}]"
] |
2024-01-10 | xorbitsai/inference | xinference~types.py | # Copyright 2022-2023 XProbe Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Dict, ForwardRef, Iterable, List, Optional, Union
from pydantic import (
BaseModel,
create_model,
create_model_from_typeddict,
validate_arguments,
)
from typing_extensions import Literal, NotRequired, TypedDict
from .fields import (
echo_field,
frequency_penalty_field,
logprobs_field,
max_tokens_field,
none_field,
presence_penalty_field,
repeat_penalty_field,
stop_field,
stream_field,
stream_interval_field,
temperature_field,
top_k_field,
top_p_field,
)
SPECIAL_TOOL_PROMPT = "<TOOL>"
class Image(TypedDict):
url: Optional[str]
b64_json: Optional[str]
class ImageList(TypedDict):
created: int
data: List[Image]
class EmbeddingUsage(TypedDict):
prompt_tokens: int
total_tokens: int
class EmbeddingData(TypedDict):
index: int
object: str
embedding: List[float]
class Embedding(TypedDict):
object: Literal["list"]
model: str
data: List[EmbeddingData]
usage: EmbeddingUsage
class Document(TypedDict):
text: str
class DocumentObj(TypedDict):
index: int
relevance_score: float
document: Optional[Document]
class Rerank(TypedDict):
id: str
results: List[DocumentObj]
class CompletionLogprobs(TypedDict):
text_offset: List[int]
token_logprobs: List[Optional[float]]
tokens: List[str]
top_logprobs: List[Optional[Dict[str, float]]]
class CompletionChoice(TypedDict):
text: str
index: int
logprobs: Optional[CompletionLogprobs]
finish_reason: Optional[str]
class CompletionUsage(TypedDict):
prompt_tokens: int
completion_tokens: int
total_tokens: int
class CompletionChunk(TypedDict):
id: str
object: Literal["text_completion"]
created: int
model: str
choices: List[CompletionChoice]
class Completion(TypedDict):
id: str
object: Literal["text_completion"]
created: int
model: str
choices: List[CompletionChoice]
usage: CompletionUsage
class ChatCompletionMessage(TypedDict):
role: str
content: Optional[str]
user: NotRequired[str]
tool_calls: NotRequired[List]
class ChatCompletionChoice(TypedDict):
index: int
message: ChatCompletionMessage
finish_reason: Optional[str]
class ChatCompletion(TypedDict):
id: str
object: Literal["chat.completion"]
created: int
model: str
choices: List[ChatCompletionChoice]
usage: CompletionUsage
class ChatCompletionChunkDelta(TypedDict):
role: NotRequired[str]
content: NotRequired[str]
class ChatCompletionChunkChoice(TypedDict):
index: int
delta: ChatCompletionChunkDelta
finish_reason: Optional[str]
class ChatCompletionChunk(TypedDict):
id: str
model: str
object: Literal["chat.completion.chunk"]
created: int
choices: List[ChatCompletionChunkChoice]
class ChatglmCppModelConfig(TypedDict, total=False):
pass
class ChatglmCppGenerateConfig(TypedDict, total=False):
max_tokens: int
top_p: float
temperature: float
stream: bool
class QWenCppModelConfig(TypedDict, total=False):
pass
class QWenCppGenerateConfig(TypedDict, total=False):
max_tokens: int
top_p: float
temperature: float
stream: bool
StoppingCriteria = Callable[[List[int], List[float]], bool]
class StoppingCriteriaList(List[StoppingCriteria]):
def __call__(self, input_ids: List[int], logits: List[float]) -> bool:
return any([stopping_criteria(input_ids, logits) for stopping_criteria in self])
LogitsProcessor = Callable[[List[int], List[float]], List[float]]
class LogitsProcessorList(List[LogitsProcessor]):
def __call__(self, input_ids: List[int], scores: List[float]) -> List[float]:
for processor in self:
scores = processor(input_ids, scores)
return scores
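# A tiny illustration (the processor below is hypothetical, not part of this module)
# of how the callable-list wrapper composes:
#
#   def cap_scores(input_ids, scores):
#       return [min(s, 10.0) for s in scores]
#
#   processors = LogitsProcessorList([cap_scores])
#   processors([1, 2, 3], [0.5, 42.0])  # -> [0.5, 10.0]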
class LlamaCppGenerateConfig(TypedDict, total=False):
suffix: Optional[str]
max_tokens: int
temperature: float
top_p: float
logprobs: Optional[int]
echo: bool
stop: Optional[Union[str, List[str]]]
frequency_penalty: float
presence_penalty: float
repetition_penalty: float
top_k: int
stream: bool
tfs_z: float
mirostat_mode: int
mirostat_tau: float
mirostat_eta: float
model: Optional[str]
grammar: Optional[Any]
stopping_criteria: Optional["StoppingCriteriaList"]
logits_processor: Optional["LogitsProcessorList"]
tools: Optional[List[Dict]]
class LlamaCppModelConfig(TypedDict, total=False):
n_ctx: int
n_parts: int
n_gpu_layers: int
seed: int
f16_kv: bool
logits_all: bool
vocab_only: bool
use_mmap: bool
use_mlock: bool
embedding: bool
n_threads: Optional[int]
n_batch: int
last_n_tokens_size: int
lora_base: Optional[str]
lora_path: Optional[str]
low_vram: bool
n_gqa: Optional[int] # (TEMPORARY) must be 8 for llama2 70b
rms_norm_eps: Optional[float] # (TEMPORARY)
verbose: bool
class PytorchGenerateConfig(TypedDict, total=False):
temperature: float
repetition_penalty: float
top_p: float
top_k: int
stream: bool
max_tokens: int
echo: bool
stop: Optional[Union[str, List[str]]]
stop_token_ids: Optional[Union[int, List[int]]]
stream_interval: int
model: Optional[str]
tools: Optional[List[Dict]]
class PytorchModelConfig(TypedDict, total=False):
revision: Optional[str]
device: str
gpus: Optional[str]
num_gpus: int
max_gpu_memory: str
gptq_ckpt: Optional[str]
gptq_wbits: int
gptq_groupsize: int
gptq_act_order: bool
trust_remote_code: bool
def get_pydantic_model_from_method(
meth,
exclude_fields: Optional[Iterable[str]] = None,
include_fields: Optional[Dict[str, Any]] = None,
) -> BaseModel:
f = validate_arguments(meth, config={"arbitrary_types_allowed": True})
model = f.model
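# validate_arguments exposes a pydantic model of the method's signature; the pops
# below strip self/args/kwargs and pydantic-internal ("v__*") fields so that only
# real call parameters remain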
model.__fields__.pop("self", None)
model.__fields__.pop("args", None)
model.__fields__.pop("kwargs", None)
pydantic_private_keys = [
key for key in model.__fields__.keys() if key.startswith("v__")
]
for key in pydantic_private_keys:
model.__fields__.pop(key)
if exclude_fields is not None:
for key in exclude_fields:
model.__fields__.pop(key, None)
if include_fields is not None:
dummy_model = create_model("DummyModel", **include_fields)
model.__fields__.update(dummy_model.__fields__)
return model
def fix_forward_ref(model):
"""
pydantic on Python 3.8 generates ForwardRef fields; we replace them
with Optional[Any]
"""
exclude_fields = []
include_fields = {}
for key, field in model.__fields__.items():
if isinstance(field.annotation, ForwardRef):
exclude_fields.append(key)
include_fields[key] = (Optional[Any], None)
if exclude_fields:
for key in exclude_fields:
model.__fields__.pop(key, None)
if include_fields:
dummy_model = create_model("DummyModel", **include_fields)
model.__fields__.update(dummy_model.__fields__)
return model
class ModelAndPrompt(BaseModel):
model: str
prompt: str
class CreateCompletionTorch(BaseModel):
echo: bool = echo_field
max_tokens: int = max_tokens_field
repetition_penalty: float = repeat_penalty_field
stop: Optional[Union[str, List[str]]] = stop_field
stop_token_ids: Optional[Union[int, List[int]]] = none_field
stream: bool = stream_field
stream_interval: int = stream_interval_field
temperature: float = temperature_field
top_p: float = top_p_field
top_k: int = top_k_field
CreateCompletionLlamaCpp: BaseModel
try:
from llama_cpp import Llama
CreateCompletionLlamaCpp = get_pydantic_model_from_method(
Llama.create_completion,
exclude_fields=["model", "prompt", "grammar"],
include_fields={"grammar": (Optional[Any], None)},
)
except ImportError:
CreateCompletionLlamaCpp = create_model("CreateCompletionLlamaCpp")
CreateCompletionCTransformers: BaseModel
try:
from ctransformers.llm import LLM
CreateCompletionCTransformers = get_pydantic_model_from_method(
LLM.generate,
exclude_fields=["tokens"],
include_fields={
"max_tokens": (Optional[int], max_tokens_field),
"stream": (Optional[bool], stream_field),
},
)
except ImportError:
CreateCompletionCTransformers = create_model("CreateCompletionCTransformers")
# This type is for openai API compatibility
CreateCompletionOpenAI: BaseModel
class _CreateCompletionOpenAIFallback(BaseModel):
# OpenAI's create-completion request body; we define it as a pydantic
# model to validate the input params.
# https://platform.openai.com/docs/api-reference/completions/object
model: str
prompt: str
best_of: Optional[int] = 1
echo: bool = echo_field
frequency_penalty: Optional[float] = frequency_penalty_field
logit_bias: Optional[Dict[str, float]] = none_field
logprobs: Optional[int] = logprobs_field
max_tokens: int = max_tokens_field
n: Optional[int] = 1
presence_penalty: Optional[float] = presence_penalty_field
seed: Optional[int] = none_field
stop: Optional[Union[str, List[str]]] = stop_field
stream: bool = stream_field
suffix: Optional[str] = none_field
temperature: float = temperature_field
top_p: float = top_p_field
user: Optional[str] = none_field
try:
# For openai > 1
from openai.types.completion_create_params import CompletionCreateParamsNonStreaming
CreateCompletionOpenAI = create_model_from_typeddict(
CompletionCreateParamsNonStreaming,
)
CreateCompletionOpenAI = fix_forward_ref(CreateCompletionOpenAI)
except ImportError:
# TODO(codingl2k1): Remove it if openai < 1 is dropped.
CreateCompletionOpenAI = _CreateCompletionOpenAIFallback
class CreateCompletion(
ModelAndPrompt,
CreateCompletionTorch,
CreateCompletionLlamaCpp,
CreateCompletionCTransformers,
CreateCompletionOpenAI,
):
pass
class CreateChatModel(BaseModel):
model: str
# Currently, chat calls generate under the hood, so the chat params share the same definitions.
CreateChatCompletionTorch = CreateCompletionTorch
CreateChatCompletionLlamaCpp: BaseModel = CreateCompletionLlamaCpp
CreateChatCompletionCTransformers: BaseModel = CreateCompletionCTransformers
# This type is for openai API compatibility
CreateChatCompletionOpenAI: BaseModel
# Only supports openai > 1
from openai.types.chat.completion_create_params import (
CompletionCreateParamsNonStreaming,
)
CreateChatCompletionOpenAI = create_model_from_typeddict(
CompletionCreateParamsNonStreaming,
)
CreateChatCompletionOpenAI = fix_forward_ref(CreateChatCompletionOpenAI)
class CreateChatCompletion(
CreateChatModel,
CreateChatCompletionTorch,
CreateChatCompletionLlamaCpp,
CreateChatCompletionCTransformers,
CreateChatCompletionOpenAI,
):
pass
| [
"<TOOL>"
] |
2024-01-10 | xorbitsai/inference | examples~LangChain_Streamlit_Doc_Chat.py | import streamlit as st
from langchain.llms import Xinference
from langchain.embeddings import XinferenceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
# Customize the layout
st.set_page_config(page_title="Local AI Chat Powered by Xinference", page_icon="🤖", layout="wide")
# Write uploaded file in temp dir
def write_text_file(content, file_path):
try:
with open(file_path, 'w') as file:
file.write(content)
return True
except Exception as e:
print(f"Error occurred while writing the file: {e}")
return False
# Prepare prompt template
prompt_template = """
使用下面的上下文来回答问题。
如果你不知道答案,就说你不知道,不要编造答案。
{context}
问题: {question}
回答:"""
prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
# Initialize the Xinference LLM & Embeddings
xinference_server_url = "http://localhost:9997"
llm = Xinference(server_url=xinference_server_url, model_uid="my_llm")
embeddings = XinferenceEmbeddings(server_url=xinference_server_url, model_uid="my_embedding")
llm_chain = LLMChain(llm=llm, prompt=prompt)
st.title("📄文档对话")
uploaded_file = st.file_uploader("上传文件", type="txt")
if uploaded_file is not None:
content = uploaded_file.read().decode('utf-8')
file_path = "/tmp/file.txt"
write_text_file(content, file_path)
loader = TextLoader(file_path)
docs = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=0)
texts = text_splitter.split_documents(docs)
db = Chroma.from_documents(texts, embeddings)
st.success("上传文档成功")
# Query through LLM
question = st.text_input("提问", placeholder="请问我任何关于文章的问题", disabled=not uploaded_file)
if question:
similar_doc = db.similarity_search(question, k=1)
st.write("相关上下文:")
st.write(similar_doc)
context = similar_doc[0].page_content
query_llm = LLMChain(llm=llm, prompt=prompt)
response = query_llm.run({"context": context, "question": question})
st.write(f"回答:{response}")
| [
"question",
"\n使用下面的上下文来回答问题。\n如果你不知道答案,就说你不知道,不要编造答案。\n{context}\n问题: {question}\n回答:",
"context"
] |
2024-01-10 | xorbitsai/inference | xinference~core~tests~test_restful_api.py | # Copyright 2022-2023 XProbe Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
import openai
import pytest
import requests
from packaging import version
from ...model.embedding import BUILTIN_EMBEDDING_MODELS
@pytest.mark.asyncio
async def test_restful_api(setup):
endpoint, _ = setup
url = f"{endpoint}/v1/models"
# list
response = requests.get(url)
response_data = response.json()
assert len(response_data) == 0
# launch
payload = {
"model_uid": "test_restful_api",
"model_name": "orca",
"quantization": "q4_0",
}
response = requests.post(url, json=payload)
response_data = response.json()
model_uid_res = response_data["model_uid"]
assert model_uid_res == "test_restful_api"
# launch n_gpu error
payload = {
"model_uid": "test_restful_api",
"model_name": "orca",
"quantization": "q4_0",
"n_gpu": -1,
}
response = requests.post(url, json=payload)
assert response.status_code == 400
# same model uid
payload = {
"model_uid": "test_restful_api",
"model_name": "orca",
"quantization": "q4_0",
}
response = requests.post(url, json=payload)
assert response.status_code == 400
# list
response = requests.get(url)
response_data = response.json()
assert len(response_data) == 1
# describe
response = requests.get(f"{endpoint}/v1/models/test_restful_api")
response_data = response.json()
assert response_data["model_name"] == "orca"
assert response_data["replica"] == 1
response = requests.delete(f"{endpoint}/v1/models/bogus")
assert response.status_code == 400
# generate
url = f"{endpoint}/v1/completions"
payload = {
"model": model_uid_res,
"prompt": "Once upon a time, there was a very old computer.",
}
response = requests.post(url, json=payload)
completion = response.json()
assert "text" in completion["choices"][0]
payload = {
"model": "bogus",
"prompt": "Once upon a time, there was a very old computer.",
}
response = requests.post(url, json=payload)
assert response.status_code == 400
payload = {
"prompt": "Once upon a time, there was a very old computer.",
}
response = requests.post(url, json=payload)
assert response.status_code == 422
# chat
url = f"{endpoint}/v1/chat/completions"
payload = {
"model": model_uid_res,
"messages": [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello!"},
{"role": "assistant", "content": "Hi what can I help you?"},
{"role": "user", "content": "What is the capital of France?"},
],
"stop": ["\n"],
}
response = requests.post(url, json=payload)
completion = response.json()
assert "content" in completion["choices"][0]["message"]
payload = {
"messages": [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello!"},
{"role": "assistant", "content": "Hi what can I help you?"},
{"role": "user", "content": "What is the capital of France?"},
],
}
response = requests.post(url, json=payload)
assert response.status_code == 422
payload = {
"model": "bogus",
"messages": [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello!"},
{"role": "assistant", "content": "Hi what can I help you?"},
{"role": "user", "content": "What is the capital of France?"},
],
}
response = requests.post(url, json=payload)
assert response.status_code == 400
payload = {
"model": model_uid_res,
"messages": [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello!"},
{"role": "assistant", "content": "Hi what can I help you?"},
],
}
response = requests.post(url, json=payload)
assert response.status_code == 400
# Duplicate system messages
payload = {
"model": model_uid_res,
"messages": [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "system", "content": "You are not a helpful assistant."},
{"role": "user", "content": "Hello!"},
{"role": "assistant", "content": "Hi what can I help you?"},
{"role": "user", "content": "What is the capital of France?"},
],
}
response = requests.post(url, json=payload)
assert response.status_code == 400
# System message should be the first one.
payload = {
"model": model_uid_res,
"messages": [
{"role": "user", "content": "Hello!"},
{"role": "system", "content": "You are a helpful assistant."},
{"role": "assistant", "content": "Hi what can I help you?"},
{"role": "user", "content": "What is the capital of France?"},
],
}
response = requests.post(url, json=payload)
assert response.status_code == 400
# delete
url = f"{endpoint}/v1/models/test_restful_api"
response = requests.delete(url)
# list
response = requests.get(f"{endpoint}/v1/models")
response_data = response.json()
assert len(response_data) == 0
# delete again
url = f"{endpoint}/v1/models/test_restful_api"
response = requests.delete(url)
assert response.status_code == 400
# test for model that supports embedding
url = f"{endpoint}/v1/models"
payload = {
"model_uid": "test_restful_api2",
"model_name": "orca",
"quantization": "q4_0",
}
response = requests.post(url, json=payload)
response_data = response.json()
model_uid_res = response_data["model_uid"]
assert model_uid_res == "test_restful_api2"
url = f"{endpoint}/v1/embeddings"
payload = {
"model": "test_restful_api2",
"input": "The food was delicious and the waiter...",
}
response = requests.post(url, json=payload)
embedding_res = response.json()
assert "embedding" in embedding_res["data"][0]
url = f"{endpoint}/v1/models/test_restful_api2"
response = requests.delete(url)
# list model registration
url = f"{endpoint}/v1/model_registrations/LLM"
response = requests.get(url)
assert response.status_code == 200
model_regs = response.json()
assert len(model_regs) > 0
for model_reg in model_regs:
assert model_reg["is_builtin"]
# register_model
model = """{
"version": 1,
"context_length":2048,
"model_name": "custom_model",
"model_lang": [
"en", "zh"
],
"model_ability": [
"embed",
"chat"
],
"model_family": "other",
"model_specs": [
{
"model_format": "pytorch",
"model_size_in_billions": 7,
"quantizations": [
"4-bit",
"8-bit",
"none"
],
"model_id": "ziqingyang/chinese-alpaca-2-7b"
}
],
"prompt_style": {
"style_name": "ADD_COLON_SINGLE",
"system_prompt": "Below is an instruction that describes a task. Write a response that appropriately completes the request.",
"roles": [
"Instruction",
"Response"
],
"intra_message_sep": "\\n\\n### "
}
}"""
url = f"{endpoint}/v1/model_registrations/LLM"
payload = {"model": model, "persist": False}
response = requests.post(url, json=payload)
assert response.status_code == 200
url = f"{endpoint}/v1/model_registrations/LLM"
response = requests.get(url)
assert response.status_code == 200
new_model_regs = response.json()
assert len(new_model_regs) == len(model_regs) + 1
# get_model_registrations
url = f"{endpoint}/v1/model_registrations/LLM/custom_model"
response = requests.get(url, json=payload)
assert response.status_code == 200
data = response.json()
assert "custom_model" in data["model_name"]
# unregister_model
url = f"{endpoint}/v1/model_registrations/LLM/custom_model"
response = requests.delete(url, json=payload)
assert response.status_code == 200
url = f"{endpoint}/v1/model_registrations/LLM"
response = requests.get(url)
assert response.status_code == 200
new_model_regs = response.json()
assert len(new_model_regs) == len(model_regs)
custom_model_reg = None
for model_reg in new_model_regs:
if model_reg["model_name"] == "custom_model":
custom_model_reg = model_reg
assert custom_model_reg is None
def test_restful_api_for_embedding(setup):
model_name = "gte-base"
model_spec = BUILTIN_EMBEDDING_MODELS[model_name]
endpoint, _ = setup
url = f"{endpoint}/v1/models"
# list
response = requests.get(url)
response_data = response.json()
assert len(response_data) == 0
# launch
payload = {
"model_uid": "test_embedding",
"model_name": model_name,
"model_type": "embedding",
}
response = requests.post(url, json=payload)
response_data = response.json()
model_uid_res = response_data["model_uid"]
assert model_uid_res == "test_embedding"
response = requests.get(url)
response_data = response.json()
assert len(response_data) == 1
# test embedding
url = f"{endpoint}/v1/embeddings"
payload = {
"model": "test_embedding",
"input": "The food was delicious and the waiter...",
}
response = requests.post(url, json=payload)
embedding_res = response.json()
assert "embedding" in embedding_res["data"][0]
assert len(embedding_res["data"][0]["embedding"]) == model_spec.dimensions
# test multiple
payload = {
"model": "test_embedding",
"input": [
"The food was delicious and the waiter...",
"how to implement quick sort in python?",
"Beijing",
"sorting algorithms",
],
}
response = requests.post(url, json=payload)
embedding_res = response.json()
assert len(embedding_res["data"]) == 4
for data in embedding_res["data"]:
assert len(data["embedding"]) == model_spec.dimensions
# delete model
url = f"{endpoint}/v1/models/test_embedding"
response = requests.delete(url)
assert response.status_code == 200
response = requests.get(f"{endpoint}/v1/models")
response_data = response.json()
assert len(response_data) == 0
def _check_invalid_tool_calls(endpoint, model_uid_res):
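    # The user query below is unrelated to the provided tool, so the model is expected
    # to answer normally: finish_reason "stop" and zero tool_calls.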
import openai
client = openai.Client(api_key="not empty", base_url=f"{endpoint}/v1")
tools = [
{
"type": "function",
"function": {
"name": "get_exchange_rate",
"description": "Get the exchange rate between two currencies",
"parameters": {
"type": "object",
"properties": {
"base_currency": {
"type": "string",
"description": "The currency to convert from",
},
"target_currency": {
"type": "string",
"description": "The currency to convert to",
},
},
"required": ["base_currency", "target_currency"],
},
},
}
]
completion = client.chat.completions.create(
model=model_uid_res,
messages=[
{
"content": "Can you book a flight for me from New York to London?",
"role": "user",
}
],
tools=tools,
max_tokens=200,
temperature=0.1,
)
assert "stop" == completion.choices[0].finish_reason
assert completion.choices[0].message.content
assert len(completion.choices[0].message.tool_calls) == 0
@pytest.mark.parametrize(
"model_format, quantization", [("ggmlv3", "q4_0"), ("pytorch", None)]
)
@pytest.mark.skip(reason="Cost too many resources.")
def test_restful_api_for_tool_calls(setup, model_format, quantization):
model_name = "chatglm3"
endpoint, _ = setup
url = f"{endpoint}/v1/models"
# list
response = requests.get(url)
response_data = response.json()
assert len(response_data) == 0
# launch
payload = {
"model_uid": "test_tool",
"model_name": model_name,
"model_size_in_billions": 6,
"model_format": model_format,
"quantization": quantization,
}
response = requests.post(url, json=payload)
response_data = response.json()
model_uid_res = response_data["model_uid"]
assert model_uid_res == "test_tool"
response = requests.get(url)
response_data = response.json()
assert len(response_data) == 1
# tool
tools = [
{
"type": "function",
"function": {
"name": "track",
"description": "追踪指定股票的实时价格",
"parameters": {
"type": "object",
"properties": {"symbol": {"description": "需要追踪的股票代码"}},
"required": ["symbol"],
},
},
},
{
"type": "function",
"function": {
"name": "text-to-speech",
"description": "将文本转换为语音",
"parameters": {
"type": "object",
"properties": {
"text": {"description": "需要转换成语音的文本"},
"voice": {"description": "要使用的语音类型(男声、女声等)"},
"speed": {"description": "语音的速度(快、中等、慢等)"},
},
"required": ["text"],
},
},
},
]
url = f"{endpoint}/v1/chat/completions"
payload = {
"model": model_uid_res,
"messages": [
{"role": "user", "content": "帮我查询股票10111的价格"},
],
"tools": tools,
"stop": ["\n"],
}
response = requests.post(url, json=payload)
completion = response.json()
assert "content" in completion["choices"][0]["message"]
assert "tool_calls" == completion["choices"][0]["finish_reason"]
assert (
"track"
== completion["choices"][0]["message"]["tool_calls"][0]["function"]["name"]
)
arguments = completion["choices"][0]["message"]["tool_calls"][0]["function"][
"arguments"
]
arg = json.loads(arguments)
assert arg == {"symbol": "10111"}
# Restful client
from ...client import RESTfulClient
client = RESTfulClient(endpoint)
model = client.get_model(model_uid_res)
completion = model.chat("帮我查询股票10111的价格", tools=tools)
assert "content" in completion["choices"][0]["message"]
assert "tool_calls" == completion["choices"][0]["finish_reason"]
assert (
"track"
== completion["choices"][0]["message"]["tool_calls"][0]["function"]["name"]
)
arguments = completion["choices"][0]["message"]["tool_calls"][0]["function"][
"arguments"
]
arg = json.loads(arguments)
assert arg == {"symbol": "10111"}
# openai client
import openai
client = openai.Client(api_key="not empty", base_url=f"{endpoint}/v1")
completion = client.chat.completions.create(
model=model_uid_res,
messages=[{"role": "user", "content": "帮我查询股票10111的价格"}],
tools=tools,
)
assert "tool_calls" == completion.choices[0].finish_reason
assert "track" == completion.choices[0].message.tool_calls[0].function.name
arguments = completion.choices[0].message.tool_calls[0].function.arguments
arg = json.loads(arguments)
assert arg == {"symbol": "10111"}
assistant_message = completion.choices[0].message.model_dump()
messages = [
{"role": "user", "content": "帮我查询股票10111的价格"},
assistant_message,
{
"role": "tool",
"tool_call_id": assistant_message["tool_calls"][0]["id"],
"name": assistant_message["tool_calls"][0]["function"]["name"],
"content": str({"symbol": "10111", "price": 12345}),
},
]
for kwargs in [{"tools": tools}, {}]:
completion = client.chat.completions.create(
model=model_uid_res, messages=messages, **kwargs
)
assert completion.choices
assert completion.choices[0].finish_reason == "stop"
assert "10111" in completion.choices[0].message.content
assert "12345" in completion.choices[0].message.content
_check_invalid_tool_calls(endpoint, model_uid_res)
@pytest.mark.parametrize(
"model_format, quantization", [("ggufv2", "Q4_K_S"), ("pytorch", None)]
)
@pytest.mark.skip(reason="Cost too many resources.")
def test_restful_api_for_gorilla_openfunctions_tool_calls(
setup, model_format, quantization
):
model_name = "gorilla-openfunctions-v1"
endpoint, _ = setup
url = f"{endpoint}/v1/models"
# list
response = requests.get(url)
response_data = response.json()
assert len(response_data) == 0
# launch
payload = {
"model_uid": "test_tool",
"model_name": model_name,
"model_size_in_billions": 7,
"model_format": model_format,
"quantization": quantization,
}
response = requests.post(url, json=payload)
response_data = response.json()
model_uid_res = response_data["model_uid"]
assert model_uid_res == "test_tool"
response = requests.get(url)
response_data = response.json()
assert len(response_data) == 1
# tool
tools = [
{
"type": "function",
"function": {
"name": "uber_ride",
"description": "Find suitable ride for customers given the location, "
"type of ride, and the amount of time the customer is "
"willing to wait as parameters",
"parameters": {
"type": "object",
"properties": {
"loc": {
"type": "int",
"description": "Location of the starting place of the Uber ride",
},
"type": {
"type": "string",
"enum": ["plus", "comfort", "black"],
"description": "Types of Uber ride user is ordering",
},
"time": {
"type": "int",
"description": "The amount of time in minutes the customer is willing to wait",
},
},
},
},
}
]
url = f"{endpoint}/v1/chat/completions"
payload = {
"model": model_uid_res,
"messages": [
{
"role": "user",
"content": 'Call me an Uber ride type "Plus" in Berkeley at zipcode 94704 in 10 minutes',
},
],
"tools": tools,
"stop": ["\n"],
"max_tokens": 200,
"temperature": 0,
}
response = requests.post(url, json=payload)
completion = response.json()
assert "content" in completion["choices"][0]["message"]
assert "tool_calls" == completion["choices"][0]["finish_reason"]
assert (
"uber_ride"
== completion["choices"][0]["message"]["tool_calls"][0]["function"]["name"]
)
arguments = completion["choices"][0]["message"]["tool_calls"][0]["function"][
"arguments"
]
arg = json.loads(arguments)
assert arg == {"loc": 94704, "time": 10, "type": "plus"}
_check_invalid_tool_calls(endpoint, model_uid_res)
@pytest.mark.parametrize(
"model_format, quantization",
[
("pytorch", None),
("ggufv2", "Q4_K_M"),
],
)
@pytest.mark.skip(reason="Cost too many resources.")
def test_restful_api_for_qwen_tool_calls(setup, model_format, quantization):
model_name = "qwen-chat"
endpoint, _ = setup
url = f"{endpoint}/v1/models"
# list
response = requests.get(url)
response_data = response.json()
assert len(response_data) == 0
# launch
payload = {
"model_uid": "test_tool",
"model_name": model_name,
"model_size_in_billions": 7,
"model_format": model_format,
"quantization": quantization,
}
response = requests.post(url, json=payload)
response_data = response.json()
model_uid_res = response_data["model_uid"]
assert model_uid_res == "test_tool"
response = requests.get(url)
response_data = response.json()
assert len(response_data) == 1
# tool
tools = [
{
"type": "function",
"function": {
"name": "google_search",
"description": "谷歌搜索是一个通用搜索引擎,可用于访问互联网、查询百科知识、了解时事新闻等。",
"parameters": {
"type": "object",
"properties": {
"search_query": {
"type": "string",
"description": "搜索关键词或短语",
},
},
"required": ["search_query"],
},
},
},
{
"type": "function",
"function": {
"name": "image_gen",
"description": "文生图是一个AI绘画(图像生成)服务,输入文本描述,返回根据文本作画得到的图片的URL。",
"parameters": {
"type": "object",
"properties": {
"prompt": {
"type": "string",
"description": "英文关键词,描述了希望图像具有什么内容",
},
},
"required": ["prompt"],
},
},
},
]
url = f"{endpoint}/v1/chat/completions"
payload = {
"model": model_uid_res,
"messages": [
{
"role": "user",
"content": "谁是周杰伦?",
},
],
"tools": tools,
"max_tokens": 2048,
"temperature": 0,
}
response = requests.post(url, json=payload)
completion = response.json()
assert "content" in completion["choices"][0]["message"]
assert "tool_calls" == completion["choices"][0]["finish_reason"]
assert (
"google_search"
== completion["choices"][0]["message"]["tool_calls"][0]["function"]["name"]
)
arguments = completion["choices"][0]["message"]["tool_calls"][0]["function"][
"arguments"
]
arg = json.loads(arguments)
assert arg == {"search_query": "周杰伦"}
# Check tool message.
payload = {
"model": model_uid_res,
"messages": [
{
"role": "user",
"content": "谁是周杰伦?",
},
completion["choices"][0]["message"],
{
"role": "tool",
"content": "Jay Chou is a Taiwanese singer, songwriter, record producer, rapper, actor, television personality, and businessman.",
},
],
"tools": tools,
"max_tokens": 2048,
"temperature": 0,
}
response = requests.post(url, json=payload)
completion2 = response.json()
assert "stop" == completion2["choices"][0]["finish_reason"]
assert "周杰伦" in completion2["choices"][0]["message"]["content"]
assert "歌手" in completion2["choices"][0]["message"]["content"]
# Check continue tool call.
payload = {
"model": model_uid_res,
"messages": [
{
"role": "user",
"content": "谁是周杰伦?",
},
completion["choices"][0]["message"],
{
"role": "tool",
"content": "Jay Chou is a Taiwanese singer, songwriter, record producer, rapper, actor, television personality, and businessman.",
},
completion2["choices"][0]["message"],
{"role": "user", "content": "画一个他的卡通形象出来"},
],
"tools": tools,
"max_tokens": 2048,
"temperature": 0,
}
response = requests.post(url, json=payload)
completion3 = response.json()
assert "tool_calls" == completion3["choices"][0]["finish_reason"]
assert (
"image_gen"
== completion3["choices"][0]["message"]["tool_calls"][0]["function"]["name"]
)
arguments = completion3["choices"][0]["message"]["tool_calls"][0]["function"][
"arguments"
]
arg = json.loads(arguments)
assert "Jay Chou" in arg["prompt"]
_check_invalid_tool_calls(endpoint, model_uid_res)
def test_restful_api_with_request_limits(setup):
model_name = "gte-base"
endpoint, _ = setup
url = f"{endpoint}/v1/models"
# test embedding
# launch
payload = {
"model_uid": "test_embedding",
"model_name": model_name,
"model_type": "embedding",
"request_limits": 0,
}
response = requests.post(url, json=payload)
response_data = response.json()
model_uid_res = response_data["model_uid"]
assert model_uid_res == "test_embedding"
# test embedding
url = f"{endpoint}/v1/embeddings"
payload = {
"model": "test_embedding",
"input": "The food was delicious and the waiter...",
}
response = requests.post(url, json=payload)
assert response.status_code == 429
assert "Rate limit reached" in response.json()["detail"]
# delete model
url = f"{endpoint}/v1/models/test_embedding"
response = requests.delete(url)
assert response.status_code == 200
# test llm
url = f"{endpoint}/v1/models"
payload = {
"model_uid": "test_restful_api",
"model_name": "orca",
"quantization": "q4_0",
"request_limits": 0,
}
response = requests.post(url, json=payload)
response_data = response.json()
model_uid_res = response_data["model_uid"]
assert model_uid_res == "test_restful_api"
# generate
url = f"{endpoint}/v1/completions"
payload = {
"model": model_uid_res,
"prompt": "Once upon a time, there was a very old computer.",
}
response = requests.post(url, json=payload)
assert response.status_code == 429
assert "Rate limit reached" in response.json()["detail"]
@pytest.mark.asyncio
@pytest.mark.skipif(
sys.platform == "win32", reason="Window CI hangs after run this case."
)
async def test_openai(setup):
endpoint, _ = setup
url = f"{endpoint}/v1/models"
# list
response = requests.get(url)
response_data = response.json()
assert len(response_data) == 0
# launch
payload = {
"model_uid": "test_restful_api",
"model_name": "orca",
"quantization": "q4_0",
}
response = requests.post(url, json=payload)
response_data = response.json()
model_uid_res = response_data["model_uid"]
assert model_uid_res == "test_restful_api"
# chat
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello!"},
{"role": "assistant", "content": "Hi what can I help you?"},
{"role": "user", "content": "What is the capital of France?"},
]
result = []
if version.parse(openai.__version__) < version.parse("1.0"):
openai.api_key = ""
openai.api_base = f"{endpoint}/v1"
openai_chat_completion = openai.ChatCompletion.acreate
stream_chunk_type_name = "OpenAIObject"
response_type_name = "OpenAIObject"
else:
client = openai.AsyncClient(api_key="not empty", base_url=f"{endpoint}/v1")
openai_chat_completion = client.chat.completions.create
stream_chunk_type_name = "ChatCompletionChunk"
response_type_name = "ChatCompletion"
async for chunk in await openai_chat_completion(
messages=messages, stream=True, model=model_uid_res
):
if not hasattr(chunk, "choices") or len(chunk.choices) == 0:
continue
result.append(chunk)
assert result
assert type(result[0]).__name__ == stream_chunk_type_name
result = await openai_chat_completion(
messages=messages, stream=False, model=model_uid_res
)
assert result
assert type(result).__name__ == response_type_name
def test_lang_chain(setup):
endpoint, _ = setup
url = f"{endpoint}/v1/models"
# list
response = requests.get(url)
response_data = response.json()
assert len(response_data) == 0
# launch
payload = {
"model_uid": "test_restful_api",
"model_name": "orca",
"quantization": "q4_0",
}
response = requests.post(url, json=payload)
response_data = response.json()
model_uid_res = response_data["model_uid"]
assert model_uid_res == "test_restful_api"
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain.schema import AIMessage, HumanMessage, SystemMessage
inference_server_url = f"{endpoint}/v1"
chat = ChatOpenAI(
model=model_uid_res,
openai_api_key="EMPTY",
openai_api_base=inference_server_url,
max_tokens=5,
temperature=0,
)
messages = [
SystemMessage(
content="You are a helpful assistant that translates English to Italian."
),
HumanMessage(
content="Translate the following sentence from English to Italian: I love programming."
),
]
r = chat(messages)
assert type(r) == AIMessage
assert r.content
assert "amo" in r.content.lower()
template = "You are a helpful assistant that translates {input_language} to {output_language}."
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template = "{text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages(
[system_message_prompt, human_message_prompt]
)
# get a chat completion from the formatted messages
r = chat(
chat_prompt.format_prompt(
input_language="English",
output_language="Italian",
text="I love programming.",
).to_messages()
)
assert type(r) == AIMessage
assert r.content
| [
"画一个他的卡通形象出来",
"帮我查询股票10111的价格",
"You are a helpful assistant that translates {input_language} to {output_language}.",
"Call me an Uber ride type \"Plus\" in Berkeley at zipcode 94704 in 10 minutes",
"{'symbol': '10111', 'price': 12345}",
"You are not a helpful assistant.",
"Hi what can I help you?",
"Can you book a flight for me from New York to London?",
"[PLACEHOLDER, PLACEHOLDER]",
"Translate the following sentence from English to Italian: I love programming.",
"You are a helpful assistant that translates English to Italian.",
"What is the capital of France?",
"You are a helpful assistant.",
"谁是周杰伦?",
"Hello!",
"Jay Chou is a Taiwanese singer, songwriter, record producer, rapper, actor, television personality, and businessman.",
"{text}"
] |
2024-01-10 | navikt/oet-chat | emb_noemb.py | import openai, os, requests
import getpass
openai.__version__ # 0.28.0
openai.api_type = "azure"
# Azure OpenAI on your own data is only supported by the 2023-08-01-preview API version
openai.api_version = "2023-08-01-preview"
# Azure OpenAI setup
openai.api_base = "https://faggruppe-gpt.openai.azure.com/" # Add your endpoint here
openai.api_key = getpass.getpass() # Add your OpenAI API key here
deployment_id = "gpt-4" # Add your deployment ID here
# Azure Cognitive Search setup
search_endpoint = "https://sprakteknologi-ai-search.search.windows.net"; # Add your Azure Cognitive Search endpoint here
search_key = getpass.getpass(); # Add your Azure Cognitive Search admin key here
def setup_byod(deployment_id: str) -> None:
"""Sets up the OpenAI Python SDK to use your own data for the chat endpoint.
:param deployment_id: The deployment ID for the model to use with your own data.
To remove this configuration, simply set openai.requestssession to None.
"""
class BringYourOwnDataAdapter(requests.adapters.HTTPAdapter):
def send(self, request, **kwargs):
request.url = f"{openai.api_base}/openai/deployments/{deployment_id}/extensions/chat/completions?api-version={openai.api_version}"
return super().send(request, **kwargs)
session = requests.Session()
# Mount a custom adapter which will use the extensions endpoint for any call using the given `deployment_id`
session.mount(
prefix=f"{openai.api_base}/openai/deployments/{deployment_id}",
adapter=BringYourOwnDataAdapter()
)
openai.requestssession = session
setup_byod(deployment_id)
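# As the setup_byod docstring notes, the custom routing can be undone later by clearing
# the mounted session, e.g.: openai.requestssession = None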
def run_query(query, system_content, datasource):
completion = openai.ChatCompletion.create(
messages=[
{"role": "system", "content":system_content},
{"role": "assistant", "content": ""},
{"role": "user", "content": query},
],
deployment_id=deployment_id,
dataSources=[ # camelCase is intentional, as this is the format the API expects
{
"type": "AzureCognitiveSearch",
"parameters": {
"endpoint": search_endpoint,
"key": search_key,
"indexName": datasource,
}
}
]
)
return completion
query = "jeg skal arrangere et møte med varighet over 3 timer utenfor eget arbeidssted. får jeg dekket servering?"
system_content= '''Follow these instructions:
1) Answer question given from the user.
2) only give answers based on the context.
3) do not give answers based on your own knowledge.
4) stick to new norwegian.
'''
# datasource using embedding
emb = run_query(query, system_content, datasource="emb")
# datasource without embedding
noemb = run_query(query, system_content, datasource="noemb")
# svar (answers)
emb["choices"][0]['message']['content']
noemb["choices"][0]['message']['content']
# referanser (references)
emb["choices"][0]['message']['context']['messages'][0]['content']
noemb["choices"][0]['message']['context']['messages'][0]['content']
| [
"jeg skal arrangere et møte med varighet over 3 timer utenfor eget arbeidssted. får jeg dekket servering?",
"Follow these instructions:\n1) Answer question given from the user.\n2) only give answers based on the context.\n3) do not give answers based on your own knowledge.\n4) stick to new norwegian.\n"
] |
2024-01-10 | davolu/LangchainAgentPDFCSVChat | llama-csv.py | from langchain.document_loaders.csv_loader import CSVLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import CTransformers
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
import sys
DB_FAISS_PATH = "vectorstore/db_faiss"
loader = CSVLoader(file_path="data/2019.csv", encoding="utf-8", csv_args={'delimiter': ','})
data = loader.load()
print(data)
# Split the text into Chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=20)
text_chunks = text_splitter.split_documents(data)
print(len(text_chunks))
# Download Sentence Transformers Embedding From Hugging Face
embeddings = HuggingFaceEmbeddings(model_name = 'sentence-transformers/all-MiniLM-L6-v2')
# Converting the text chunks into embeddings and saving the embeddings into the FAISS knowledge base
docsearch = FAISS.from_documents(text_chunks, embeddings)
docsearch.save_local(DB_FAISS_PATH)
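# The persisted index could later be reloaded instead of being rebuilt from the CSV,
# for example: docsearch = FAISS.load_local(DB_FAISS_PATH, embeddings)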
#query = "What is the value of GDP per capita of Finland provided in the data?"
#docs = docsearch.similarity_search(query, k=3)
#print("Result", docs)
llm = CTransformers(model="models/llama-2-7b-chat.ggmlv3.q4_0.bin",
model_type="llama",
max_new_tokens=512,
temperature=0.1)
qa = ConversationalRetrievalChain.from_llm(llm, retriever=docsearch.as_retriever())
while True:
chat_history = []
#query = "What is the value of GDP per capita of Finland provided in the data?"
query = input(f"Input Prompt: ")
if query == 'exit':
print('Exiting')
sys.exit()
if query == '':
continue
result = qa({"question":query, "chat_history":chat_history})
print("Response: ", result['answer']) | [] |
2024-01-10 | davolu/LangchainAgentPDFCSVChat | pdf-gpt-api.py | import os
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from flask import Flask, request, jsonify
from dotenv import load_dotenv
from PyPDF2 import PdfReader
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import ElasticVectorSearch, Pinecone, Weaviate, FAISS
load_dotenv()
# Initialize Flask app
app = Flask(__name__)
# Create a directory for storing uploaded files within the app context
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024 # 16MB
UPLOAD_FOLDER = os.path.join(app.root_path, 'uploads')
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
@app.route('/process_pdf', methods=['POST'])
def process_pdf():
# Load the OpenAI API key from the environment variable
if os.getenv("OPENAI_API_KEY") is None or os.getenv("OPENAI_API_KEY") == "":
return jsonify({"error": "OPENAI_API_KEY is not set"}), 500
pdf_file = request.files.get('pdf_file')
if pdf_file is None:
return jsonify({"error": "No PDF file provided"}), 400
# Get the original file name
original_filename = pdf_file.filename
# Create a path for saving the uploaded file
file_path = os.path.join(UPLOAD_FOLDER, original_filename)
# Save the uploaded file with the original name
pdf_file.save(file_path)
    # read the uploaded PDF file
reader = PdfReader(file_path)
# read data from the file and put them into a variable called raw_text
raw_text = ''
for i, page in enumerate(reader.pages):
text = page.extract_text()
if text:
raw_text += text
    # We need to split the text that we read into smaller chunks so that during information retrieval we don't hit the token size limits.
text_splitter = CharacterTextSplitter(
separator = "\n",
chunk_size = 1000,
chunk_overlap = 200,
length_function = len,
)
texts = text_splitter.split_text(raw_text)
    # Create OpenAI embeddings (vectors are computed via the OpenAI API)
embeddings = OpenAIEmbeddings()
docsearch = FAISS.from_texts(texts, embeddings)
chain = load_qa_chain(OpenAI(), chain_type="stuff")
query = "I'm 28 years old. Can I run for presidency?"
docs = docsearch.similarity_search(query)
response = chain.run(input_documents=docs, question=query)
# You can format the response as needed, e.g., convert to JSON
response_json = {"answer": response}
return jsonify(response_json), 200
if __name__ == "__main__":
app.run(debug=True)
| [] |
2024-01-10 | davolu/LangchainAgentPDFCSVChat | csv-gpt-api.py | from langchain.agents import create_csv_agent
from langchain.llms import OpenAI
from dotenv import load_dotenv
import os
from flask import Flask, request, jsonify
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024 # 16MB
# Create a directory for storing uploaded files within the app context
UPLOAD_FOLDER = os.path.join(app.root_path, 'uploads')
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
@app.route('/process_csv', methods=['POST'])
def process_csv():
load_dotenv()
# Load the OpenAI API key from the environment variable
if os.getenv("OPENAI_API_KEY") is None or os.getenv("OPENAI_API_KEY") == "":
return jsonify({"error": "OPENAI_API_KEY is not set"}), 500
csv_file = request.files.get('csv_file')
if csv_file is None:
return jsonify({"error": "No CSV file provided"}), 400
# Get the original file name
original_filename = csv_file.filename
# Create a path for saving the uploaded file
file_path = os.path.join(UPLOAD_FOLDER, original_filename)
# Save the uploaded file with the original name
csv_file.save(file_path)
agent = create_csv_agent(
OpenAI(temperature=0, max_tokens=500), file_path, verbose=True)
prompt = "Which product line had the lowest average price"
if prompt is None or prompt == "":
return jsonify({"error": "No user question provided"}), 400
response = agent.run(prompt)
# You can format the response as needed, e.g., convert to JSON
response_json = {"answer": response}
return jsonify(response_json), 200
if __name__ == "__main__":
app.run(debug=True)
| [
"Which product line had the lowest average price"
] |