date_collected (stringclasses, 1 value) | repo_name (stringlengths 6-116) | file_name (stringlengths 2-220) | file_contents (stringlengths 13-357k) | prompts (sequence) |
---|---|---|---|---|
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518172538.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
HARD_LIMIT_CHAR = 10000
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fixe this limit, in a smarter way
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
parsed_articles = json.loads(result_from_chatgpt.split('[')[1])
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
st.json(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518175218.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
HARD_LIMIT_CHAR = 10000
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://www.nytimes.com/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fixe this limit, in a smarter way
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"published\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
print(result_from_chatgpt)
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
st.json(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["published"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"published\". No Other text. webpage : \"{webpage}\""
] |
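Several of these revisions define an `extract_json_values` helper that recovers JSON from a noisy model reply by re-running `json.loads` at the offsets reported in `JSONDecodeError`. For comparison only, the sketch below (my own illustration, not code from the repository) expresses the same recovery with `json.JSONDecoder.raw_decode`, which returns both the parsed value and the offset where it ends:

```python
import json

def extract_json_values(text):
    """Collect every JSON object/array embedded in `text`, ignoring surrounding noise."""
    decoder = json.JSONDecoder()
    results, idx = [], 0
    while idx < len(text):
        try:
            value, end = decoder.raw_decode(text, idx)
        except json.JSONDecodeError:
            idx += 1          # no JSON value starts here; slide forward and retry
            continue
        if isinstance(value, (dict, list)):
            results.append(value)
        idx = end
    return results

# Example: a reply with chatter around the payload still parses cleanly.
# extract_json_values('Sure! [{"title": "A", "date": "2023-05-18"}] Hope this helps.')
# -> [[{'title': 'A', 'date': '2023-05-18'}]]
```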
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518171745.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fixe this limit, in a smarter way
text_from_webpage = text_from_webpage[:15000]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
st.write(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~gui_20230623010531.py | ## IMPORTS
from bs4 import BeautifulSoup # Importing BeautifulSoup for HTML parsing
from bs4.element import Comment # Importing Comment class for extracting comments from HTML
import urllib.request # Importing urllib.request for making HTTP requests
import streamlit as st # Importing streamlit for building interactive web apps
import os # Importing os for accessing operating system functionalities
from dotenv import load_dotenv # Importing load_dotenv for loading environment variables
from langchain.llms import OpenAI # Importing OpenAI class from langchain.llms module
from langchain.prompts import PromptTemplate # Importing PromptTemplate class from langchain.prompts module
import json # Importing json module for working with JSON data
from dotenv import dotenv_values # Importing dotenv_values for loading environment variables from .env file
from googlesearch import search # Importing search function from googlesearch module
import requests # Importing requests module for making HTTP requests
import unicodedata
import validators
## SETUP ENVIRONMENT VARIABLES
load_dotenv()
env_vars = dotenv_values(".env")
## Define system relevant input data for application
HARD_LIMIT_CHAR = 10000
## Functions
def tag_visible(element):
excluded_tags = ['a', 'style', 'script', 'head', 'title', 'meta', '[document]']
if element.parent.name in excluded_tags:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.find_all(text=tag_visible)
visible_texts = [t.strip() for t in texts if t.strip()]
return " ".join(visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
## Process website and save content to file
def process_website(url, output_file_name):
html = urllib.request.urlopen(url).read()
text_from_webpage = text_from_html(html)
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
# Logging
file_path = output_file_name
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
return text_from_webpage
def get_link_based_on_article_name_via_google(article_title, url_to_watch):
search = article_title + " " + url_to_watch
url = 'https://www.google.com/search'
headers = {
'Accept' : '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82',
}
parameters = {'q': search}
content = requests.get(url, headers = headers, params = parameters).text
soup = BeautifulSoup(content, 'html.parser')
search = soup.find(id = 'search')
first_link = search.find('a')
article_link= first_link['href']
return first_link['href']
def prompt_to_llm_response(text_from_webpage, prompt_input):
prompt = PromptTemplate(
input_variables=["webpage", "prompt_text"],
template="\"{prompt_text}\" \
webpage : \"{webpage}\"",
)
prompt_to_send = prompt.format(webpage=text_from_webpage, prompt_text=prompt_input)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","")
return result_from_chatgpt
def prompt_similarity_to_llm_response(sentence1, sentence2):
prompt = PromptTemplate(
input_variables=["sentence1", "sentence2"],
template="""
Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2?
Answer with one of [strongly agree, agree, disagree, strongly disagree] only.
Sentence 1: {sentence1}
Sentence 2: {sentence2}
""",
)
prompt_to_send = prompt.format(sentence1=sentence1, sentence2=sentence2)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","").lower()
return result_from_chatgpt
## Web Scrapping
url_input_example = "https://news.yahoo.com"
# url_input_example = "https://laion.ai/blog/" # OK
# url_input_example = "https://www.euronews.com/tag/artificial-intelligence" # NOK
# url_input_example = "https://www.theguardian.com/international" #OK
# url_input_example = "https://www.bloomberg.com/europe" #NOK
# url_input_example = "https://news.google.com/home?hl=en-US&gl=US&ceid=US:en" # OK
### USER INPUT HERE
if validators.url(url_input_example):
url_to_watch = st.text_input("Input your URL here", url_input_example)
## Process website and save content to file
text_from_webpage = process_website(url_to_watch, "output.txt")
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
else:
print("URL not valid")
st.header("ERROR : The URL is not valid")
### UI OUTPUT HERE
#st.write("URL not valid")
prompt_news = "Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
result_from_chatgpt = prompt_to_llm_response(text_from_webpage,prompt_news)
result_from_chatgpt_processed = result_from_chatgpt.encode('ascii', 'ignore')
print(json.dumps(json.loads(result_from_chatgpt_processed), indent=4))
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
#Logging
file_path = "output_gpt.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
#with open('final_output.json', 'w') as f:
# print("The json file is created")
### USER INPUT HERE
#topic_of_interest = "Should AI be open sourced?"
topic_of_interest = st.text_input("What is your topic of interest ?","Ukraine War")#UI
empty_list = []
i = 0
for item in json.loads(result_from_chatgpt_processed):
i+=1
output_filename = "article_text"+str(i)+".txt"
article_title = item['title']
article_link = get_link_based_on_article_name_via_google(article_title, url_to_watch)
new_item = {
'title': item['title'],
'metadata': item['metadata'],
'link': article_link,
}
relation_exists = prompt_similarity_to_llm_response(article_title,topic_of_interest)
if relation_exists == "strongly agree" or relation_exists == "agree" :
article_text = process_website(article_link, output_filename)
# Summarize article
prompt_article = "Summarize the following text in 3 sentences: "
article_summary = prompt_to_llm_response(article_text,prompt_article)
# Answer the question
prompt_content = "If user input is a question provide an answer, otherwise summarise content relevant to the input topic. Answer in one sentence".format(topic_of_interest)
user_question_answer = prompt_to_llm_response(article_text,prompt_content)
new_item["summary"]=article_summary
new_item["answer"]=user_question_answer
new_item["related?"]=relation_exists
#else: print("not relevant")
empty_list.append(new_item)
output_json = json.dumps(empty_list, indent=4)
### UI OUTPUT HERE
st.json(output_json)
for j in output_json:
print(j)
# st.header(article["title"])
# st.text(article["date"])
with open("output.json", "w") as outfile:
outfile.write(output_json)
| [
"sentence2",
"\"{prompt_text}\" webpage : \"{webpage}\"",
"sentence1",
"If user input is a question provide an answer, otherwise summarise content relevant to the input topic. Answer in one sentence",
"prompt_text",
"\n Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2? \n Answer with one of [strongly agree, agree, disagree, strongly disagree] only.\n\n Sentence 1: {sentence1}\n Sentence 2: {sentence2}\n ",
"Summarize the following text in 3 sentences: ",
"Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
] |
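In `get_link_based_on_article_name_via_google` above, `soup.find(id='search')` returns `None` when Google serves a consent or CAPTCHA page instead of results, and the following `.find('a')` then raises `AttributeError`. A defensive variant, offered only as a sketch under the assumption that the same results markup is targeted, might look like this:

```python
import requests
from bs4 import BeautifulSoup

def first_google_result(query, timeout=10):
    """Return the first link inside Google's results container, or None if
    the container is missing (e.g. a consent or CAPTCHA page was served)."""
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)",
        "Accept-Language": "en-US,en;q=0.5",
    }
    resp = requests.get("https://www.google.com/search",
                        params={"q": query}, headers=headers, timeout=timeout)
    soup = BeautifulSoup(resp.text, "html.parser")
    container = soup.find(id="search")
    if container is None:                 # results block absent; bail out gracefully
        return None
    link = container.find("a")
    return link["href"] if link is not None and link.has_attr("href") else None
```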
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518164950.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text.\
webpage : \"{webpage}\"",
)
llm = OpenAI(temperature=0.0)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
parsed_articles = json.loads(result_from_chatgpt)
for article in parsed_articles:
print(article["title"])
#st.write(article["title"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518164841.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text.\
webpage : \"{webpage}\"",
)
llm = OpenAI(temperature=0.0)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
parsed_articles = json.loads(result_from_chatgpt)
for article in parsed_articles:
st.write(article["title"])
st.write(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518175426.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
HARD_LIMIT_CHAR = 10000
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://www.lemonde.fr/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fixe this limit, in a smarter way
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
print(result_from_chatgpt)
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
st.json(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518175655.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
HARD_LIMIT_CHAR = 10000
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fixe this limit, in a smarter way
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
print(result_from_chatgpt)
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
st.json(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~gui_20230623005856.py | ## IMPORTS
from bs4 import BeautifulSoup # Importing BeautifulSoup for HTML parsing
from bs4.element import Comment # Importing Comment class for extracting comments from HTML
import urllib.request # Importing urllib.request for making HTTP requests
import streamlit as st # Importing streamlit for building interactive web apps
import os # Importing os for accessing operating system functionalities
from dotenv import load_dotenv # Importing load_dotenv for loading environment variables
from langchain.llms import OpenAI # Importing OpenAI class from langchain.llms module
from langchain.prompts import PromptTemplate # Importing PromptTemplate class from langchain.prompts module
import json # Importing json module for working with JSON data
from dotenv import dotenv_values # Importing dotenv_values for loading environment variables from .env file
from googlesearch import search # Importing search function from googlesearch module
import requests # Importing requests module for making HTTP requests
import unicodedata
import validators
## SETUP ENVIRONMENT VARIABLES
load_dotenv()
env_vars = dotenv_values(".env")
## Define system relevant input data for application
HARD_LIMIT_CHAR = 10000
## Functions
def tag_visible(element):
excluded_tags = ['a', 'style', 'script', 'head', 'title', 'meta', '[document]']
if element.parent.name in excluded_tags:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.find_all(text=tag_visible)
visible_texts = [t.strip() for t in texts if t.strip()]
return " ".join(visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
## Process website and save content to file
def process_website(url, output_file_name):
html = urllib.request.urlopen(url).read()
text_from_webpage = text_from_html(html)
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
# Logging
file_path = output_file_name
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
return text_from_webpage
def get_link_based_on_article_name_via_google(article_title, url_to_watch):
search = article_title + " " + url_to_watch
url = 'https://www.google.com/search'
headers = {
'Accept' : '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82',
}
parameters = {'q': search}
content = requests.get(url, headers = headers, params = parameters).text
soup = BeautifulSoup(content, 'html.parser')
search = soup.find(id = 'search')
first_link = search.find('a')
article_link= first_link['href']
return first_link['href']
def prompt_to_llm_response(text_from_webpage, prompt_input):
prompt = PromptTemplate(
input_variables=["webpage", "prompt_text"],
template="\"{prompt_text}\" \
webpage : \"{webpage}\"",
)
prompt_to_send = prompt.format(webpage=text_from_webpage, prompt_text=prompt_input)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","")
return result_from_chatgpt
def prompt_similarity_to_llm_response(sentence1, sentence2):
prompt = PromptTemplate(
input_variables=["sentence1", "sentence2"],
template="""
Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2?
Answer with one of [strongly agree, agree, disagree, strongly disagree] only.
Sentence 1: {sentence1}
Sentence 2: {sentence2}
""",
)
prompt_to_send = prompt.format(sentence1=sentence1, sentence2=sentence2)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","").lower()
return result_from_chatgpt
## Web Scrapping
url_input = "https://news.yahoo.com"
# url_input = "https://laion.ai/blog/" # OK
# url_input = "https://www.euronews.com/tag/artificial-intelligence" # NOK
# url_input = "https://www.theguardian.com/international" #OK
# url_input = "https://www.bloomberg.com/europe" #NOK
# url_input = "https://news.google.com/home?hl=en-US&gl=US&ceid=US:en" # OK
### USER INPUT HERE
if validators.url(url_input):
url_to_watch = st.text_input("Input your URL here", url_input)
## Process website and save content to file
text_from_webpage = process_website(url_to_watch, "output.txt")
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
else:
print("URL not valid")
### UI OUTPUT HERE
#st.write("URL not valid")
prompt_news = "Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
result_from_chatgpt = prompt_to_llm_response(text_from_webpage,prompt_news)
result_from_chatgpt_processed = result_from_chatgpt.encode('ascii', 'ignore')
print(json.dumps(json.loads(result_from_chatgpt_processed), indent=4))
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
#Logging
file_path = "output_gpt.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
#with open('final_output.json', 'w') as f:
# print("The json file is created")
### USER INPUT HERE
#topic_of_interest = "Should AI be open sourced?"
topic_of_interest = "Ukraine War"
empty_list = []
i = 0
for item in json.loads(result_from_chatgpt_processed):
i+=1
output_filename = "article_text"+str(i)+".txt"
article_title = item['title']
article_link = get_link_based_on_article_name_via_google(article_title, url_to_watch)
new_item = {
'title': item['title'],
'metadata': item['metadata'],
'link': article_link,
}
relation_exists = prompt_similarity_to_llm_response(article_title,topic_of_interest)
if relation_exists == "strongly agree" or relation_exists == "agree" :
article_text = process_website(article_link, output_filename)
# Summarize article
prompt_article = "Summarize the following text in 3 sentences: "
article_summary = prompt_to_llm_response(article_text,prompt_article)
# Answer the question
prompt_content = "If user input is a question provide an answer, otherwise summarise content relevant to the input topic. Answer in one sentence".format(topic_of_interest)
user_question_answer = prompt_to_llm_response(article_text,prompt_content)
new_item["summary"]=article_summary
new_item["answer"]=user_question_answer
new_item["related?"]=relation_exists
#else: print("not relevant")
empty_list.append(new_item)
output_json = json.dumps(empty_list, indent=4)
### UI OUTPUT HERE
with open("output.json", "w") as outfile:
outfile.write(output_json)
| [
"\n Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2? \n Answer with one of [strongly agree, agree, disagree, strongly disagree] only.\n\n Sentence 1: {sentence1}\n Sentence 2: {sentence2}\n ",
"sentence2",
"\"{prompt_text}\" webpage : \"{webpage}\"",
"If user input is a question provide an answer, otherwise summarise content relevant to the input topic. Answer in one sentence",
"prompt_text",
"sentence1",
"Summarize the following text in 3 sentences: ",
"Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
] |
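The GUI revisions above lower-case the model's reply and then compare it against the exact strings "agree" and "strongly agree", so a stray leading space or trailing period still causes a relevant article to be skipped. A slightly more tolerant check, offered as my own sketch rather than the repository's code:

```python
def is_relevant(llm_answer: str) -> bool:
    """Map the similarity prompt's free-text reply onto a yes/no decision."""
    answer = llm_answer.strip().lower().rstrip(".")
    return answer in ("strongly agree", "agree")

# is_relevant(" Strongly agree.")  -> True
# is_relevant("disagree")          -> False
```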
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518171512.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fixe this limit, in a smarter way
text_from_webpage = text_from_webpage[:15000]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.0)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
load_dotenv()
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518180554.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
HARD_LIMIT_CHAR = 10000
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fixe this limit, in a smarter way
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
print(result_from_chatgpt)
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
#st.json(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518172459.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
HARD_LIMIT_CHAR = 10000
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fixe this limit, in a smarter way
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
parsed_articles = json.loads(result_from_chatgpt.split('[')[1])
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
st.json(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160851.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. \n webpage : {webpage}",
)
llm = OpenAI(temperature=0.9)
prompt_to_send = prompt.format(webpage="colorful socks")
result_from_chatgpt = llm(prompt_to_send)
st.write(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. \n webpage : {webpage}",
"colorful socks"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160703.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text)
print("Variable content saved to the file:", file_path)
#LLM part
prompt = PromptTemplate(
input_variables=["webpage"],
template=r"In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. \n webpage : {webpage}",
)
llm = OpenAI(temperature=0.9)
prompt_to_send = prompt.format(product="colorful socks")
result_from_chatgpt = llm(prompt_to_send)
st.write(result_from_chatgpt)
| [
"colorful socks",
"In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. \\n webpage : {webpage}"
] |
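In the two revisions above, the template's only input variable is `webpage`, yet one call formats it with the placeholder string "colorful socks" instead of the scraped text and the other passes `product=` (both leftovers from the LangChain quick-start), so the page content never reaches the model. A minimal corrected call, sketched with an abbreviated template and assuming the same LangChain 0.0.x-era API used throughout these files, would be:

```python
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

prompt = PromptTemplate(
    input_variables=["webpage"],
    template="In this web page, list all the articles and their publication dates. "
             "In Json format. No other text.\nwebpage : {webpage}",
)

text_from_webpage = "..."  # the visible text returned by text_from_html(html)
prompt_to_send = prompt.format(webpage=text_from_webpage)  # keyword must match input_variables
llm = OpenAI(temperature=0.0)  # reads OPENAI_API_KEY from the environment
result_from_chatgpt = llm(prompt_to_send)
```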
2024-01-10 | ilisparrow/llm_tests | .history~gui_20230623010835.py | ## IMPORTS
from bs4 import BeautifulSoup # Importing BeautifulSoup for HTML parsing
from bs4.element import Comment # Importing Comment class for extracting comments from HTML
import urllib.request # Importing urllib.request for making HTTP requests
import streamlit as st # Importing streamlit for building interactive web apps
import os # Importing os for accessing operating system functionalities
from dotenv import load_dotenv # Importing load_dotenv for loading environment variables
from langchain.llms import OpenAI # Importing OpenAI class from langchain.llms module
from langchain.prompts import PromptTemplate # Importing PromptTemplate class from langchain.prompts module
import json # Importing json module for working with JSON data
from dotenv import dotenv_values # Importing dotenv_values for loading environment variables from .env file
from googlesearch import search # Importing search function from googlesearch module
import requests # Importing requests module for making HTTP requests
import unicodedata
import validators
## SETUP ENVIRONMENT VARIABLES
load_dotenv()
env_vars = dotenv_values(".env")
## Define system relevant input data for application
HARD_LIMIT_CHAR = 10000
## Functions
def tag_visible(element):
excluded_tags = ['a', 'style', 'script', 'head', 'title', 'meta', '[document]']
if element.parent.name in excluded_tags:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.find_all(text=tag_visible)
visible_texts = [t.strip() for t in texts if t.strip()]
return " ".join(visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
## Process website and save content to file
def process_website(url, output_file_name):
html = urllib.request.urlopen(url).read()
text_from_webpage = text_from_html(html)
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
# Logging
file_path = output_file_name
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
return text_from_webpage
def get_link_based_on_article_name_via_google(article_title, url_to_watch):
search = article_title + " " + url_to_watch
url = 'https://www.google.com/search'
headers = {
'Accept' : '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82',
}
parameters = {'q': search}
content = requests.get(url, headers = headers, params = parameters).text
soup = BeautifulSoup(content, 'html.parser')
search = soup.find(id = 'search')
first_link = search.find('a')
article_link= first_link['href']
return first_link['href']
def prompt_to_llm_response(text_from_webpage, prompt_input):
prompt = PromptTemplate(
input_variables=["webpage", "prompt_text"],
template="\"{prompt_text}\" \
webpage : \"{webpage}\"",
)
prompt_to_send = prompt.format(webpage=text_from_webpage, prompt_text=prompt_input)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","")
return result_from_chatgpt
def prompt_similarity_to_llm_response(sentence1, sentence2):
prompt = PromptTemplate(
input_variables=["sentence1", "sentence2"],
template="""
Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2?
Answer with one of [strongly agree, agree, disagree, strongly disagree] only.
Sentence 1: {sentence1}
Sentence 2: {sentence2}
""",
)
prompt_to_send = prompt.format(sentence1=sentence1, sentence2=sentence2)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","").lower()
return result_from_chatgpt
## Web Scrapping
url_input_example = "https://news.yahoo.com"
# url_input_example = "https://laion.ai/blog/" # OK
# url_input_example = "https://www.euronews.com/tag/artificial-intelligence" # NOK
# url_input_example = "https://www.theguardian.com/international" #OK
# url_input_example = "https://www.bloomberg.com/europe" #NOK
# url_input_example = "https://news.google.com/home?hl=en-US&gl=US&ceid=US:en" # OK
url_to_watch = st.text_input("Input your URL here", url_input_example)
topic_of_interest = st.text_input("What is your topic of interest ?","Ukraine War")#UI
### USER INPUT HERE
if validators.url(url_input_example):
## Process website and save content to file
text_from_webpage = process_website(url_to_watch, "output.txt")
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
else:
print("URL not valid")
st.header("ERROR : The URL is not valid")
### UI OUTPUT HERE
#st.write("URL not valid")
if st.button("Process"):
prompt_news = "Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
result_from_chatgpt = prompt_to_llm_response(text_from_webpage,prompt_news)
result_from_chatgpt_processed = result_from_chatgpt.encode('ascii', 'ignore')
print(json.dumps(json.loads(result_from_chatgpt_processed), indent=4))
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
#Logging
file_path = "output_gpt.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
#with open('final_output.json', 'w') as f:
# print("The json file is created")
### USER INPUT HERE
#topic_of_interest = "Should AI be open sourced?"
empty_list = []
i = 0
for item in json.loads(result_from_chatgpt_processed):
i+=1
output_filename = "article_text"+str(i)+".txt"
article_title = item['title']
article_link = get_link_based_on_article_name_via_google(article_title, url_to_watch)
new_item = {
'title': item['title'],
'metadata': item['metadata'],
'link': article_link,
}
relation_exists = prompt_similarity_to_llm_response(article_title,topic_of_interest)
if relation_exists == "strongly agree" or relation_exists == "agree" :
article_text = process_website(article_link, output_filename)
# Summarize article
prompt_article = "Summarize the following text in 3 sentences: "
article_summary = prompt_to_llm_response(article_text,prompt_article)
# Answer the question
prompt_content = "If user input is a question provide an answer, otherwise summarise content relevant to the input topic. Answer in one sentence".format(topic_of_interest)
user_question_answer = prompt_to_llm_response(article_text,prompt_content)
new_item["summary"]=article_summary
new_item["answer"]=user_question_answer
new_item["related?"]=relation_exists
#else: print("not relevant")
empty_list.append(new_item)
output_json = json.dumps(empty_list, indent=4)
### UI OUTPUT HERE
st.json(output_json)
for j in output_json:
print(j)
# st.header(article["title"])
# st.text(article["date"])
with open("output.json", "w") as outfile:
outfile.write(output_json)
| [
"\n Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2? \n Answer with one of [strongly agree, agree, disagree, strongly disagree] only.\n\n Sentence 1: {sentence1}\n Sentence 2: {sentence2}\n ",
"sentence2",
"\"{prompt_text}\" webpage : \"{webpage}\"",
"If user input is a question provide an answer, otherwise summarise content relevant to the input topic. Answer in one sentence",
"prompt_text",
"sentence1",
"Summarize the following text in 3 sentences: ",
"Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
] |
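A small point on the output loop in the revision above: `for j in output_json:` iterates over the characters of the serialized JSON string, so each `print(j)` emits a single character. To render one entry per article, the parsed list should be walked instead; a sketch reusing the file's own `output_json` variable:

```python
import json
import streamlit as st

articles = json.loads(output_json)            # output_json is the JSON string built above
for article in articles:
    st.header(article["title"])
    st.text(article.get("metadata", ""))      # metadata can be absent for some articles
```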
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518161350.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text.\
webpage : {webpage}",
)
llm = OpenAI(temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send)
st.write(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. webpage : {webpage}"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518171523.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fixe this limit, in a smarter way
text_from_webpage = text_from_webpage[:15000]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
load_dotenv()
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518170605.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format. No Other text.\
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.0)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
load_dotenv()
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
for article in parsed_articles:
print(article["title"])
st.title(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format. No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518175014.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
HARD_LIMIT_CHAR = 10000
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://www.nytimes.com/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fix this limit, in a smarter way
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
st.json(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | scrapping.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
from googlesearch import search
#Setup env vars :
load_dotenv()
HARD_LIMIT_CHAR = 10000
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
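# --- Hedged usage sketch (added for illustration; not part of the original snapshot) ---
# extract_json_values is defined above but never called in this script. The guarded demo
# below shows how it could recover JSON embedded in a noisy LLM reply; the sample string
# and the DEMO_EXTRACT_JSON flag are hypothetical.
if os.environ.get("DEMO_EXTRACT_JSON"):
    noisy_reply = 'Sure! [{"title": "A", "date": "2023-05-18"}] hope that helps'
    print(extract_json_values(noisy_reply))  # -> [[{'title': 'A', 'date': '2023-05-18'}]]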
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fix this limit, in a smarter way
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
print(result_from_chatgpt)
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
#Logging
file_path = "output_gpt.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
#st.json(parsed_articles)
text_to_watch_for = st.text_input("What should we look for?", "e.g. investments or Taiwan")#UI
for article in parsed_articles:
print("--------------------------")
print(article["title"])
query = article["title"]
for j in search(query, tld="co.in", num=1, stop=1, pause=2):
print(j)
st.header(article["title"])
st.text(article["date"])
#TODO : Do a google search limited to the given website for each article, and get their content
#TODO : Add a field to ask a question (maybe multiple choice field); a hedged sketch of these steps follows below
#TODO : Ask the article and the question to Chatgpt
#TODO : Display results to the user
#TODO :
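# --- Hedged sketch of the remaining TODOs (illustration only; not the author's implementation) ---
# It reuses text_from_html, PromptTemplate, HARD_LIMIT_CHAR and the `llm` instance defined
# above; `article_url` and `question` are hypothetical inputs (for example the first Google
# hit `j` and the text_to_watch_for field).
def answer_question_about_article(article_url, question):
    article_html = urllib.request.urlopen(article_url).read()
    article_text = text_from_html(article_html)[:HARD_LIMIT_CHAR]
    qa_prompt = PromptTemplate(
        input_variables=["article", "question"],
        template="Answer the question using only this article. Question: \"{question}\" Article: \"{article}\"",
    )
    return llm(qa_prompt.format(article=article_text, question=question))
# Example (not executed here): st.write(answer_question_about_article(j, text_to_watch_for))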
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~gui_20230623004823.py | ## IMPORTS
from bs4 import BeautifulSoup # Importing BeautifulSoup for HTML parsing
from bs4.element import Comment # Importing Comment class for extracting comments from HTML
import urllib.request # Importing urllib.request for making HTTP requests
import streamlit as st # Importing streamlit for building interactive web apps
import os # Importing os for accessing operating system functionalities
from dotenv import load_dotenv # Importing load_dotenv for loading environment variables
from langchain.llms import OpenAI # Importing OpenAI class from langchain.llms module
from langchain.prompts import PromptTemplate # Importing PromptTemplate class from langchain.prompts module
import json # Importing json module for working with JSON data
from dotenv import dotenv_values # Importing dotenv_values for loading environment variables from .env file
from googlesearch import search # Importing search function from googlesearch module
import requests # Importing requests module for making HTTP requests
## SETUP ENVIRONMENT VARIABLES
load_dotenv()
env_vars = dotenv_values(".env")
| [] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518175055.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
HARD_LIMIT_CHAR = 10000
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://www.nytimes.com/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fix this limit, in a smarter way
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
print(result_from_chatgpt)
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
st.json(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518172650.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
HARD_LIMIT_CHAR = 10000
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fix this limit, in a smarter way
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
st.json(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160126.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")
html = urllib.request.urlopen(url_to_watch).read()
text = text_from_html(html)
file_path = "output.txt"
# Open the file in write mode and write the text content
with open(file_path, "w") as file:
file.write(text)
print("Variable content saved to the file:", file_path)
#LLM part
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
llm = OpenAI(temperature=0.9)
text = "What would be a good company name for a company that makes colorful socks?"
print(llm(text))
st.write('The current movie title is')
| [
"What is a good name for a company that makes {product}?"
] |
2024-01-10 | ilisparrow/llm_tests | .history~gui_20230623005130.py | ## IMPORTS
from bs4 import BeautifulSoup # Importing BeautifulSoup for HTML parsing
from bs4.element import Comment # Importing Comment class for extracting comments from HTML
import urllib.request # Importing urllib.request for making HTTP requests
import streamlit as st # Importing streamlit for building interactive web apps
import os # Importing os for accessing operating system functionalities
from dotenv import load_dotenv # Importing load_dotenv for loading environment variables
from langchain.llms import OpenAI # Importing OpenAI class from langchain.llms module
from langchain.prompts import PromptTemplate # Importing PromptTemplate class from langchain.prompts module
import json # Importing json module for working with JSON data
from dotenv import dotenv_values # Importing dotenv_values for loading environment variables from .env file
from googlesearch import search # Importing search function from googlesearch module
import requests # Importing requests module for making HTTP requests
## SETUP ENVIRONMENT VARIABLES
load_dotenv()
env_vars = dotenv_values(".env")
## Define system relevant input data for application
HARD_LIMIT_CHAR = 10000
## Functions
def tag_visible(element):
excluded_tags = ['a', 'style', 'script', 'head', 'title', 'meta', '[document]']
if element.parent.name in excluded_tags:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.find_all(text=tag_visible)
visible_texts = [t.strip() for t in texts if t.strip()]
return " ".join(visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
## Process website and save content to file
def process_website(url, output_file_name):
html = urllib.request.urlopen(url).read()
text_from_webpage = text_from_html(html)
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
# Logging
file_path = output_file_name
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
return text_from_webpage
def get_link_based_on_article_name_via_google(article_title):
search = article_title
url = 'https://www.google.com/search'
headers = {
'Accept' : '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82',
}
parameters = {'q': search}
content = requests.get(url, headers = headers, params = parameters).text
soup = BeautifulSoup(content, 'html.parser')
search = soup.find(id = 'search')
first_link = search.find('a')
article_link= first_link['href']
return first_link['href']
def prompt_to_llm_response(text_from_webpage, prompt_input):
prompt = PromptTemplate(
input_variables=["webpage", "prompt_text"],
template="\"{prompt_text}\" \
webpage : \"{webpage}\"",
)
prompt_to_send = prompt.format(webpage=text_from_webpage, prompt_text=prompt_input)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0.9)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","")
return result_from_chatgpt
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")
## Process website and save content to file
text_from_webpage = process_website(url_to_watch, "output.txt")
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
prompt_news = "In this web page, can you find a pattern, list all the article titles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text."
result_from_chatgpt = prompt_to_llm_response(text_from_webpage,prompt_news)
parsed_articles = json.loads(result_from_chatgpt)
for article in parsed_articles:
print("--------------------------")
print(article["title"])
query = article["title"]
for j in search(query, tld="co.in", num=1, stop=1, pause=2):
print(j)
st.header(article["title"])
st.text(article["date"])
st.json(json.dumps(json.loads(result_from_chatgpt), indent=4))
| [
"In this web page, can you find a pattern, list all the article titles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text.",
"\"{prompt_text}\" webpage : \"{webpage}\"",
"prompt_text"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518175539.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
HARD_LIMIT_CHAR = 10000
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://www.latimes.com/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fix this limit, in a smarter way
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
print(result_from_chatgpt)
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
st.json(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160235.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text)
print("Variable content saved to the file:", file_path)
#LLM part
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
llm = OpenAI(temperature=0.9)
text = "What would be a good company name for a company that makes colorful socks?"
print(llm(text))
st.write('The current movie title is')
| [
"What is a good name for a company that makes {product}?"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160157.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text = text_from_html(html)
#Web Scrapping
file_path = "output.txt"
# Open the file in write mode and write the text content
with open(file_path, "w") as file:
file.write(text)
print("Variable content saved to the file:", file_path)
#LLM part
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
llm = OpenAI(temperature=0.9)
text = "What would be a good company name for a company that makes colorful socks?"
print(llm(text))
st.write('The current movie title is')
| [
"What is a good name for a company that makes {product}?"
] |
2024-01-10 | ilisparrow/llm_tests | .history~gui_20230623010123.py | ## IMPORTS
from bs4 import BeautifulSoup # Importing BeautifulSoup for HTML parsing
from bs4.element import Comment # Importing Comment class for extracting comments from HTML
import urllib.request # Importing urllib.request for making HTTP requests
import streamlit as st # Importing streamlit for building interactive web apps
import os # Importing os for accessing operating system functionalities
from dotenv import load_dotenv # Importing load_dotenv for loading environment variables
from langchain.llms import OpenAI # Importing OpenAI class from langchain.llms module
from langchain.prompts import PromptTemplate # Importing PromptTemplate class from langchain.prompts module
import json # Importing json module for working with JSON data
from dotenv import dotenv_values # Importing dotenv_values for loading environment variables from .env file
from googlesearch import search # Importing search function from googlesearch module
import requests # Importing requests module for making HTTP requests
import unicodedata
import validators
## SETUP ENVIRONMENT VARIABLES
load_dotenv()
env_vars = dotenv_values(".env")
## Define system relevant input data for application
HARD_LIMIT_CHAR = 10000
## Functions
def tag_visible(element):
excluded_tags = ['a', 'style', 'script', 'head', 'title', 'meta', '[document]']
if element.parent.name in excluded_tags:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.find_all(text=tag_visible)
visible_texts = [t.strip() for t in texts if t.strip()]
return " ".join(visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
## Process website and save content to file
def process_website(url, output_file_name):
html = urllib.request.urlopen(url).read()
text_from_webpage = text_from_html(html)
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
# Logging
file_path = output_file_name
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
return text_from_webpage
def get_link_based_on_article_name_via_google(article_title, url_to_watch):
search = article_title + " " + url_to_watch
url = 'https://www.google.com/search'
headers = {
'Accept' : '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82',
}
parameters = {'q': search}
content = requests.get(url, headers = headers, params = parameters).text
soup = BeautifulSoup(content, 'html.parser')
search = soup.find(id = 'search')
first_link = search.find('a')
article_link= first_link['href']
return first_link['href']
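# Hedged usage sketch (illustration only; the title and site below are hypothetical):
#   get_link_based_on_article_name_via_google("Some article headline", "https://news.yahoo.com")
# is expected to return the URL of the first Google result for that query, and may fail if
# Google serves a consent or captcha page and soup.find(id='search') returns None.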
def prompt_to_llm_response(text_from_webpage, prompt_input):
prompt = PromptTemplate(
input_variables=["webpage", "prompt_text"],
template="\"{prompt_text}\" \
webpage : \"{webpage}\"",
)
prompt_to_send = prompt.format(webpage=text_from_webpage, prompt_text=prompt_input)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","")
return result_from_chatgpt
def prompt_similarity_to_llm_response(sentence1, sentence2):
prompt = PromptTemplate(
input_variables=["sentence1", "sentence2"],
template="""
Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2?
Answer with one of [strongly agree, agree, disagree, strongly disagree] only.
Sentence 1: {sentence1}
Sentence 2: {sentence2}
""",
)
prompt_to_send = prompt.format(sentence1=sentence1, sentence2=sentence2)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","").lower()
return result_from_chatgpt
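# Hedged usage sketch (illustration only; the sentences below are hypothetical): the loop
# further down calls this with an article title and the user's topic, e.g.
#   prompt_similarity_to_llm_response("Kyiv hit by new wave of drone strikes", "Ukraine War")
# and expects one of "strongly agree", "agree", "disagree" or "strongly disagree" back.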
## Web Scrapping
url_input = "https://news.yahoo.com"
# url_input = "https://laion.ai/blog/" # OK
# url_input = "https://www.euronews.com/tag/artificial-intelligence" # NOK
# url_input = "https://www.theguardian.com/international" #OK
# url_input = "https://www.bloomberg.com/europe" #NOK
# url_input = "https://news.google.com/home?hl=en-US&gl=US&ceid=US:en" # OK
### USER INPUT HERE
if validators.url(url_input):
url_to_watch = st.text_input("Input your URL here", url_input)
## Process website and save content to file
text_from_webpage = process_website(url_to_watch, "output.txt")
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
else:
print("URL not valid")
### UI OUTPUT HERE
#st.write("URL not valid")
prompt_news = "Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
result_from_chatgpt = prompt_to_llm_response(text_from_webpage,prompt_news)
result_from_chatgpt_processed = result_from_chatgpt.encode('ascii', 'ignore')
print(json.dumps(json.loads(result_from_chatgpt_processed), indent=4))
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
#Logging
file_path = "output_gpt.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
#with open('final_output.json', 'w') as f:
# print("The json file is created")
### USER INPUT HERE
#topic_of_interest = "Should AI be open sourced?"
topic_of_interest = st.text_input("What is your topic of interest?", "Ukraine War")#UI
empty_list = []
i = 0
for item in json.loads(result_from_chatgpt_processed):
i+=1
output_filename = "article_text"+str(i)+".txt"
article_title = item['title']
article_link = get_link_based_on_article_name_via_google(article_title, url_to_watch)
new_item = {
'title': item['title'],
'metadata': item['metadata'],
'link': article_link,
}
relation_exists = prompt_similarity_to_llm_response(article_title,topic_of_interest)
if relation_exists == "strongly agree" or relation_exists == "agree" :
article_text = process_website(article_link, output_filename)
# Summarize article
prompt_article = "Summarize the following text in 3 sentences: "
article_summary = prompt_to_llm_response(article_text,prompt_article)
# Answer the question
prompt_content = "If user input is a question provide an answer, otherwise summarise content relevant to the input topic. Answer in one sentence".format(topic_of_interest)
user_question_answer = prompt_to_llm_response(article_text,prompt_content)
new_item["summary"]=article_summary
new_item["answer"]=user_question_answer
new_item["related?"]=relation_exists
#else: print("not relevant")
empty_list.append(new_item)
output_json = json.dumps(empty_list, indent=4)
st.json(output_json)
### UI OUTPUT HERE
with open("output.json", "w") as outfile:
outfile.write(output_json)
| [
"sentence2",
"\"{prompt_text}\" webpage : \"{webpage}\"",
"sentence1",
"If user input is a question provide an answer, otherwise summarise content relevant to the input topic. Answer in one sentence",
"prompt_text",
"\n Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2? \n Answer with one of [strongly agree, agree, disagree, strongly disagree] only.\n\n Sentence 1: {sentence1}\n Sentence 2: {sentence2}\n ",
"Summarize the following text in 3 sentences: ",
"Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
] |
2024-01-10 | ilisparrow/llm_tests | .history~gui_20230623004953.py | ## IMPORTS
from bs4 import BeautifulSoup # Importing BeautifulSoup for HTML parsing
from bs4.element import Comment # Importing Comment class for extracting comments from HTML
import urllib.request # Importing urllib.request for making HTTP requests
import streamlit as st # Importing streamlit for building interactive web apps
import os # Importing os for accessing operating system functionalities
from dotenv import load_dotenv # Importing load_dotenv for loading environment variables
from langchain.llms import OpenAI # Importing OpenAI class from langchain.llms module
from langchain.prompts import PromptTemplate # Importing PromptTemplate class from langchain.prompts module
import json # Importing json module for working with JSON data
from dotenv import dotenv_values # Importing dotenv_values for loading environment variables from .env file
from googlesearch import search # Importing search function from googlesearch module
import requests # Importing requests module for making HTTP requests
## SETUP ENVIRONMENT VARIABLES
load_dotenv()
env_vars = dotenv_values(".env")
## Define system relevant input data for application
HARD_LIMIT_CHAR = 10000
## Functions
def tag_visible(element):
excluded_tags = ['a', 'style', 'script', 'head', 'title', 'meta', '[document]']
if element.parent.name in excluded_tags:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.find_all(text=tag_visible)
visible_texts = [t.strip() for t in texts if t.strip()]
return " ".join(visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
## Process website and save content to file
def process_website(url, output_file_name):
html = urllib.request.urlopen(url).read()
text_from_webpage = text_from_html(html)
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
# Logging
file_path = output_file_name
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
return text_from_webpage
def get_link_based_on_article_name_via_google(article_title):
search = article_title
url = 'https://www.google.com/search'
headers = {
'Accept' : '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82',
}
parameters = {'q': search}
content = requests.get(url, headers = headers, params = parameters).text
soup = BeautifulSoup(content, 'html.parser')
search = soup.find(id = 'search')
first_link = search.find('a')
article_link= first_link['href']
return first_link['href']
def prompt_to_llm_response(text_from_webpage, prompt_input):
prompt = PromptTemplate(
input_variables=["webpage", "prompt_text"],
template="\"{prompt_text}\" \
webpage : \"{webpage}\"",
)
prompt_to_send = prompt.format(webpage=text_from_webpage, prompt_text=prompt_input)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0.9)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","")
return result_from_chatgpt
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")
## Process website and save content to file
text_from_webpage = process_website(url_to_watch, "output.txt")
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
prompt_news = "In this web page, can you find a pattern, list all the article titles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text."
result_from_chatgpt = prompt_to_llm_response(text_from_webpage,prompt_news)
print(json.dumps(json.loads(result_from_chatgpt), indent=4))
#print(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the article titles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text.",
"\"{prompt_text}\" webpage : \"{webpage}\"",
"prompt_text"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518170516.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format. No Other text.\
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.0)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
load_dotenv()
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
for article in parsed_articles:
print(article["title"])
    st.title(article["title"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format. No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518170209.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
print(dotenv_values(".env"))
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text.\
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=os.getenv("OPENAI_API_KEY"), temperature=0.0)  # read the key from the environment instead of passing a literal string
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
load_dotenv()
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
for article in parsed_articles:
print(article["title"])
#st.write(article["title"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518174625.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
HARD_LIMIT_CHAR = 10000
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fix this limit, in a smarter way
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
st.json(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518175100.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
HARD_LIMIT_CHAR = 10000
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://www.nytimes.com/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fix this limit, in a smarter way
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
print(result_from_chatgpt)
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
st.json(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518171509.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fix this limit, in a smarter way
text_from_webpage = text_from_webpage[:15000]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.0)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
load_dotenv()
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~gui_20230623005939.py | ## IMPORTS
from bs4 import BeautifulSoup # Importing BeautifulSoup for HTML parsing
from bs4.element import Comment # Importing Comment class for extracting comments from HTML
import urllib.request # Importing urllib.request for making HTTP requests
import streamlit as st # Importing streamlit for building interactive web apps
import os # Importing os for accessing operating system functionalities
from dotenv import load_dotenv # Importing load_dotenv for loading environment variables
from langchain.llms import OpenAI # Importing OpenAI class from langchain.llms module
from langchain.prompts import PromptTemplate # Importing PromptTemplate class from langchain.prompts module
import json # Importing json module for working with JSON data
from dotenv import dotenv_values # Importing dotenv_values for loading environment variables from .env file
from googlesearch import search # Importing search function from googlesearch module
import requests # Importing requests module for making HTTP requests
import unicodedata
import validators
## SETUP ENVIRONMENT VARIABLES
load_dotenv()
env_vars = dotenv_values(".env")
## Define system relevant input data for application
HARD_LIMIT_CHAR = 10000
## Functions
def tag_visible(element):
excluded_tags = ['a', 'style', 'script', 'head', 'title', 'meta', '[document]']
if element.parent.name in excluded_tags:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.find_all(text=tag_visible)
visible_texts = [t.strip() for t in texts if t.strip()]
return " ".join(visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
## Process website and save content to file
def process_website(url, output_file_name):
html = urllib.request.urlopen(url).read()
text_from_webpage = text_from_html(html)
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
# Logging
file_path = output_file_name
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
return text_from_webpage
def get_link_based_on_article_name_via_google(article_title, url_to_watch):
search = article_title + " " + url_to_watch
url = 'https://www.google.com/search'
headers = {
'Accept' : '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82',
}
parameters = {'q': search}
content = requests.get(url, headers = headers, params = parameters).text
soup = BeautifulSoup(content, 'html.parser')
search = soup.find(id = 'search')
first_link = search.find('a')
article_link= first_link['href']
return first_link['href']
def prompt_to_llm_response(text_from_webpage, prompt_input):
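    # Send the given instruction plus the scraped page text to the LLM and return its cleaned-up answer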
prompt = PromptTemplate(
input_variables=["webpage", "prompt_text"],
template="\"{prompt_text}\" \
webpage : \"{webpage}\"",
)
prompt_to_send = prompt.format(webpage=text_from_webpage, prompt_text=prompt_input)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","")
return result_from_chatgpt
def prompt_similarity_to_llm_response(sentence1, sentence2):
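    # Ask the LLM how relevant sentence1 is to sentence2; expected answers: strongly agree, agree, disagree, strongly disagree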
prompt = PromptTemplate(
input_variables=["sentence1", "sentence2"],
template="""
Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2?
Answer with one of [strongly agree, agree, disagree, strongly disagree] only.
Sentence 1: {sentence1}
Sentence 2: {sentence2}
""",
)
prompt_to_send = prompt.format(sentence1=sentence1, sentence2=sentence2)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
    result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","").lower().strip()
return result_from_chatgpt
## Web Scrapping
url_input = "https://news.yahoo.com"
# url_input = "https://laion.ai/blog/" # OK
# url_input = "https://www.euronews.com/tag/artificial-intelligence" # NOK
# url_input = "https://www.theguardian.com/international" #OK
# url_input = "https://www.bloomberg.com/europe" #NOK
# url_input = "https://news.google.com/home?hl=en-US&gl=US&ceid=US:en" # OK
### USER INPUT HERE
if validators.url(url_input):
url_to_watch = st.text_input("Input your URL here", url_input)
## Process website and save content to file
text_from_webpage = process_website(url_to_watch, "output.txt")
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
else:
print("URL not valid")
### UI OUTPUT HERE
#st.write("URL not valid")
prompt_news = "Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
result_from_chatgpt = prompt_to_llm_response(text_from_webpage,prompt_news)
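# Drop non-ASCII characters before parsing the model output (encode returns bytes, which json.loads accepts)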
result_from_chatgpt_processed = result_from_chatgpt.encode('ascii', 'ignore')
print(json.dumps(json.loads(result_from_chatgpt_processed), indent=4))
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
#Logging
file_path = "output_gpt.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
#with open('final_output.json', 'w') as f:
# print("The json file is created")
### USER INPUT HERE
#topic_of_interest = "Should AI be open sourced?"
topic_of_interest = "Ukraine War"
empty_list = []
i = 0
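# For each extracted article: look up its link, check relevance to the topic, and summarize it when relevant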
for item in json.loads(result_from_chatgpt_processed):
i+=1
output_filename = "article_text"+str(i)+".txt"
article_title = item['title']
article_link = get_link_based_on_article_name_via_google(article_title, url_to_watch)
new_item = {
'title': item['title'],
'metadata': item['metadata'],
'link': article_link,
}
relation_exists = prompt_similarity_to_llm_response(article_title,topic_of_interest)
if relation_exists == "strongly agree" or relation_exists == "agree" :
article_text = process_website(article_link, output_filename)
# Summarize article
prompt_article = "Summarize the following text in 3 sentences: "
article_summary = prompt_to_llm_response(article_text,prompt_article)
# Answer the question
prompt_content = "If user input is a question provide an answer, otherwise summarise content relevant to the input topic. Answer in one sentence".format(topic_of_interest)
user_question_answer = prompt_to_llm_response(article_text,prompt_content)
new_item["summary"]=article_summary
new_item["answer"]=user_question_answer
new_item["related?"]=relation_exists
#else: print("not relevant")
empty_list.append(new_item)
output_json = json.dumps(empty_list, indent=4)
st.json(output_json)
### UI OUTPUT HERE
with open("output.json", "w") as outfile:
outfile.write(output_json)
| [
"\n Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2? \n Answer with one of [strongly agree, agree, disagree, strongly disagree] only.\n\n Sentence 1: {sentence1}\n Sentence 2: {sentence2}\n ",
"sentence2",
"\"{prompt_text}\" webpage : \"{webpage}\"",
"If user input is a question provide an answer, otherwise summarise content relevant to the input topic. Answer in one sentence",
"prompt_text",
"sentence1",
"Summarize the following text in 3 sentences: ",
"Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518175347.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
HARD_LIMIT_CHAR = 10000
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
#TODO : DO URL Check and show message when not valid
#Web scraping
url_to_watch = st.text_input("Input your url here","https://www.nytimes.com/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fix this limit in a smarter way
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
print(result_from_chatgpt)
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
st.json(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160748.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text)
print("Variable content saved to the file:", file_path)
#LLM part
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
llm = OpenAI(temperature=0.9)
prompt_to_send = prompt.format(product="colorful socks")
result_from_chatgpt = llm(prompt_to_send)
st.write(result_from_chatgpt)
| [
"colorful socks",
"What is a good name for a company that makes {product}?"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518164708.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web scraping
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text.\
webpage : \"{webpage}\"",
)
llm = OpenAI(temperature=0.0)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
parsed_data = json.loads(result_from_chatgpt)
st.write(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~gui_20230623005121.py | ## IMPORTS
from bs4 import BeautifulSoup # Importing BeautifulSoup for HTML parsing
from bs4.element import Comment # Importing Comment class for extracting comments from HTML
import urllib.request # Importing urllib.request for making HTTP requests
import streamlit as st # Importing streamlit for building interactive web apps
import os # Importing os for accessing operating system functionalities
from dotenv import load_dotenv # Importing load_dotenv for loading environment variables
from langchain.llms import OpenAI # Importing OpenAI class from langchain.llms module
from langchain.prompts import PromptTemplate # Importing PromptTemplate class from langchain.prompts module
import json # Importing json module for working with JSON data
from dotenv import dotenv_values # Importing dotenv_values for loading environment variables from .env file
from googlesearch import search # Importing search function from googlesearch module
import requests # Importing requests module for making HTTP requests
## SETUP ENVIRONMENT VARIABLES
load_dotenv()
env_vars = dotenv_values(".env")
## Define system relevant input data for application
HARD_LIMIT_CHAR = 10000
## Functions
def tag_visible(element):
excluded_tags = ['a', 'style', 'script', 'head', 'title', 'meta', '[document]']
if element.parent.name in excluded_tags:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.find_all(text=tag_visible)
visible_texts = [t.strip() for t in texts if t.strip()]
return " ".join(visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
## Process website and save content to file
def process_website(url, output_file_name):
html = urllib.request.urlopen(url).read()
text_from_webpage = text_from_html(html)
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
# Logging
file_path = output_file_name
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
return text_from_webpage
def get_link_based_on_article_name_via_google(article_title):
search = article_title
url = 'https://www.google.com/search'
headers = {
'Accept' : '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82',
}
parameters = {'q': search}
content = requests.get(url, headers = headers, params = parameters).text
soup = BeautifulSoup(content, 'html.parser')
search = soup.find(id = 'search')
first_link = search.find('a')
article_link= first_link['href']
return first_link['href']
def prompt_to_llm_response(text_from_webpage, prompt_input):
prompt = PromptTemplate(
input_variables=["webpage", "prompt_text"],
template="\"{prompt_text}\" \
webpage : \"{webpage}\"",
)
prompt_to_send = prompt.format(webpage=text_from_webpage, prompt_text=prompt_input)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0.9)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","")
return result_from_chatgpt
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")
## Process website and save content to file
text_from_webpage = process_website(url_to_watch, "output.txt")
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
prompt_news = "In this web page, can you find a pattern, list all the article titles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text."
result_from_chatgpt = prompt_to_llm_response(text_from_webpage,prompt_news)
parsed_articles = json.loads(result_from_chatgpt)
for article in parsed_articles:
print("--------------------------")
print(article["title"])
query = article["title"]
for j in search(query, tld="co.in", num=1, stop=1, pause=2):
print(j)
st.header(article["title"])
st.text(article["date"])
st.json(json.dumps(json.loads(result_from_chatgpt), indent=4))
print(json.dumps(json.loads(result_from_chatgpt), indent=4))
#print(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the article titles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text.",
"\"{prompt_text}\" webpage : \"{webpage}\"",
"prompt_text"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160321.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web scraping
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text)
print("Variable content saved to the file:", file_path)
#LLM part
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles, and their publication dates. in Json format. webpage : {webpage}?",)
llm = OpenAI(temperature=0.9)
text = "What would be a good company name for a company that makes colorful socks?"
print(llm(text))
st.write('The current movie title is')
| [
"In this web page, can you find a pattern, list all the articles, and their publication dates. in Json format. webpage : {webpage}?"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160230.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web scraping
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text = text_from_html(html)
#Logging
file_path = "output.txt"
# Open the file in write mode and write the text content
with open(file_path, "w") as file:
file.write(text)
print("Variable content saved to the file:", file_path)
#LLM part
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
llm = OpenAI(temperature=0.9)
text = "What would be a good company name for a company that makes colorful socks?"
print(llm(text))
st.write('The current movie title is')
| [
"What is a good name for a company that makes {product}?"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160012.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")
html = urllib.request.urlopen(url_to_watch).read()
text = text_from_html(html)
file_path = "output.txt"
# Open the file in write mode and write the text content
with open(file_path, "w") as file:
file.write(text)
print("Variable content saved to the file:", file_path)
llm = OpenAI(temperature=0.9)
text = "What would be a good company name for a company that makes colorful socks?"
print(llm(text))
st.write('The current movie title is')
| [] |
2024-01-10 | ilisparrow/llm_tests | .history~gui_20230623005115.py | ## IMPORTS
from bs4 import BeautifulSoup # Importing BeautifulSoup for HTML parsing
from bs4.element import Comment # Importing Comment class for extracting comments from HTML
import urllib.request # Importing urllib.request for making HTTP requests
import streamlit as st # Importing streamlit for building interactive web apps
import os # Importing os for accessing operating system functionalities
from dotenv import load_dotenv # Importing load_dotenv for loading environment variables
from langchain.llms import OpenAI # Importing OpenAI class from langchain.llms module
from langchain.prompts import PromptTemplate # Importing PromptTemplate class from langchain.prompts module
import json # Importing json module for working with JSON data
from dotenv import dotenv_values # Importing dotenv_values for loading environment variables from .env file
from googlesearch import search # Importing search function from googlesearch module
import requests # Importing requests module for making HTTP requests
## SETUP ENVIRONMENT VARIABLES
load_dotenv()
env_vars = dotenv_values(".env")
## Define system relevant input data for application
HARD_LIMIT_CHAR = 10000
## Functions
def tag_visible(element):
excluded_tags = ['a', 'style', 'script', 'head', 'title', 'meta', '[document]']
if element.parent.name in excluded_tags:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.find_all(text=tag_visible)
visible_texts = [t.strip() for t in texts if t.strip()]
return " ".join(visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
## Process website and save content to file
def process_website(url, output_file_name):
html = urllib.request.urlopen(url).read()
text_from_webpage = text_from_html(html)
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
# Logging
file_path = output_file_name
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
return text_from_webpage
def get_link_based_on_article_name_via_google(article_title):
search = article_title
url = 'https://www.google.com/search'
headers = {
'Accept' : '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82',
}
parameters = {'q': search}
content = requests.get(url, headers = headers, params = parameters).text
soup = BeautifulSoup(content, 'html.parser')
search = soup.find(id = 'search')
first_link = search.find('a')
article_link= first_link['href']
return first_link['href']
def prompt_to_llm_response(text_from_webpage, prompt_input):
prompt = PromptTemplate(
input_variables=["webpage", "prompt_text"],
template="\"{prompt_text}\" \
webpage : \"{webpage}\"",
)
prompt_to_send = prompt.format(webpage=text_from_webpage, prompt_text=prompt_input)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0.9)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","")
return result_from_chatgpt
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")
## Process website and save content to file
text_from_webpage = process_website(url_to_watch, "output.txt")
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
prompt_news = "In this web page, can you find a pattern, list all the article titles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text."
result_from_chatgpt = prompt_to_llm_response(text_from_webpage,prompt_news)
parsed_articles = json.loads(result_from_chatgpt)
st.json(json.dumps(json.loads(result_from_chatgpt), indent=4))
for article in parsed_articles:
print("--------------------------")
print(article["title"])
query = article["title"]
for j in search(query, tld="co.in", num=1, stop=1, pause=2):
print(j)
st.header(article["title"])
st.text(article["date"])
print(json.dumps(json.loads(result_from_chatgpt), indent=4))
#print(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the article titles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text.",
"\"{prompt_text}\" webpage : \"{webpage}\"",
"prompt_text"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518155958.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")
html = urllib.request.urlopen(url_to_watch).read()
text = text_from_html(html)
file_path = "output.txt"
# Open the file in write mode and write the text content
with open(file_path, "w") as file:
file.write(text)
print("Variable content saved to the file:", file_path)
llm = OpenAI(temperature=0.9)
text = "What would be a good company name for a company that makes colorful socks?"
print(llm(text))
st.write('The current movie title is')
| [] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518175213.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
HARD_LIMIT_CHAR = 10000
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
#TODO : DO URL Check and show message when not valid
#Web scraping
url_to_watch = st.text_input("Input your url here","https://www.nytimes.com/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fix this limit in a smarter way
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"published\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
print(result_from_chatgpt)
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
st.json(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
    st.text(article["published"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"published\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518170857.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web scraping
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \'date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.0)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
load_dotenv()
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", 'date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518165443.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web scraping
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text.\
webpage : \"{webpage}\"",
)
llm = OpenAI(temperature=0.0)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
load_dotenv()
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
for article in parsed_articles:
print(article["title"])
#st.write(article["title"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518164849.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web scraping
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text.\
webpage : \"{webpage}\"",
)
llm = OpenAI(temperature=0.0)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
parsed_articles = json.loads(result_from_chatgpt)
for article in parsed_articles:
st.write(article["title"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~gui_20230623005639.py | ## IMPORTS
from bs4 import BeautifulSoup # Importing BeautifulSoup for HTML parsing
from bs4.element import Comment # Importing Comment class for extracting comments from HTML
import urllib.request # Importing urllib.request for making HTTP requests
import streamlit as st # Importing streamlit for building interactive web apps
import os # Importing os for accessing operating system functionalities
from dotenv import load_dotenv # Importing load_dotenv for loading environment variables
from langchain.llms import OpenAI # Importing OpenAI class from langchain.llms module
from langchain.prompts import PromptTemplate # Importing PromptTemplate class from langchain.prompts module
import json # Importing json module for working with JSON data
from dotenv import dotenv_values # Importing dotenv_values for loading environment variables from .env file
from googlesearch import search # Importing search function from googlesearch module
import requests # Importing requests module for making HTTP requests
import unicodedata
import validators
## SETUP ENVIRONMENT VARIABLES
load_dotenv()
env_vars = dotenv_values(".env")
## Define system relevant input data for application
HARD_LIMIT_CHAR = 10000
## Functions
def tag_visible(element):
excluded_tags = ['a', 'style', 'script', 'head', 'title', 'meta', '[document]']
if element.parent.name in excluded_tags:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.find_all(text=tag_visible)
visible_texts = [t.strip() for t in texts if t.strip()]
return " ".join(visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
## Process website and save content to file
def process_website(url, output_file_name):
html = urllib.request.urlopen(url).read()
text_from_webpage = text_from_html(html)
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
# Logging
file_path = output_file_name
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
return text_from_webpage
def get_link_based_on_article_name_via_google(article_title):
search = article_title
url = 'https://www.google.com/search'
headers = {
'Accept' : '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82',
}
parameters = {'q': search}
content = requests.get(url, headers = headers, params = parameters).text
soup = BeautifulSoup(content, 'html.parser')
search = soup.find(id = 'search')
first_link = search.find('a')
article_link= first_link['href']
return first_link['href']
def prompt_to_llm_response(text_from_webpage, prompt_input):
prompt = PromptTemplate(
input_variables=["webpage", "prompt_text"],
template="\"{prompt_text}\" \
webpage : \"{webpage}\"",
)
prompt_to_send = prompt.format(webpage=text_from_webpage, prompt_text=prompt_input)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0.9)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","")
return result_from_chatgpt
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")
## Process website and save content to file
text_from_webpage = process_website(url_to_watch, "output.txt")
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
prompt_news = "In this web page, can you find a pattern, list all the article titles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text."
result_from_chatgpt = prompt_to_llm_response(text_from_webpage,prompt_news)
parsed_articles = json.loads(result_from_chatgpt)
for article in parsed_articles:
print("--------------------------")
print(article["title"])
query = article["title"]
for j in search(query, tld="co.in", num=1, stop=1, pause=2):
print(j)
st.header(article["title"])
st.text(article["date"])
st.json(json.dumps(json.loads(result_from_chatgpt), indent=4))
| [
"In this web page, can you find a pattern, list all the article titles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text.",
"\"{prompt_text}\" webpage : \"{webpage}\"",
"prompt_text"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518170900.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web scraping
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.0)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
load_dotenv()
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160858.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web scraping
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. \n webpage : {webpage}",
)
llm = OpenAI(temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send)
st.write(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. \n webpage : {webpage}"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518161605.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web scraping
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text.\
webpage : {webpage}",
)
llm = OpenAI(temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
st.write(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. webpage : {webpage}"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160336.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web scraping
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text)
print("Variable content saved to the file:", file_path)
#LLM part
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. in Json format. webpage : {webpage}",)
llm = OpenAI(temperature=0.9)
text = "What would be a good company name for a company that makes colorful socks?"
print(llm(text))
st.write('The current movie title is')
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. in Json format. webpage : {webpage}"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160114.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")
html = urllib.request.urlopen(url_to_watch).read()
text = text_from_html(html)
file_path = "output.txt"
# Open the file in write mode and write the text content
with open(file_path, "w") as file:
file.write(text)
print("Variable content saved to the file:", file_path)
llm = OpenAI(temperature=0.9)
text = "What would be a good company name for a company that makes colorful socks?"
print(llm(text))
st.write('The current movie title is')
| [] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518161229.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web scraping
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text.\
webpage : {webpage}",
)
llm = OpenAI(temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send)
st.write(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. webpage : {webpage}"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518161842.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web scraping
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text.\
webpage : \"{webpage}\"",
)
llm = OpenAI(temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
st.write(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518171759.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web scraping
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fix this limit in a smarter way
text_from_webpage = text_from_webpage[:15000]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
st.json(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518175241.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
HARD_LIMIT_CHAR = 10000
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
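# Possible usage (illustrative, assuming the LLM wraps its JSON in extra text),
# as a more tolerant alternative to the direct json.loads call further down:
#     candidates = extract_json_values(result_from_chatgpt)
#     parsed_articles = candidates[0] if candidates else []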
#TODO : Do a URL check and show a message when it is not valid
#Web scraping and text extraction
url_to_watch = st.text_input("Input your url here","https://www.nytimes.com/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fix this limit in a smarter way
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
print(result_from_chatgpt)
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
st.json(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518171250.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : Do a URL check and show a message when it is not valid
#Web scraping and text extraction
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
print(f'Scraped text length: {len(text_from_webpage)} characters')
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.0)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
load_dotenv()
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~gui_20230623005032.py | ## IMPORTS
from bs4 import BeautifulSoup # Importing BeautifulSoup for HTML parsing
from bs4.element import Comment # Importing Comment class for extracting comments from HTML
import urllib.request # Importing urllib.request for making HTTP requests
import streamlit as st # Importing streamlit for building interactive web apps
import os # Importing os for accessing operating system functionalities
from dotenv import load_dotenv # Importing load_dotenv for loading environment variables
from langchain.llms import OpenAI # Importing OpenAI class from langchain.llms module
from langchain.prompts import PromptTemplate # Importing PromptTemplate class from langchain.prompts module
import json # Importing json module for working with JSON data
from dotenv import dotenv_values # Importing dotenv_values for loading environment variables from .env file
from googlesearch import search # Importing search function from googlesearch module
import requests # Importing requests module for making HTTP requests
## SETUP ENVIRONMENT VARIABLES
load_dotenv()
env_vars = dotenv_values(".env")
## Define system relevant input data for application
HARD_LIMIT_CHAR = 10000
## Functions
def tag_visible(element):
excluded_tags = ['a', 'style', 'script', 'head', 'title', 'meta', '[document]']
if element.parent.name in excluded_tags:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.find_all(text=tag_visible)
visible_texts = [t.strip() for t in texts if t.strip()]
return " ".join(visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
## Process website and save content to file
def process_website(url, output_file_name):
html = urllib.request.urlopen(url).read()
text_from_webpage = text_from_html(html)
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
# Logging
file_path = output_file_name
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
return text_from_webpage
def get_link_based_on_article_name_via_google(article_title):
search = article_title
url = 'https://www.google.com/search'
headers = {
'Accept' : '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82',
}
parameters = {'q': search}
content = requests.get(url, headers = headers, params = parameters).text
soup = BeautifulSoup(content, 'html.parser')
search = soup.find(id = 'search')
first_link = search.find('a')
article_link= first_link['href']
return first_link['href']
def prompt_to_llm_response(text_from_webpage, prompt_input):
prompt = PromptTemplate(
input_variables=["webpage", "prompt_text"],
template="\"{prompt_text}\" \
webpage : \"{webpage}\"",
)
prompt_to_send = prompt.format(webpage=text_from_webpage, prompt_text=prompt_input)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0.9)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","")
return result_from_chatgpt
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")
## Process website and save content to file
text_from_webpage = process_website(url_to_watch, "output.txt")
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
prompt_news = "In this web page, can you find a pattern, list all the article titles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text."
result_from_chatgpt = prompt_to_llm_response(text_from_webpage,prompt_news)
st.json(json.dumps(json.loads(result_from_chatgpt), indent=4))
print(json.dumps(json.loads(result_from_chatgpt), indent=4))
#print(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the article titles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text.",
"\"{prompt_text}\" webpage : \"{webpage}\"",
"prompt_text"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160408.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : Do a URL check and show a message when it is not valid
#Web scraping and text extraction
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text)
print("Variable content saved to the file:", file_path)
#LLM part
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. in Json format. webpage : {webpage}",)
llm = OpenAI(temperature=0.9)
prompt_to_send = prompt.format(webpage=text)
print(llm(prompt_to_send))
st.write('The current movie title is')
| [
"colorful socks",
"In this web page, can you find a pattern, list all the articles and their publication dates. in Json format. webpage : {webpage}"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518164332.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : Do a URL check and show a message when it is not valid
#Web scraping and text extraction
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 5. In Json format. No Other text.\
webpage : \"{webpage}\"",
)
llm = OpenAI(temperature=0.0)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
st.write(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 5. In Json format. No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518170632.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : Do a URL check and show a message when it is not valid
#Web scraping and text extraction
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format. No Other text.\
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.0)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
load_dotenv()
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format. No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160929.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : Do a URL check and show a message when it is not valid
#Web scraping and text extraction
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. webpage : {webpage}",
)
llm = OpenAI(temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send)
st.write(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. webpage : {webpage}"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160939.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : Do a URL check and show a message when it is not valid
#Web scraping and text extraction
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text.\
webpage : {webpage}",
)
llm = OpenAI(temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send)
st.write(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. webpage : {webpage}"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518172742.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
HARD_LIMIT_CHAR = 10000
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : Do a URL check and show a message when it is not valid
#Web scraping and text extraction
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fix this limit in a smarter way
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
st.json(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518172653.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
HARD_LIMIT_CHAR = 10000
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : Do a URL check and show a message when it is not valid
#Web scraping and text extraction
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fix this limit in a smarter way
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
parsed_articles = json.loads(result_from_chatgpt.split('\\[')[1])
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
st.json(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518165318.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : Do a URL check and show a message when it is not valid
#Web scraping and text extraction
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text.\
webpage : \"{webpage}\"",
)
llm = OpenAI(temperature=0.0)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
for article in parsed_articles:
print(article["title"])
#st.write(article["title"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160327.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : Do a URL check and show a message when it is not valid
#Web scraping and text extraction
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text)
print("Variable content saved to the file:", file_path)
#LLM part
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles, and their publication dates. in Json format. webpage : {webpage}",)
llm = OpenAI(temperature=0.9)
text = "What would be a good company name for a company that makes colorful socks?"
print(llm(text))
st.write('The current movie title is')
| [
"In this web page, can you find a pattern, list all the articles, and their publication dates. in Json format. webpage : {webpage}"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230519010102.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
HARD_LIMIT_CHAR = 10000
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
#TODO : Do a URL check and show a message when it is not valid
#Web scraping and text extraction
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fix this limit in a smarter way
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
print(result_from_chatgpt)
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
#st.json(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
#TODO : Do a Google search limited to the given website, find the articles and get their content
#TODO : Add a field to ask a question (maybe a multiple choice field)
#TODO : Send the article and the question to ChatGPT
#TODO : Display the results to the user
# (a rough sketch of these steps follows below)
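# Rough, illustrative sketch of the TODO steps above. A minimal sketch, assuming the
# `googlesearch` package is installed and that `text_from_html`, `env_vars` and
# HARD_LIMIT_CHAR from this file are in scope. The helper names `find_article_link`
# and `answer_question_about_article` are hypothetical, not an existing API.
from googlesearch import search as google_search

def find_article_link(article_title, site_url):
    # Google search restricted to the watched site; return the first result, if any.
    query = f"site:{site_url} {article_title}"
    for link in google_search(query, num=1, stop=1, pause=2):
        return link
    return None

def answer_question_about_article(article_text, question):
    # Send the article and the question to the LLM and return its answer.
    qa_prompt = PromptTemplate(
        input_variables=["article", "question"],
        template="Answer the question using only the article below.\nQuestion: {question}\nArticle: \"{article}\"",
    )
    qa_llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
    return qa_llm(qa_prompt.format(article=article_text, question=question)).strip()

# Example wiring with the Streamlit field mentioned in the TODOs:
# question = st.text_input("Ask a question about these articles", "What is this article about?")
# for article in parsed_articles:
#     link = find_article_link(article["title"], url_to_watch)
#     if link:
#         article_html = urllib.request.urlopen(link).read()
#         article_text = text_from_html(article_html)[:HARD_LIMIT_CHAR]
#         st.subheader(article["title"])
#         st.write(answer_question_about_article(article_text, question))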
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~gui_20230623010227.py | ## IMPORTS
from bs4 import BeautifulSoup # Importing BeautifulSoup for HTML parsing
from bs4.element import Comment # Importing Comment class for extracting comments from HTML
import urllib.request # Importing urllib.request for making HTTP requests
import streamlit as st # Importing streamlit for building interactive web apps
import os # Importing os for accessing operating system functionalities
from dotenv import load_dotenv # Importing load_dotenv for loading environment variables
from langchain.llms import OpenAI # Importing OpenAI class from langchain.llms module
from langchain.prompts import PromptTemplate # Importing PromptTemplate class from langchain.prompts module
import json # Importing json module for working with JSON data
from dotenv import dotenv_values # Importing dotenv_values for loading environment variables from .env file
from googlesearch import search # Importing search function from googlesearch module
import requests # Importing requests module for making HTTP requests
import unicodedata
import validators
## SETUP ENVIRONMENT VARIABLES
load_dotenv()
env_vars = dotenv_values(".env")
## Define system relevant input data for application
HARD_LIMIT_CHAR = 10000
## Functions
def tag_visible(element):
excluded_tags = ['a', 'style', 'script', 'head', 'title', 'meta', '[document]']
if element.parent.name in excluded_tags:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.find_all(text=tag_visible)
visible_texts = [t.strip() for t in texts if t.strip()]
return " ".join(visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
## Process website and save content to file
def process_website(url, output_file_name):
html = urllib.request.urlopen(url).read()
text_from_webpage = text_from_html(html)
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
# Logging
file_path = output_file_name
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
return text_from_webpage
def get_link_based_on_article_name_via_google(article_title, url_to_watch):
search = article_title + " " + url_to_watch
url = 'https://www.google.com/search'
headers = {
'Accept' : '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82',
}
parameters = {'q': search}
content = requests.get(url, headers = headers, params = parameters).text
soup = BeautifulSoup(content, 'html.parser')
search = soup.find(id = 'search')
first_link = search.find('a')
article_link= first_link['href']
return first_link['href']
def prompt_to_llm_response(text_from_webpage, prompt_input):
prompt = PromptTemplate(
input_variables=["webpage", "prompt_text"],
template="\"{prompt_text}\" \
webpage : \"{webpage}\"",
)
prompt_to_send = prompt.format(webpage=text_from_webpage, prompt_text=prompt_input)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","")
return result_from_chatgpt
def prompt_similarity_to_llm_response(sentence1, sentence2):
prompt = PromptTemplate(
input_variables=["sentence1", "sentence2"],
template="""
Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2?
Answer with one of [strongly agree, agree, disagree, strongly disagree] only.
Sentence 1: {sentence1}
Sentence 2: {sentence2}
""",
)
prompt_to_send = prompt.format(sentence1=sentence1, sentence2=sentence2)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","").lower()
return result_from_chatgpt
## Web scraping
url_input_example = "https://news.yahoo.com"
# url_input_example = "https://laion.ai/blog/" # OK
# url_input_example = "https://www.euronews.com/tag/artificial-intelligence" # NOK
# url_input_example = "https://www.theguardian.com/international" #OK
# url_input_example = "https://www.bloomberg.com/europe" #NOK
# url_input_example = "https://news.google.com/home?hl=en-US&gl=US&ceid=US:en" # OK
### USER INPUT HERE
if validators.url(url_input_example):
url_to_watch = st.text_input("Input your URL here", url_input_example)
## Process website and save content to file
text_from_webpage = process_website(url_to_watch, "output.txt")
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
else:
print("URL not valid")
### UI OUTPUT HERE
#st.write("URL not valid")
prompt_news = "Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
result_from_chatgpt = prompt_to_llm_response(text_from_webpage,prompt_news)
result_from_chatgpt_processed = result_from_chatgpt.encode('ascii', 'ignore')
print(json.dumps(json.loads(result_from_chatgpt_processed), indent=4))
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
#Logging
file_path = "output_gpt.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
#with open('final_output.json', 'w') as f:
# print("The json file is created")
### USER INPUT HERE
#topic_of_interest = "Should AI be open sourced?"
topic_of_interest = st.text_input("What is your topic of interest ?","Ukraine War")#UI
empty_list = []
i = 0
for item in json.loads(result_from_chatgpt_processed):
i+=1
output_filename = "article_text"+str(i)+".txt"
article_title = item['title']
article_link = get_link_based_on_article_name_via_google(article_title, url_to_watch)
new_item = {
'title': item['title'],
'metadata': item['metadata'],
'link': article_link,
}
relation_exists = prompt_similarity_to_llm_response(article_title,topic_of_interest)
if relation_exists == "strongly agree" or relation_exists == "agree" :
article_text = process_website(article_link, output_filename)
# Summarize article
prompt_article = "Summarize the following text in 3 sentences: "
article_summary = prompt_to_llm_response(article_text,prompt_article)
# Answer the question
prompt_content = "If user input is a question provide an answer, otherwise summarise content relevant to the input topic. Answer in one sentence".format(topic_of_interest)
user_question_answer = prompt_to_llm_response(article_text,prompt_content)
new_item["summary"]=article_summary
new_item["answer"]=user_question_answer
new_item["related?"]=relation_exists
#else: print("not relevant")
empty_list.append(new_item)
output_json = json.dumps(empty_list, indent=4)
st.json(output_json)
### UI OUTPUT HERE
with open("output.json", "w") as outfile:
outfile.write(output_json)
| [
"sentence2",
"\"{prompt_text}\" webpage : \"{webpage}\"",
"sentence1",
"If user input is a question provide an answer, otherwise summarise content relevant to the input topic. Answer in one sentence",
"prompt_text",
"\n Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2? \n Answer with one of [strongly agree, agree, disagree, strongly disagree] only.\n\n Sentence 1: {sentence1}\n Sentence 2: {sentence2}\n ",
"Summarize the following text in 3 sentences: ",
"Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
] |
2024-01-10 | ilisparrow/llm_tests | .history~gui_20230623010114.py | ## IMPORTS
from bs4 import BeautifulSoup # Importing BeautifulSoup for HTML parsing
from bs4.element import Comment # Importing Comment class for extracting comments from HTML
import urllib.request # Importing urllib.request for making HTTP requests
import streamlit as st # Importing streamlit for building interactive web apps
import os # Importing os for accessing operating system functionalities
from dotenv import load_dotenv # Importing load_dotenv for loading environment variables
from langchain.llms import OpenAI # Importing OpenAI class from langchain.llms module
from langchain.prompts import PromptTemplate # Importing PromptTemplate class from langchain.prompts module
import json # Importing json module for working with JSON data
from dotenv import dotenv_values # Importing dotenv_values for loading environment variables from .env file
from googlesearch import search # Importing search function from googlesearch module
import requests # Importing requests module for making HTTP requests
import unicodedata
import validators
## SETUP ENVIRONMENT VARIABLES
load_dotenv()
env_vars = dotenv_values(".env")
## Define system relevant input data for application
HARD_LIMIT_CHAR = 10000
## Functions
def tag_visible(element):
excluded_tags = ['a', 'style', 'script', 'head', 'title', 'meta', '[document]']
if element.parent.name in excluded_tags:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.find_all(text=tag_visible)
visible_texts = [t.strip() for t in texts if t.strip()]
return " ".join(visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
## Process website and save content to file
def process_website(url, output_file_name):
html = urllib.request.urlopen(url).read()
text_from_webpage = text_from_html(html)
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
# Logging
file_path = output_file_name
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
return text_from_webpage
def get_link_based_on_article_name_via_google(article_title, url_to_watch):
search = article_title + " " + url_to_watch
url = 'https://www.google.com/search'
headers = {
'Accept' : '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82',
}
parameters = {'q': search}
content = requests.get(url, headers = headers, params = parameters).text
soup = BeautifulSoup(content, 'html.parser')
search = soup.find(id = 'search')
first_link = search.find('a')
article_link= first_link['href']
return first_link['href']
def prompt_to_llm_response(text_from_webpage, prompt_input):
prompt = PromptTemplate(
input_variables=["webpage", "prompt_text"],
template="\"{prompt_text}\" \
webpage : \"{webpage}\"",
)
prompt_to_send = prompt.format(webpage=text_from_webpage, prompt_text=prompt_input)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","")
return result_from_chatgpt
def prompt_similarity_to_llm_response(sentence1, sentence2):
prompt = PromptTemplate(
input_variables=["sentence1", "sentence2"],
template="""
Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2?
Answer with one of [strongly agree, agree, disagree, strongly disagree] only.
Sentence 1: {sentence1}
Sentence 2: {sentence2}
""",
)
prompt_to_send = prompt.format(sentence1=sentence1, sentence2=sentence2)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","").lower()
return result_from_chatgpt
## Web scraping
url_input = "https://news.yahoo.com"
# url_input = "https://laion.ai/blog/" # OK
# url_input = "https://www.euronews.com/tag/artificial-intelligence" # NOK
# url_input = "https://www.theguardian.com/international" #OK
# url_input = "https://www.bloomberg.com/europe" #NOK
# url_input = "https://news.google.com/home?hl=en-US&gl=US&ceid=US:en" # OK
### USER INPUT HERE
if validators.url(url_input):
url_to_watch = st.text_input("Input your URL here", url_input)
## Process website and save content to file
text_from_webpage = process_website(url_to_watch, "output.txt")
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
else:
print("URL not valid")
### UI OUTPUT HERE
#st.write("URL not valid")
prompt_news = "Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
result_from_chatgpt = prompt_to_llm_response(text_from_webpage,prompt_news)
result_from_chatgpt_processed = result_from_chatgpt.encode('ascii', 'ignore')
print(json.dumps(json.loads(result_from_chatgpt_processed), indent=4))
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
#Logging
file_path = "output_gpt.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
#with open('final_output.json', 'w') as f:
# print("The json file is created")
### USER INPUT HERE
#topic_of_interest = "Should AI be open sourced?"
topic_of_interest = "Ukraine War"
text_to_watch_for = st.text_input("What is your topic of interest ?","Ukraine War")#UI
empty_list = []
i = 0
for item in json.loads(result_from_chatgpt_processed):
i+=1
output_filename = "article_text"+str(i)+".txt"
article_title = item['title']
article_link = get_link_based_on_article_name_via_google(article_title, url_to_watch)
new_item = {
'title': item['title'],
'metadata': item['metadata'],
'link': article_link,
}
relation_exists = prompt_similarity_to_llm_response(article_title,topic_of_interest)
if relation_exists == "strongly agree" or relation_exists == "agree" :
article_text = process_website(article_link, output_filename)
# Summarize article
prompt_article = "Summarize the following text in 3 sentences: "
article_summary = prompt_to_llm_response(article_text,prompt_article)
# Answer the question
prompt_content = "If user input is a question provide an answer, otherwise summarise content relevant to the input topic. Answer in one sentence".format(topic_of_interest)
user_question_answer = prompt_to_llm_response(article_text,prompt_content)
new_item["summary"]=article_summary
new_item["answer"]=user_question_answer
new_item["related?"]=relation_exists
#else: print("not relevant")
empty_list.append(new_item)
output_json = json.dumps(empty_list, indent=4)
st.json(output_json)
### UI OUTPUT HERE
with open("output.json", "w") as outfile:
outfile.write(output_json)
| [
"\n Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2? \n Answer with one of [strongly agree, agree, disagree, strongly disagree] only.\n\n Sentence 1: {sentence1}\n Sentence 2: {sentence2}\n ",
"sentence2",
"\"{prompt_text}\" webpage : \"{webpage}\"",
"If user input is a question provide an answer, otherwise summarise content relevant to the input topic. Answer in one sentence",
"prompt_text",
"sentence1",
"Summarize the following text in 3 sentences: ",
"Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518172707.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
HARD_LIMIT_CHAR = 10000
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : Do a URL check and show a message when it is not valid
#Web scraping and text extraction
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fix this limit in a smarter way
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
parsed_articles = json.loads(result_from_chatgpt.split('\\[')[0])
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
st.json(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518171321.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : Do a URL check and show a message when it is not valid
#Web scraping and text extraction
url_to_watch = st.text_input("Input your url here","https://www.lemonde.fr/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
print(f'Scraped text length: {len(text_from_webpage)} characters')
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.0)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
load_dotenv()
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518180145.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
HARD_LIMIT_CHAR = 10000
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
#TODO : Do a URL check and show a message when it is not valid
#Web scraping and text extraction
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fix this limit in a smarter way
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
print(result_from_chatgpt)
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
st.json(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160144.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : Do a URL check and show a message when it is not valid
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text = text_from_html(html)
file_path = "output.txt"
# Open the file in write mode and write the text content
with open(file_path, "w") as file:
file.write(text)
print("Variable content saved to the file:", file_path)
#LLM part
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
llm = OpenAI(temperature=0.9)
text = "What would be a good company name for a company that makes colorful socks?"
print(llm(text))
st.write('The current movie title is')
| [
"What is a good name for a company that makes {product}?"
] |
2024-01-10 | ilisparrow/llm_tests | .history~gui_20230623005656.py | ## IMPORTS
from bs4 import BeautifulSoup # Importing BeautifulSoup for HTML parsing
from bs4.element import Comment # Importing Comment class for extracting comments from HTML
import urllib.request # Importing urllib.request for making HTTP requests
import streamlit as st # Importing streamlit for building interactive web apps
import os # Importing os for accessing operating system functionalities
from dotenv import load_dotenv # Importing load_dotenv for loading environment variables
from langchain.llms import OpenAI # Importing OpenAI class from langchain.llms module
from langchain.prompts import PromptTemplate # Importing PromptTemplate class from langchain.prompts module
import json # Importing json module for working with JSON data
from dotenv import dotenv_values # Importing dotenv_values for loading environment variables from .env file
from googlesearch import search # Importing search function from googlesearch module
import requests # Importing requests module for making HTTP requests
import unicodedata
import validators
## SETUP ENVIRONMENT VARIABLES
load_dotenv()
env_vars = dotenv_values(".env")
## Define system relevant input data for application
HARD_LIMIT_CHAR = 10000
## Functions
def tag_visible(element):
excluded_tags = ['a', 'style', 'script', 'head', 'title', 'meta', '[document]']
if element.parent.name in excluded_tags:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.find_all(text=tag_visible)
visible_texts = [t.strip() for t in texts if t.strip()]
return " ".join(visible_texts)
def extract_json_values(input_str):
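    # Repeatedly attempt json.loads on the remaining text, skipping unparseable
    # prefixes ("Expecting value") and splitting off trailing data ("Extra data"),
    # so that every JSON value embedded in the LLM output is collected.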
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
## Process website and save content to file
def process_website(url, output_file_name):
html = urllib.request.urlopen(url).read()
text_from_webpage = text_from_html(html)
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
# Logging
file_path = output_file_name
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
return text_from_webpage
def get_link_based_on_article_name_via_google(article_title, url_to_watch):
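    # Google the article title together with the source site and return the href
    # of the first result link found in the results page.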
search = article_title + " " + url_to_watch
url = 'https://www.google.com/search'
headers = {
'Accept' : '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82',
}
parameters = {'q': search}
content = requests.get(url, headers = headers, params = parameters).text
soup = BeautifulSoup(content, 'html.parser')
search = soup.find(id = 'search')
first_link = search.find('a')
article_link= first_link['href']
return first_link['href']
def prompt_to_llm_response(text_from_webpage, prompt_input):
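    # Combine the caller's instruction with the scraped page text, send it to the
    # OpenAI completion model at temperature 0, and return the cleaned one-line answer.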
prompt = PromptTemplate(
input_variables=["webpage", "prompt_text"],
template="\"{prompt_text}\" \
webpage : \"{webpage}\"",
)
prompt_to_send = prompt.format(webpage=text_from_webpage, prompt_text=prompt_input)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","")
return result_from_chatgpt
def prompt_similarity_to_llm_response(sentence1, sentence2):
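    # Ask the LLM how strongly sentence1 relates to sentence2; the reply is expected
    # to be one of "strongly agree", "agree", "disagree", "strongly disagree" (lower-cased).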
prompt = PromptTemplate(
input_variables=["sentence1", "sentence2"],
template="""
Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2?
Answer with one of [strongly agree, agree, disagree, strongly disagree] only.
Sentence 1: {sentence1}
Sentence 2: {sentence2}
""",
)
prompt_to_send = prompt.format(sentence1=sentence1, sentence2=sentence2)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","").lower()
return result_from_chatgpt
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")
## Process website and save content to file
text_from_webpage = process_website(url_to_watch, "output.txt")
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
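# Ask the LLM to extract the first three article titles and publication dates from the scraped page as JSON.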
prompt_news = "In this web page, can you find a pattern, list all the article titles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text."
result_from_chatgpt = prompt_to_llm_response(text_from_webpage,prompt_news)
parsed_articles = json.loads(result_from_chatgpt)
for article in parsed_articles:
print("--------------------------")
print(article["title"])
query = article["title"]
for j in search(query, tld="co.in", num=1, stop=1, pause=2):
print(j)
st.header(article["title"])
st.text(article["date"])
st.json(json.dumps(json.loads(result_from_chatgpt), indent=4))
| [
"sentence2",
"\"{prompt_text}\" webpage : \"{webpage}\"",
"In this web page, can you find a pattern, list all the article titles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text.",
"sentence1",
"prompt_text",
"\n Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2? \n Answer with one of [strongly agree, agree, disagree, strongly disagree] only.\n\n Sentence 1: {sentence1}\n Sentence 2: {sentence2}\n "
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518170334.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text.\
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.0)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
load_dotenv()
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
for article in parsed_articles:
print(article["title"])
st.write(article["title"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518170351.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format. No Other text.\
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.0)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
load_dotenv()
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
for article in parsed_articles:
print(article["title"])
st.write(article["title"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format. No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518164523.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text.\
webpage : \"{webpage}\"",
)
llm = OpenAI(temperature=0.0)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
st.write(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518163237.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 5. In Json format. No Other text.\
webpage : \"{webpage}\"",
)
llm = OpenAI(temperature=0.0)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
st.write(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 5. In Json format. No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230519010612.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
HARD_LIMIT_CHAR = 10000
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fixe this limit, in a smarter way
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
print(result_from_chatgpt)
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
#Logging
file_path = "output_gpt.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
#st.json(parsed_articles)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
#TODO : Do a google search limited to the websited given, of the articles, get their content
#TODO : Add a field to ask a quetion (maybe multiple choice field)
#TODO : Ask the article and the question to Chatgpt
#TODO : Display results to the user
#TODO :
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Do not mix the date with the reading time. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~gui_20230623010721.py | ## IMPORTS
from bs4 import BeautifulSoup # Importing BeautifulSoup for HTML parsing
from bs4.element import Comment # Importing Comment class for extracting comments from HTML
import urllib.request # Importing urllib.request for making HTTP requests
import streamlit as st # Importing streamlit for building interactive web apps
import os # Importing os for accessing operating system functionalities
from dotenv import load_dotenv # Importing load_dotenv for loading environment variables
from langchain.llms import OpenAI # Importing OpenAI class from langchain.llms module
from langchain.prompts import PromptTemplate # Importing PromptTemplate class from langchain.prompts module
import json # Importing json module for working with JSON data
from dotenv import dotenv_values # Importing dotenv_values for loading environment variables from .env file
from googlesearch import search # Importing search function from googlesearch module
import requests # Importing requests module for making HTTP requests
import unicodedata
import validators
## SETUP ENVIRONMENT VARIABLES
load_dotenv()
env_vars = dotenv_values(".env")
## Define system relevant input data for application
HARD_LIMIT_CHAR = 10000
## Functions
def tag_visible(element):
excluded_tags = ['a', 'style', 'script', 'head', 'title', 'meta', '[document]']
if element.parent.name in excluded_tags:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.find_all(text=tag_visible)
visible_texts = [t.strip() for t in texts if t.strip()]
return " ".join(visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
## Process website and save content to file
def process_website(url, output_file_name):
html = urllib.request.urlopen(url).read()
text_from_webpage = text_from_html(html)
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
# Logging
file_path = output_file_name
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
return text_from_webpage
def get_link_based_on_article_name_via_google(article_title, url_to_watch):
search = article_title + " " + url_to_watch
url = 'https://www.google.com/search'
headers = {
'Accept' : '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82',
}
parameters = {'q': search}
content = requests.get(url, headers = headers, params = parameters).text
soup = BeautifulSoup(content, 'html.parser')
search = soup.find(id = 'search')
first_link = search.find('a')
article_link= first_link['href']
return first_link['href']
def prompt_to_llm_response(text_from_webpage, prompt_input):
prompt = PromptTemplate(
input_variables=["webpage", "prompt_text"],
template="\"{prompt_text}\" \
webpage : \"{webpage}\"",
)
prompt_to_send = prompt.format(webpage=text_from_webpage, prompt_text=prompt_input)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","")
return result_from_chatgpt
def prompt_similarity_to_llm_response(sentence1, sentence2):
prompt = PromptTemplate(
input_variables=["sentence1", "sentence2"],
template="""
Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2?
Answer with one of [strongly agree, agree, disagree, strongly disagree] only.
Sentence 1: {sentence1}
Sentence 2: {sentence2}
""",
)
prompt_to_send = prompt.format(sentence1=sentence1, sentence2=sentence2)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","").lower()
return result_from_chatgpt
## Web Scrapping
url_input_example = "https://news.yahoo.com"
# url_input_example = "https://laion.ai/blog/" # OK
# url_input_example = "https://www.euronews.com/tag/artificial-intelligence" # NOK
# url_input_example = "https://www.theguardian.com/international" #OK
# url_input_example = "https://www.bloomberg.com/europe" #NOK
# url_input_example = "https://news.google.com/home?hl=en-US&gl=US&ceid=US:en" # OK
url_to_watch = st.text_input("Input your URL here", url_input_example)
### USER INPUT HERE
if validators.url(url_input_example):
## Process website and save content to file
text_from_webpage = process_website(url_to_watch, "output.txt")
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
else:
print("URL not valid")
st.header("ERROR : The URL is not valid")
### UI OUTPUT HERE
#st.write("URL not valid")
prompt_news = "Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
result_from_chatgpt = prompt_to_llm_response(text_from_webpage,prompt_news)
result_from_chatgpt_processed = result_from_chatgpt.encode('ascii', 'ignore')
print(json.dumps(json.loads(result_from_chatgpt_processed), indent=4))
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
#Logging
file_path = "output_gpt.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
#with open('final_output.json', 'w') as f:
# print("The json file is created")
### USER INPUT HERE
#topic_of_interest = "Should AI be open sourced?"
topic_of_interest = st.text_input("What is your topic of interest ?","Ukraine War")#UI
empty_list = []
i = 0
for item in json.loads(result_from_chatgpt_processed):
i+=1
output_filename = "article_text"+str(i)+".txt"
article_title = item['title']
article_link = get_link_based_on_article_name_via_google(article_title, url_to_watch)
new_item = {
'title': item['title'],
'metadata': item['metadata'],
'link': article_link,
}
relation_exists = prompt_similarity_to_llm_response(article_title,topic_of_interest)
if relation_exists == "strongly agree" or relation_exists == "agree" :
article_text = process_website(article_link, output_filename)
# Summarize article
prompt_article = "Summarize the following text in 3 sentences: "
article_summary = prompt_to_llm_response(article_text,prompt_article)
# Answer the question
prompt_content = "If user input is a question provide an answer, otherwise summarise content relevant to the input topic. Answer in one sentence".format(topic_of_interest)
user_question_answer = prompt_to_llm_response(article_text,prompt_content)
new_item["summary"]=article_summary
new_item["answer"]=user_question_answer
new_item["related?"]=relation_exists
#else: print("not relevant")
empty_list.append(new_item)
output_json = json.dumps(empty_list, indent=4)
### UI OUTPUT HERE
st.json(output_json)
for j in output_json:
print(j)
# st.header(article["title"])
# st.text(article["date"])
with open("output.json", "w") as outfile:
outfile.write(output_json)
| [
"sentence2",
"\"{prompt_text}\" webpage : \"{webpage}\"",
"sentence1",
"If user input is a question provide an answer, otherwise summarise content relevant to the input topic. Answer in one sentence",
"prompt_text",
"\n Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2? \n Answer with one of [strongly agree, agree, disagree, strongly disagree] only.\n\n Sentence 1: {sentence1}\n Sentence 2: {sentence2}\n ",
"Summarize the following text in 3 sentences: ",
"Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
] |
2024-01-10 | ilisparrow/llm_tests | .history~gui_20230623010745.py | ## IMPORTS
from bs4 import BeautifulSoup # Importing BeautifulSoup for HTML parsing
from bs4.element import Comment # Importing Comment class for extracting comments from HTML
import urllib.request # Importing urllib.request for making HTTP requests
import streamlit as st # Importing streamlit for building interactive web apps
import os # Importing os for accessing operating system functionalities
from dotenv import load_dotenv # Importing load_dotenv for loading environment variables
from langchain.llms import OpenAI # Importing OpenAI class from langchain.llms module
from langchain.prompts import PromptTemplate # Importing PromptTemplate class from langchain.prompts module
import json # Importing json module for working with JSON data
from dotenv import dotenv_values # Importing dotenv_values for loading environment variables from .env file
from googlesearch import search # Importing search function from googlesearch module
import requests # Importing requests module for making HTTP requests
import unicodedata
import validators
## SETUP ENVIRONMENT VARIABLES
load_dotenv()
env_vars = dotenv_values(".env")
## Define system relevant input data for application
HARD_LIMIT_CHAR = 10000
## Functions
def tag_visible(element):
excluded_tags = ['a', 'style', 'script', 'head', 'title', 'meta', '[document]']
if element.parent.name in excluded_tags:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.find_all(text=tag_visible)
visible_texts = [t.strip() for t in texts if t.strip()]
return " ".join(visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
## Process website and save content to file
def process_website(url, output_file_name):
html = urllib.request.urlopen(url).read()
text_from_webpage = text_from_html(html)
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
# Logging
file_path = output_file_name
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
return text_from_webpage
def get_link_based_on_article_name_via_google(article_title, url_to_watch):
search = article_title + " " + url_to_watch
url = 'https://www.google.com/search'
headers = {
'Accept' : '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82',
}
parameters = {'q': search}
content = requests.get(url, headers = headers, params = parameters).text
soup = BeautifulSoup(content, 'html.parser')
search = soup.find(id = 'search')
first_link = search.find('a')
article_link= first_link['href']
return first_link['href']
def prompt_to_llm_response(text_from_webpage, prompt_input):
prompt = PromptTemplate(
input_variables=["webpage", "prompt_text"],
template="\"{prompt_text}\" \
webpage : \"{webpage}\"",
)
prompt_to_send = prompt.format(webpage=text_from_webpage, prompt_text=prompt_input)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","")
return result_from_chatgpt
def prompt_similarity_to_llm_response(sentence1, sentence2):
prompt = PromptTemplate(
input_variables=["sentence1", "sentence2"],
template="""
Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2?
Answer with one of [strongly agree, agree, disagree, strongly disagree] only.
Sentence 1: {sentence1}
Sentence 2: {sentence2}
""",
)
prompt_to_send = prompt.format(sentence1=sentence1, sentence2=sentence2)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","").lower()
return result_from_chatgpt
## Web Scrapping
url_input_example = "https://news.yahoo.com"
# url_input_example = "https://laion.ai/blog/" # OK
# url_input_example = "https://www.euronews.com/tag/artificial-intelligence" # NOK
# url_input_example = "https://www.theguardian.com/international" #OK
# url_input_example = "https://www.bloomberg.com/europe" #NOK
# url_input_example = "https://news.google.com/home?hl=en-US&gl=US&ceid=US:en" # OK
url_to_watch = st.text_input("Input your URL here", url_input_example)
topic_of_interest = st.text_input("What is your topic of interest ?","Ukraine War")#UI
### USER INPUT HERE
if validators.url(url_input_example):
## Process website and save content to file
text_from_webpage = process_website(url_to_watch, "output.txt")
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
else:
print("URL not valid")
st.header("ERROR : The URL is not valid")
### UI OUTPUT HERE
#st.write("URL not valid")
prompt_news = "Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
result_from_chatgpt = prompt_to_llm_response(text_from_webpage,prompt_news)
result_from_chatgpt_processed = result_from_chatgpt.encode('ascii', 'ignore')
print(json.dumps(json.loads(result_from_chatgpt_processed), indent=4))
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
#Logging
file_path = "output_gpt.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
#with open('final_output.json', 'w') as f:
# print("The json file is created")
### USER INPUT HERE
#topic_of_interest = "Should AI be open sourced?"
empty_list = []
i = 0
for item in json.loads(result_from_chatgpt_processed):
i+=1
output_filename = "article_text"+str(i)+".txt"
article_title = item['title']
article_link = get_link_based_on_article_name_via_google(article_title, url_to_watch)
new_item = {
'title': item['title'],
'metadata': item['metadata'],
'link': article_link,
}
relation_exists = prompt_similarity_to_llm_response(article_title,topic_of_interest)
if relation_exists == "strongly agree" or relation_exists == "agree" :
article_text = process_website(article_link, output_filename)
# Summarize article
prompt_article = "Summarize the following text in 3 sentences: "
article_summary = prompt_to_llm_response(article_text,prompt_article)
# Answer the question
prompt_content = "If user input is a question provide an answer, otherwise summarise content relevant to the input topic. Answer in one sentence".format(topic_of_interest)
user_question_answer = prompt_to_llm_response(article_text,prompt_content)
new_item["summary"]=article_summary
new_item["answer"]=user_question_answer
new_item["related?"]=relation_exists
#else: print("not relevant")
empty_list.append(new_item)
output_json = json.dumps(empty_list, indent=4)
### UI OUTPUT HERE
st.json(output_json)
for j in output_json:
print(j)
# st.header(article["title"])
# st.text(article["date"])
with open("output.json", "w") as outfile:
outfile.write(output_json)
| [
"sentence2",
"\"{prompt_text}\" webpage : \"{webpage}\"",
"sentence1",
"If user input is a question provide an answer, otherwise summarise content relevant to the input topic. Answer in one sentence",
"prompt_text",
"\n Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2? \n Answer with one of [strongly agree, agree, disagree, strongly disagree] only.\n\n Sentence 1: {sentence1}\n Sentence 2: {sentence2}\n ",
"Summarize the following text in 3 sentences: ",
"Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
] |
2024-01-10 | ilisparrow/llm_tests | .history~gui_20230623010413.py | ## IMPORTS
from bs4 import BeautifulSoup # Importing BeautifulSoup for HTML parsing
from bs4.element import Comment # Importing Comment class for extracting comments from HTML
import urllib.request # Importing urllib.request for making HTTP requests
import streamlit as st # Importing streamlit for building interactive web apps
import os # Importing os for accessing operating system functionalities
from dotenv import load_dotenv # Importing load_dotenv for loading environment variables
from langchain.llms import OpenAI # Importing OpenAI class from langchain.llms module
from langchain.prompts import PromptTemplate # Importing PromptTemplate class from langchain.prompts module
import json # Importing json module for working with JSON data
from dotenv import dotenv_values # Importing dotenv_values for loading environment variables from .env file
from googlesearch import search # Importing search function from googlesearch module
import requests # Importing requests module for making HTTP requests
import unicodedata
import validators
## SETUP ENVIRONMENT VARIABLES
load_dotenv()
env_vars = dotenv_values(".env")
## Define system relevant input data for application
HARD_LIMIT_CHAR = 10000
## Functions
def tag_visible(element):
excluded_tags = ['a', 'style', 'script', 'head', 'title', 'meta', '[document]']
if element.parent.name in excluded_tags:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.find_all(text=tag_visible)
visible_texts = [t.strip() for t in texts if t.strip()]
return " ".join(visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
## Process website and save content to file
def process_website(url, output_file_name):
html = urllib.request.urlopen(url).read()
text_from_webpage = text_from_html(html)
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
# Logging
file_path = output_file_name
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
return text_from_webpage
def get_link_based_on_article_name_via_google(article_title, url_to_watch):
search = article_title + " " + url_to_watch
url = 'https://www.google.com/search'
headers = {
'Accept' : '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82',
}
parameters = {'q': search}
content = requests.get(url, headers = headers, params = parameters).text
soup = BeautifulSoup(content, 'html.parser')
search = soup.find(id = 'search')
first_link = search.find('a')
article_link= first_link['href']
return first_link['href']
def prompt_to_llm_response(text_from_webpage, prompt_input):
prompt = PromptTemplate(
input_variables=["webpage", "prompt_text"],
template="\"{prompt_text}\" \
webpage : \"{webpage}\"",
)
prompt_to_send = prompt.format(webpage=text_from_webpage, prompt_text=prompt_input)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","")
return result_from_chatgpt
def prompt_similarity_to_llm_response(sentence1, sentence2):
prompt = PromptTemplate(
input_variables=["sentence1", "sentence2"],
template="""
Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2?
Answer with one of [strongly agree, agree, disagree, strongly disagree] only.
Sentence 1: {sentence1}
Sentence 2: {sentence2}
""",
)
prompt_to_send = prompt.format(sentence1=sentence1, sentence2=sentence2)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","").lower()
return result_from_chatgpt
## Web Scrapping
url_input_example = "https://news.yahoo.com"
# url_input_example = "https://laion.ai/blog/" # OK
# url_input_example = "https://www.euronews.com/tag/artificial-intelligence" # NOK
# url_input_example = "https://www.theguardian.com/international" #OK
# url_input_example = "https://www.bloomberg.com/europe" #NOK
# url_input_example = "https://news.google.com/home?hl=en-US&gl=US&ceid=US:en" # OK
### USER INPUT HERE
if validators.url(url_input_example):
url_to_watch = st.text_input("Input your URL here", url_input_example)
## Process website and save content to file
text_from_webpage = process_website(url_to_watch, "output.txt")
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
else:
print("URL not valid")
st.header("ERROR : The URL is not valid")
### UI OUTPUT HERE
#st.write("URL not valid")
prompt_news = "Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
result_from_chatgpt = prompt_to_llm_response(text_from_webpage,prompt_news)
result_from_chatgpt_processed = result_from_chatgpt.encode('ascii', 'ignore')
print(json.dumps(json.loads(result_from_chatgpt_processed), indent=4))
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
#Logging
file_path = "output_gpt.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
#with open('final_output.json', 'w') as f:
# print("The json file is created")
### USER INPUT HERE
#topic_of_interest = "Should AI be open sourced?"
topic_of_interest = st.text_input("What is your topic of interest ?","Ukraine War")#UI
empty_list = []
i = 0
for item in json.loads(result_from_chatgpt_processed):
i+=1
output_filename = "article_text"+str(i)+".txt"
article_title = item['title']
article_link = get_link_based_on_article_name_via_google(article_title, url_to_watch)
new_item = {
'title': item['title'],
'metadata': item['metadata'],
'link': article_link,
}
relation_exists = prompt_similarity_to_llm_response(article_title,topic_of_interest)
if relation_exists == "strongly agree" or relation_exists == "agree" :
article_text = process_website(article_link, output_filename)
# Summarize article
prompt_article = "Summarize the following text in 3 sentences: "
article_summary = prompt_to_llm_response(article_text,prompt_article)
# Answer the question
prompt_content = "If user input is a question provide an answer, otherwise summarise content relevant to the input topic. Answer in one sentence".format(topic_of_interest)
user_question_answer = prompt_to_llm_response(article_text,prompt_content)
new_item["summary"]=article_summary
new_item["answer"]=user_question_answer
new_item["related?"]=relation_exists
#else: print("not relevant")
empty_list.append(new_item)
output_json = json.dumps(empty_list, indent=4)
st.json(output_json)
### UI OUTPUT HERE
with open("output.json", "w") as outfile:
outfile.write(output_json)
| [
"\n Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2? \n Answer with one of [strongly agree, agree, disagree, strongly disagree] only.\n\n Sentence 1: {sentence1}\n Sentence 2: {sentence2}\n ",
"sentence2",
"\"{prompt_text}\" webpage : \"{webpage}\"",
"If user input is a question provide an answer, otherwise summarise content relevant to the input topic. Answer in one sentence",
"prompt_text",
"sentence1",
"Summarize the following text in 3 sentences: ",
"Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518164833.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text.\
webpage : \"{webpage}\"",
)
llm = OpenAI(temperature=0.0)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
parsed_articles = json.loads(result_from_chatgpt)
for article in parsed_articles :
st.write(article["title"])
st.write(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518170321.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text.\
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.0)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
load_dotenv()
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
for article in parsed_articles:
print(article["title"])
#st.write(article["title"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | gui.py | ## IMPORTS
from bs4 import BeautifulSoup # Importing BeautifulSoup for HTML parsing
from bs4.element import Comment # Importing Comment class for extracting comments from HTML
import urllib.request # Importing urllib.request for making HTTP requests
import streamlit as st # Importing streamlit for building interactive web apps
import os # Importing os for accessing operating system functionalities
from dotenv import load_dotenv # Importing load_dotenv for loading environment variables
from langchain.llms import OpenAI # Importing OpenAI class from langchain.llms module
from langchain.prompts import PromptTemplate # Importing PromptTemplate class from langchain.prompts module
import json # Importing json module for working with JSON data
from dotenv import dotenv_values # Importing dotenv_values for loading environment variables from .env file
from googlesearch import search # Importing search function from googlesearch module
import requests # Importing requests module for making HTTP requests
import unicodedata
import validators
import hashlib
## SETUP ENVIRONMENT VARIABLES
load_dotenv()
env_vars = dotenv_values(".env")
## Define system relevant input data for application
HARD_LIMIT_CHAR = 10000
## Functions
def tag_visible(element):
excluded_tags = ['a', 'style', 'script', 'head', 'title', 'meta', '[document]']
if element.parent.name in excluded_tags:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.find_all(text=tag_visible)
visible_texts = [t.strip() for t in texts if t.strip()]
return " ".join(visible_texts)
def extract_json_values(input_str):
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
## Process website and save content to file
def process_website(url, output_file_name):
html = urllib.request.urlopen(url).read()
text_from_webpage = text_from_html(html)
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
# Logging
file_path = output_file_name
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
return text_from_webpage
def get_link_based_on_article_name_via_google(article_title, url_to_watch):
search = article_title + " " + url_to_watch
url = 'https://www.google.com/search'
headers = {
'Accept' : '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82',
}
parameters = {'q': search}
content = requests.get(url, headers = headers, params = parameters).text
soup = BeautifulSoup(content, 'html.parser')
search = soup.find(id = 'search')
first_link = search.find('a')
article_link= first_link['href']
return first_link['href']
def prompt_to_llm_response(text_from_webpage, prompt_input):
prompt = PromptTemplate(
input_variables=["webpage", "prompt_text"],
template="\"{prompt_text}\" \
webpage : \"{webpage}\"",
)
prompt_to_send = prompt.format(webpage=text_from_webpage, prompt_text=prompt_input)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","")
return result_from_chatgpt
def prompt_similarity_to_llm_response(sentence1, sentence2):
prompt = PromptTemplate(
input_variables=["sentence1", "sentence2"],
template="""
Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2?
Answer with one of [strongly agree, agree, disagree, strongly disagree] only.
Sentence 1: {sentence1}
Sentence 2: {sentence2}
""",
)
prompt_to_send = prompt.format(sentence1=sentence1, sentence2=sentence2)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","").lower()
return result_from_chatgpt
## Web Scraping
url_input_example = "https://news.yahoo.com"
# url_input_example = "https://laion.ai/blog/" # OK
# url_input_example = "https://www.euronews.com/tag/artificial-intelligence" # NOK
# url_input_example = "https://www.theguardian.com/international" #OK
# url_input_example = "https://www.bloomberg.com/europe" #NOK
# url_input_example = "https://news.google.com/home?hl=en-US&gl=US&ceid=US:en" # OK
key_pass = st.text_input("Input your keypass here")
password_is_correct = False
hashed = hashlib.md5(key_pass.encode())
if hashed.hexdigest() == "a525a0ff61ed4cd44e2068f7c71cad4b":
    password_is_correct = True
url_to_watch = st.text_input("Input your URL here", url_input_example)
topic_of_interest = st.text_input("What is your topic of interest ?","Ukraine War")#UI
### USER INPUT HERE
if validators.url(url_to_watch):
## Process website and save content to file
text_from_webpage = process_website(url_to_watch, "output.txt")
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
else:
print("URL not valid")
st.header("ERROR : The URL is not valid")
### UI OUTPUT HERE
#st.write("URL not valid")
if st.button("Process") and password_is_correct:
prompt_news = "Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
result_from_chatgpt = prompt_to_llm_response(text_from_webpage,prompt_news)
result_from_chatgpt_processed = result_from_chatgpt.encode('ascii', 'ignore')
print(json.dumps(json.loads(result_from_chatgpt_processed), indent=4))
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
#Logging
file_path = "output_gpt.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
#with open('final_output.json', 'w') as f:
# print("The json file is created")
### USER INPUT HERE
#topic_of_interest = "Should AI be open sourced?"
empty_list = []
i = 0
for item in json.loads(result_from_chatgpt_processed):
i+=1
output_filename = "article_text"+str(i)+".txt"
article_title = item['title']
article_link = get_link_based_on_article_name_via_google(article_title, url_to_watch)
new_item = {
'title': item['title'],
'metadata': item['metadata'],
'link': article_link,
}
relation_exists = prompt_similarity_to_llm_response(article_title,topic_of_interest)
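        # Only fetch, summarise, and answer against articles the LLM judges relevant to the topic of interest.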
if relation_exists == "strongly agree" or relation_exists == "agree" :
article_text = process_website(article_link, output_filename)
# Summarize article
prompt_article = "Summarize the following text in 3 sentences: "
article_summary = prompt_to_llm_response(article_text,prompt_article)
# Answer the question
prompt_content = "If user input is a question provide an answer, otherwise summarise content relevant to the input topic. Answer in one sentence".format(topic_of_interest)
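            # NOTE: the string above contains no "{}" placeholder, so .format(topic_of_interest) leaves it unchanged and the topic is never inserted into the prompt.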
user_question_answer = prompt_to_llm_response(article_text,prompt_content)
new_item["summary"]=article_summary
new_item["answer"]=user_question_answer
new_item["related?"]=relation_exists
empty_list.append(new_item)
#else: print("not relevant")
output_json = json.dumps(empty_list, indent=4)
### UI OUTPUT HERE
if len(empty_list)==0:
st.text("No relevant article found")
else:
st.text("Here are the articles related to the question above, in the latest five articles.")
st.json(output_json)
for j in output_json:
pass
#print(j)
# st.header(article["title"])
# st.text(article["date"])
with open("output.json", "w") as outfile:
outfile.write(output_json)
| [
"\n Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2? \n Answer with one of [strongly agree, agree, disagree, strongly disagree] only.\n\n Sentence 1: {sentence1}\n Sentence 2: {sentence2}\n ",
"sentence2",
"\"{prompt_text}\" webpage : \"{webpage}\"",
"If user input is a question provide an answer, otherwise summarise content relevant to the input topic. Answer in one sentence",
"prompt_text",
"sentence1",
"Summarize the following text in 3 sentences: ",
"Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160630.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text)
print("Variable content saved to the file:", file_path)
#LLM part
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. \n webpage : {webpage}",
)
llm = OpenAI(temperature=0.9)
prompt_to_send = prompt.format(product="colorful socks")
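# NOTE: the template above expects a "webpage" variable, so this format() call fails at runtime; later snapshots pass webpage= instead.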
result_from_chatgpt = llm(prompt_to_send)
st.write(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. \n webpage : {webpage}",
"colorful socks"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160426.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scrapping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text)
print("Variable content saved to the file:", file_path)
#LLM part
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. webpage : {webpage}",)
llm = OpenAI(temperature=0.9)
prompt_to_send = prompt.format(product="colorful socks")
print(llm(prompt_to_send))
st.write('The current movie title is')
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. webpage : {webpage}",
"colorful socks"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160744.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scraping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text)
print("Variable content saved to the file:", file_path)
#LLM part
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. \n webpage : {webpage}",
)
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
llm = OpenAI(temperature=0.9)
prompt_to_send = prompt.format(product="colorful socks")
result_from_chatgpt = llm(prompt_to_send)
st.write(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. \n webpage : {webpage}",
"colorful socks",
"What is a good name for a company that makes {product}?"
] |
2024-01-10 | ilisparrow/llm_tests | .history~gui_20230623005851.py | ## IMPORTS
from bs4 import BeautifulSoup # Importing BeautifulSoup for HTML parsing
from bs4.element import Comment # Importing Comment class for extracting comments from HTML
import urllib.request # Importing urllib.request for making HTTP requests
import streamlit as st # Importing streamlit for building interactive web apps
import os # Importing os for accessing operating system functionalities
from dotenv import load_dotenv # Importing load_dotenv for loading environment variables
from langchain.llms import OpenAI # Importing OpenAI class from langchain.llms module
from langchain.prompts import PromptTemplate # Importing PromptTemplate class from langchain.prompts module
import json # Importing json module for working with JSON data
from dotenv import dotenv_values # Importing dotenv_values for loading environment variables from .env file
from googlesearch import search # Importing search function from googlesearch module
import requests # Importing requests module for making HTTP requests
import unicodedata
import validators
## SETUP ENVIRONMENT VARIABLES
load_dotenv()
env_vars = dotenv_values(".env")
## Define system relevant input data for application
HARD_LIMIT_CHAR = 10000
## Functions
def tag_visible(element):
excluded_tags = ['a', 'style', 'script', 'head', 'title', 'meta', '[document]']
if element.parent.name in excluded_tags:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.find_all(text=tag_visible)
visible_texts = [t.strip() for t in texts if t.strip()]
return " ".join(visible_texts)
def extract_json_values(input_str):
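    # Walk through the string, skipping unparseable characters and splitting concatenated JSON, and collect every value that decodes successfully.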
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
## Process website and save content to file
def process_website(url, output_file_name):
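    # Download the page, reduce it to visible text, truncate to HARD_LIMIT_CHAR, write it to output_file_name, and return it.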
html = urllib.request.urlopen(url).read()
text_from_webpage = text_from_html(html)
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
# Logging
file_path = output_file_name
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
return text_from_webpage
def get_link_based_on_article_name_via_google(article_title, url_to_watch):
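    # Search Google for the article title plus the site URL and return the href of the first result on the results page.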
search = article_title + " " + url_to_watch
url = 'https://www.google.com/search'
headers = {
'Accept' : '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82',
}
parameters = {'q': search}
content = requests.get(url, headers = headers, params = parameters).text
soup = BeautifulSoup(content, 'html.parser')
search = soup.find(id = 'search')
first_link = search.find('a')
article_link= first_link['href']
return first_link['href']
def prompt_to_llm_response(text_from_webpage, prompt_input):
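    # Combine the caller's prompt text with the scraped page text, query the LLM at temperature 0, and return the cleaned-up completion.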
prompt = PromptTemplate(
input_variables=["webpage", "prompt_text"],
template="\"{prompt_text}\" \
webpage : \"{webpage}\"",
)
prompt_to_send = prompt.format(webpage=text_from_webpage, prompt_text=prompt_input)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","")
return result_from_chatgpt
def prompt_similarity_to_llm_response(sentence1, sentence2):
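    # Ask the LLM whether sentence1 would interest someone who cares about sentence2; returns one of the agree/disagree labels in lowercase.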
prompt = PromptTemplate(
input_variables=["sentence1", "sentence2"],
template="""
Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2?
Answer with one of [strongly agree, agree, disagree, strongly disagree] only.
Sentence 1: {sentence1}
Sentence 2: {sentence2}
""",
)
prompt_to_send = prompt.format(sentence1=sentence1, sentence2=sentence2)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","").lower()
return result_from_chatgpt
## Web Scraping
url_input = "https://news.yahoo.com"
# url_input = "https://laion.ai/blog/" # OK
# url_input = "https://www.euronews.com/tag/artificial-intelligence" # NOK
# url_input = "https://www.theguardian.com/international" #OK
# url_input = "https://www.bloomberg.com/europe" #NOK
# url_input = "https://news.google.com/home?hl=en-US&gl=US&ceid=US:en" # OK
### USER INPUT HERE
if validators.url(url_input):
url_to_watch = st.text_input("Input your URL here", url_input)
## Process website and save content to file
text_from_webpage = process_website(url_to_watch, "output.txt")
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
else:
print("URL not valid")
### UI OUTPUT HERE
#st.write("URL not valid")
prompt_news = "Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
result_from_chatgpt = prompt_to_llm_response(text_from_webpage,prompt_news)
result_from_chatgpt_processed = result_from_chatgpt.encode('ascii', 'ignore')
print(json.dumps(json.loads(result_from_chatgpt_processed), indent=4))
file_path = "gpt_out.txt"
parsed_articles = json.loads(result_from_chatgpt)
#Logging
file_path = "output_gpt.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
| [
"\n Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2? \n Answer with one of [strongly agree, agree, disagree, strongly disagree] only.\n\n Sentence 1: {sentence1}\n Sentence 2: {sentence2}\n ",
"\"{prompt_text}\" webpage : \"{webpage}\"",
"sentence2",
"prompt_text",
"sentence1",
"Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518171723.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
from dotenv import dotenv_values
#Setup env vars :
load_dotenv()
env_vars = dotenv_values(".env")
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scraping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#TODO : Fix this limit, in a smarter way
text_from_webpage = text_from_webpage[:15000]
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. \
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'],temperature=0.9)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
for article in parsed_articles:
print(article["title"])
st.header(article["title"])
st.text(article["date"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 3. In Json format, using these keys \"title\", \"date\". No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160457.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scraping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text)
print("Variable content saved to the file:", file_path)
#LLM part
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. \n webpage : {webpage}",)
llm = OpenAI(temperature=0.9)
prompt_to_send = prompt.format(webpage=text)
result_from_chatgpt = llm(prompt_to_send)
st.write(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. \n webpage : {webpage}",
"colorful socks"
] |
2024-01-10 | ilisparrow/llm_tests | .history~gui_20230623005814.py | ## IMPORTS
from bs4 import BeautifulSoup # Importing BeautifulSoup for HTML parsing
from bs4.element import Comment # Importing Comment class for extracting comments from HTML
import urllib.request # Importing urllib.request for making HTTP requests
import streamlit as st # Importing streamlit for building interactive web apps
import os # Importing os for accessing operating system functionalities
from dotenv import load_dotenv # Importing load_dotenv for loading environment variables
from langchain.llms import OpenAI # Importing OpenAI class from langchain.llms module
from langchain.prompts import PromptTemplate # Importing PromptTemplate class from langchain.prompts module
import json # Importing json module for working with JSON data
from dotenv import dotenv_values # Importing dotenv_values for loading environment variables from .env file
from googlesearch import search # Importing search function from googlesearch module
import requests # Importing requests module for making HTTP requests
import unicodedata
import validators
## SETUP ENVIRONMENT VARIABLES
load_dotenv()
env_vars = dotenv_values(".env")
## Define system relevant input data for application
HARD_LIMIT_CHAR = 10000
## Functions
def tag_visible(element):
excluded_tags = ['a', 'style', 'script', 'head', 'title', 'meta', '[document]']
if element.parent.name in excluded_tags:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.find_all(text=tag_visible)
visible_texts = [t.strip() for t in texts if t.strip()]
return " ".join(visible_texts)
def extract_json_values(input_str):
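    # Repeatedly attempt json.loads on the remaining string, skipping bad characters and splitting extra data, and return all decoded values.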
results = []
while input_str:
try:
value = json.loads(input_str)
input_str = ""
except json.decoder.JSONDecodeError as exc:
if str(exc).startswith("Expecting value"):
input_str = input_str[exc.pos+1:]
continue
elif str(exc).startswith("Extra data"):
value = json.loads(input_str[:exc.pos])
input_str = input_str[exc.pos:]
results.append(value)
return results
## Process website and save content to file
def process_website(url, output_file_name):
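    # Fetch the URL, strip it to visible text, cap it at HARD_LIMIT_CHAR, log it to the given file, and return the text.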
html = urllib.request.urlopen(url).read()
text_from_webpage = text_from_html(html)
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
# Logging
file_path = output_file_name
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
return text_from_webpage
def get_link_based_on_article_name_via_google(article_title, url_to_watch):
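    # Query Google with the article title and site URL, then pull the first link out of the results HTML.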
search = article_title + " " + url_to_watch
url = 'https://www.google.com/search'
headers = {
'Accept' : '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82',
}
parameters = {'q': search}
content = requests.get(url, headers = headers, params = parameters).text
soup = BeautifulSoup(content, 'html.parser')
search = soup.find(id = 'search')
first_link = search.find('a')
article_link= first_link['href']
return first_link['href']
def prompt_to_llm_response(text_from_webpage, prompt_input):
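    # Send the supplied prompt together with the webpage text to the LLM and return the response without newlines or the "Answer:" prefix.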
prompt = PromptTemplate(
input_variables=["webpage", "prompt_text"],
template="\"{prompt_text}\" \
webpage : \"{webpage}\"",
)
prompt_to_send = prompt.format(webpage=text_from_webpage, prompt_text=prompt_input)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","")
return result_from_chatgpt
def prompt_similarity_to_llm_response(sentence1, sentence2):
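    # Have the LLM rate how relevant sentence1 is to a reader interested in sentence2, returning a lowercase agree/disagree label.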
prompt = PromptTemplate(
input_variables=["sentence1", "sentence2"],
template="""
Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2?
Answer with one of [strongly agree, agree, disagree, strongly disagree] only.
Sentence 1: {sentence1}
Sentence 2: {sentence2}
""",
)
prompt_to_send = prompt.format(sentence1=sentence1, sentence2=sentence2)
llm = OpenAI(openai_api_key=env_vars['OPENAI_API_KEY'], temperature=0)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "").replace("Answer:","").lower()
return result_from_chatgpt
## Web Scraping
url_input = "https://news.yahoo.com"
# url_input = "https://laion.ai/blog/" # OK
# url_input = "https://www.euronews.com/tag/artificial-intelligence" # NOK
# url_input = "https://www.theguardian.com/international" #OK
# url_input = "https://www.bloomberg.com/europe" #NOK
# url_input = "https://news.google.com/home?hl=en-US&gl=US&ceid=US:en" # OK
### USER INPUT HERE
if validators.url(url_input):
url_to_watch = st.text_input("Input your URL here", url_input)
## Process website and save content to file
text_from_webpage = process_website(url_to_watch, "output.txt")
text_from_webpage = text_from_webpage[:HARD_LIMIT_CHAR]
else:
print("URL not valid")
### UI OUTPUT HERE
#st.write("URL not valid")
prompt_news = "Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
result_from_chatgpt = prompt_to_llm_response(text_from_webpage,prompt_news)
result_from_chatgpt_processed = result_from_chatgpt.encode('ascii', 'ignore')
print(json.dumps(json.loads(result_from_chatgpt_processed), indent=4))
#print(result_from_chatgpt)
for article in json.loads(result_from_chatgpt_processed):
print("--------------------------")
print(article["title"])
query = article["title"]
for j in search(query, tld="co.in", num=1, stop=1, pause=2):
print(j)
st.header(article["title"])
    st.text(article.get("metadata", ""))
st.json(json.dumps(json.loads(result_from_chatgpt), indent=4))
| [
"\n Compare the content of the following two sentences. Could sentence 1 be relevant for a person interested in sentence 2? \n Answer with one of [strongly agree, agree, disagree, strongly disagree] only.\n\n Sentence 1: {sentence1}\n Sentence 2: {sentence2}\n ",
"\"{prompt_text}\" webpage : \"{webpage}\"",
"sentence2",
"prompt_text",
"sentence1",
"Below is an html version of a news website. It contains news articles. Find the titles of news articles on this website. Do not make up article titles. List all the article titles and their metadata if it exists like date or author. Limit yourself to the first 5. In JSON format, using these keys \"title\", \"metadata\". No Other text."
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518160432.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scraping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text)
print("Variable content saved to the file:", file_path)
#LLM part
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. \n webpage : {webpage}",)
llm = OpenAI(temperature=0.9)
prompt_to_send = prompt.format(webpage=text)
print(llm(prompt_to_send))
st.write('The current movie title is')
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. In Json format. No Other text. \n webpage : {webpage}",
"colorful socks"
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518163144.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scraping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 5. In Json format. No Other text.\
webpage : \"{webpage}\"",
)
llm = OpenAI(temperature=0.7)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
st.write(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 5. In Json format. No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518164423.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
#Setup env vars :
load_dotenv()
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scraping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text.\
webpage : \"{webpage}\"",
)
llm = OpenAI(temperature=0.0)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
st.write(result_from_chatgpt)
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text. webpage : \"{webpage}\""
] |
2024-01-10 | ilisparrow/llm_tests | .history~scrapping_20230518170200.py | from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.request
import streamlit as st
import os
from dotenv import load_dotenv, dotenv_values
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import json
#Setup env vars :
load_dotenv()
print(dotenv_values(".env"))
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def text_from_html(body):
soup = BeautifulSoup(body, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
return u" ".join(t.strip() for t in visible_texts)
#TODO : DO URL Check and show message when not valid
#Web Scraping and
url_to_watch = st.text_input("Input your url here","https://laion.ai/blog/")#UI
html = urllib.request.urlopen(url_to_watch).read()
text_from_webpage = text_from_html(html)
#Logging
file_path = "output.txt"
with open(file_path, "w") as file:
file.write(text_from_webpage)
print("Variable content saved to the file:", file_path)
#LLM part
#if st.button('Analyze'):
prompt = PromptTemplate(
input_variables=["webpage"],
template="In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text.\
webpage : \"{webpage}\"",
)
llm = OpenAI(openai_api_key="OPENAI_API_KEY",temperature=0.0)
prompt_to_send = prompt.format(webpage=text_from_webpage)
result_from_chatgpt = llm(prompt_to_send).replace("\n", "")
file_path = "gpt_out.txt"
with open(file_path, "w") as file:
file.write(result_from_chatgpt)
print("Variable content saved to the file:", file_path)
load_dotenv()
parsed_articles = json.loads(result_from_chatgpt)
json_object = json.dumps(parsed_articles, indent=4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
outfile.write(json_object)
for article in parsed_articles:
print(article["title"])
#st.write(article["title"])
| [
"In this web page, can you find a pattern, list all the articles and their publication dates. Limit yourself to the first 2. In Json format. No Other text. webpage : \"{webpage}\""
] |