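"""Gradio app: generate a full blog post from a topic keyword.

The prompt template, per-section queries, and section titles are fetched at
startup from a remote cloud function (authenticated via HRA_TOKEN); generation
runs through LangChain's OpenAI wrapper using the key supplied in the UI.
"""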
import json
import os
import re

import requests
import gradio as gr
from datetime import datetime

from langchain.llms import OpenAI
from langchain import LLMChain
from langchain import PromptTemplate

# Token used to authenticate against the prompt-serving cloud function
HRA_TOKEN = os.getenv("HRA_TOKEN")
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url_decodemprompts = 'https://us-central1-createinsightsproject.cloudfunctions.net/gethrahfprompts'
data = {"prompt_type": 'chatgpt_blog', "hra_token": HRA_TOKEN}

# Fetch the blog prompt template, section queries, and section titles from the
# remote cloud function at startup. The explicit timeout makes the ReadTimeout
# handler below reachable; without one, requests would wait indefinitely.
try:
    r = requests.post(url_decodemprompts, data=json.dumps(data), headers=headers, timeout=30)
except requests.exceptions.ReadTimeout as e:
    print(e)
    raise  # the app cannot start without the prompt payload

prompt = str(r.content, 'UTF-8')
print(prompt)

# The payload is a single string delimited by the literal token 'SEPERATOR'
# (spelled this way in the remote payload): prompt template, then
# comma-separated section queries, then comma-separated section titles.
template = prompt.split('SEPERATOR')[0]
querieslist = prompt.split('SEPERATOR')[1].split(',')
titleslist = prompt.split('SEPERATOR')[2].split(',')
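# getblog(): screens the topic against a public profanity list, fills the
# fetched template with the topic, runs one LLM call per section query, and
# assembles the results (under their section titles) into a single blog string.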
def getblog(text_inp1, text_inp2):
    print(text_inp1, text_inp2)
    print(datetime.today().strftime("%d-%m-%Y"))

    # Download a public profanity word list and lower-case it for matching
    response_nsfw = requests.get('https://github.com/coffee-and-fun/google-profanity-words/raw/main/data/list.txt')
    data_nsfw = response_nsfw.text
    nsfwlist = data_nsfw.split('\n')
    nsfwlowerlist = []
    for each in nsfwlist:
        if each != '':
            nsfwlowerlist.append(each.lower())

    # Reject the request if the input topic contains any listed word
    mainstring = text_inp1
    foundnsfw = 0
    for each_word in nsfwlowerlist:
        raw_search_string = r"\b" + each_word + r"\b"
        match_output = re.search(raw_search_string, mainstring)
        no_match_was_found = (match_output is None)
        if no_match_was_found:
            foundnsfw = 0
        else:
            foundnsfw = 1
            print(each_word)
            break
    if foundnsfw == 1:
        error_msg = "Unsafe content found. Please try again with different keywords."
        print(error_msg)
        return error_msg
    if text_inp2 != '':
        # Substitute the user's topic into the prompt template fetched at startup
        template_tmp = template
        template_tmp = template_tmp.replace("{topic}", text_inp1).strip()

        llm = OpenAI(temperature=0, openai_api_key=text_inp2)
        prompt_template = PromptTemplate(
            input_variables=["query"],
            template=template_tmp
        )
        llm_chain = LLMChain(prompt=prompt_template, llm=llm, verbose=True)

        # Run one LLM call per section query and collect the outputs
        bloglist = []
        for query in querieslist:
            result = llm_chain.run(query)
            bloglist.append(result)

        # Place the last generated piece first, then append each remaining
        # section under its corresponding title
        blog = bloglist[-1] + "\n\n"
        for i in range(len(bloglist) - 1):
            blog += titleslist[i] + '\n' + bloglist[i] + '\n\n'
        print(blog)

        # Screen the generated blog with the same profanity check as the input
        mainstring = blog
        foundnsfw = 0
        for each_word in nsfwlowerlist:
            raw_search_string = r"\b" + each_word + r"\b"
            match_output = re.search(raw_search_string, mainstring)
            no_match_was_found = (match_output is None)
            if no_match_was_found:
                foundnsfw = 0
            else:
                foundnsfw = 1
                print(each_word)
                break
        if foundnsfw == 1:
            error_msg = "Unsafe content found. Please try again with different keywords."
            print(error_msg)
            return error_msg
        else:
            return blog
    else:
        return "Enter your OpenAI API key"
with gr.Blocks() as demo:
    with gr.Row():
        gr.Markdown("<h1><center>Everybody Can Blog</center></h1>")
        gr.Markdown(
            """Everybody can blog. Just enter a topic/keyword & get a full blog from the ChatGPT model. See the examples for guidance. Experience the power of prompt engineering."""
        )
    with gr.Row():
        with gr.Column():
            textbox1 = gr.Textbox(placeholder="Enter a topic/keyword (1-3 words) to generate a blog...", lines=1, label='Topic')
            textbox2 = gr.Textbox(placeholder="Enter your OpenAI API key...", lines=1, label='OpenAI API Key')
        with gr.Column():
            btn = gr.Button("Generate")
            output1 = gr.Textbox(lines=5, label='Blog')

    btn.click(getblog, inputs=[textbox1, textbox2], outputs=[output1])
    examples = gr.Examples(examples=['5G', 'Minimalism', 'Rock music', 'Electric Vehicles'],
                           inputs=[textbox1])

demo.launch()
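# Usage sketch (assumptions: this file is saved as app.py, HRA_TOKEN is exported
# in the environment, and the gradio/langchain/openai packages are installed):
#   HRA_TOKEN=<token> python app.py
# Gradio serves the UI locally (http://127.0.0.1:7860 by default); paste your
# OpenAI API key into the "OpenAI API Key" field before generating.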