File size: 4,342 Bytes
8b06302
 
 
 
 
 
 
 
c80b6ae
2fb0ac1
8b06302
5101f6f
8b06302
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18e5178
8b06302
 
466bb4c
8b06302
 
811f257
 
 
 
 
 
 
 
 
2fb0ac1
811f257
 
2fb0ac1
 
 
 
 
 
811f257
2fb0ac1
 
 
7c44def
2fb0ac1
811f257
 
 
8b06302
e2d351c
 
2117db0
8b06302
 
32c9bc1
e2d351c
8b06302
 
 
 
 
 
 
 
32c9bc1
8b06302
 
 
18e5178
8b06302
18e5178
8b06302
 
6cecb02
2fb0ac1
6cecb02
 
2fb0ac1
 
 
 
 
 
6cecb02
2fb0ac1
 
 
6cecb02
2fb0ac1
6cecb02
 
 
 
8b06302
 
 
 
 
 
 
bdf4c85
8b06302
 
 
bdf4c85
8b06302
 
 
72d179e
8b06302
 
8603955
8b06302
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
import json
import requests
import gradio as gr
import random
import time
import os
import datetime
from datetime import datetime
import openai
import re

#print('for update')

HRA_TOKEN=os.getenv("HRA_TOKEN")

from langchain.agents import load_tools, Tool, initialize_agent
from langchain.llms import OpenAI
from langchain.agents import ZeroShotAgent, Tool, AgentExecutor
from langchain.agents import initialize_agent, Tool
from langchain import LLMChain
from langchain import PromptTemplate



headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url_decodemprompts='https://us-central1-createinsightsproject.cloudfunctions.net/gethrahfprompts'

# Fetch the blog prompt template + query/title lists from the remote prompt store.
data={"prompt_type":'chatgpt_blog',"hra_token":HRA_TOKEN}
try:
    # timeout added: without it a stalled server hangs app startup forever.
    r = requests.post(url_decodemprompts, data=json.dumps(data), headers=headers, timeout=30)
    r.raise_for_status()  # surface HTTP 4xx/5xx instead of parsing an error page
except requests.exceptions.RequestException as e:
    # Original caught only ReadTimeout and then fell through to an undefined `r`
    # (NameError). Fail loudly with a clear, chained error instead.
    print(e)
    raise RuntimeError("Could not fetch prompt data from HRA prompt service") from e
#print(r.content)

prompt = str(r.content, 'UTF-8')
print(prompt)
# NOTE: 'SEPERATOR' (sic) is the delimiter the server actually emits — do not
# correct the spelling. Split once and reuse instead of splitting three times.
_prompt_parts = prompt.split('SEPERATOR')
template = _prompt_parts[0]            # LLM prompt template containing "{topic}"
querieslist = _prompt_parts[1].split(',')  # per-section queries fed to the chain
titleslist = _prompt_parts[2].split(',')   # section titles, parallel to querieslist


def _load_nsfw_words():
  """Download the profanity list and return it as lowercase words (blank lines dropped)."""
  response_nsfw = requests.get('https://github.com/coffee-and-fun/google-profanity-words/raw/main/data/list.txt')
  return [word.lower() for word in response_nsfw.text.split('\n') if word != '']


def _contains_nsfw(text, words):
  """Return True if any of `words` appears as a whole word in `text`.

  Matching is case-insensitive: the word list is lowercase, so the text is
  lowercased first (the original compared case-sensitively, letting
  capitalized profanity through). Words are re.escape()d because list
  entries are untrusted and may contain regex metacharacters.
  """
  lowered = text.lower()
  for each_word in words:
      pattern = r"\b" + re.escape(each_word) + r"\b"
      if re.search(pattern, lowered) is not None:
          print(each_word)
          return True
  return False


def getblog(text_inp1, text_inp2):
  """Generate a full blog post for topic `text_inp1` using OpenAI key `text_inp2`.

  Returns the assembled blog text, or an error message string when the topic
  or the generated output contains profanity, or when no API key was given.
  """
  print(text_inp1, text_inp2)
  print(datetime.today().strftime("%d-%m-%Y"))

  nsfwlowerlist = _load_nsfw_words()

  # Screen the user-supplied topic before spending any API calls.
  if _contains_nsfw(text_inp1, nsfwlowerlist):
    error_msg = "Unsafe content found. Please try again with different keywords."
    print(error_msg)
    return error_msg

  if text_inp2 == '':
      return "Enter OpenAPI key"

  # Substitute the topic into the remote template fetched at startup.
  template_tmp = template.replace("{topic}", text_inp1).strip()
  llm = OpenAI(temperature=0, openai_api_key=text_inp2)
  prompt_template = PromptTemplate(
      input_variables=["query"],
      template=template_tmp
      )
  # Build the chain once (the original rebuilt it on every loop iteration).
  llm_chain = LLMChain(prompt=prompt_template, llm=llm, verbose=True)

  bloglist = [llm_chain.run(query) for query in querieslist]

  # Last result is the blog title/intro; preceding results are the titled sections.
  blog = "" + bloglist[-1] + "\n\n"
  for i in range(len(bloglist) - 1):
      blog += titleslist[i] + '\n' + bloglist[i] + '\n\n'
  print(blog)

  # Screen the model output too, in case the LLM produced profanity.
  if _contains_nsfw(blog, nsfwlowerlist):
    error_msg = "Unsafe content found. Please try again with different keywords."
    print(error_msg)
    return error_msg
  return blog

# Build and launch the Gradio UI. `demo` stays module-level (Spaces convention).
with gr.Blocks() as demo:
    # Header: title plus a short how-to blurb.
    with gr.Row():
        gr.Markdown("<h1><center>Everybody Can Blog</center></h1>")
        gr.Markdown(
            """Everybody can blog. Just enter a topic/ keyword & get a full blog from the ChatGPT model. See examples for guidance. Experience the power of Prompt Engineering."""
            )
    # Left column: inputs. Right column: generate button and blog output.
    with gr.Row():
        with gr.Column():
            topic_input = gr.Textbox(
                placeholder="Enter topic/ keyword (1-3 words) to generate blog...",
                lines=1,
                label='Topic',
            )
            apikey_input = gr.Textbox(
                placeholder="Enter OpenAPI Key...",
                lines=1,
                label='OpenAPI Key',
            )
        with gr.Column():
            generate_btn = gr.Button("Generate")
            blog_output = gr.Textbox(lines=5, label='Blog')

    # Wire the button to the generator function.
    generate_btn.click(getblog, inputs=[topic_input, apikey_input], outputs=[blog_output])
    examples = gr.Examples(
        examples=['5G', 'Minimalism', 'Rock music', 'Electric Vehicles', 'Humanity vs Robots'],
        inputs=[topic_input],
    )


demo.launch()