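"""Everybody Can Blog: a Gradio app that generates a full blog post for a user-supplied
topic, using a prompt template fetched from a remote endpoint and an OpenAI LLM via LangChain."""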
import json
import requests
import gradio as gr
import random
import time
import os
from datetime import datetime
import openai

# Token for the remote prompt service, read from the environment
HRA_TOKEN = os.getenv("HRA_TOKEN")

from langchain.agents import load_tools, Tool, initialize_agent, ZeroShotAgent, AgentExecutor
from langchain.llms import OpenAI
from langchain import LLMChain, PromptTemplate



headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url_decodemprompts = 'https://us-central1-createinsightsproject.cloudfunctions.net/gethrahfprompts'

# Fetch the blog prompt template plus the query and title lists from the remote endpoint
data = {"prompt_type": 'chatgpt_blog', "hra_token": HRA_TOKEN}
try:
    # A timeout is set so the ReadTimeout handler below can actually trigger
    r = requests.post(url_decodemprompts, data=json.dumps(data), headers=headers, timeout=30)
except requests.exceptions.ReadTimeout as e:
    print(e)
    raise

prompt = str(r.content, 'UTF-8')
print(prompt)
# The response packs the template, the comma-separated queries and the section titles, joined by 'SEPERATOR'
template = prompt.split('SEPERATOR')[0]
querieslist = prompt.split('SEPERATOR')[1].split(',')
titleslist = prompt.split('SEPERATOR')[2].split(',')


def getblog(text_inp1, text_inp2):
    """Generate a blog for the given topic using the fetched prompt template and the user's OpenAI API key."""
    print(text_inp1, text_inp2)
    print(datetime.today().strftime("%d-%m-%Y"))
    if text_inp2 != '':
        # Substitute the topic into the template and run one LLM chain per query
        template_tmp = template.replace("{topic}", text_inp1).strip()
        llm = OpenAI(temperature=0, openai_api_key=text_inp2)

        prompt_template = PromptTemplate(
            input_variables=["query"],
            template=template_tmp
        )

        bloglist = []
        for query in querieslist:
            llm_chain = LLMChain(prompt=prompt_template, llm=llm, verbose=True)
            result = llm_chain.run(query)
            bloglist.append(result)

        # Assemble the blog: the last generated piece goes first, then each earlier piece under its title
        blog = bloglist[-1] + "\n\n"
        for i in range(len(bloglist) - 1):
            blog += titleslist[i] + '\n' + bloglist[i] + '\n\n'
        print(blog)
        return blog
    else:
        return "Enter OpenAI API key"

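# Gradio UI: a topic textbox and an OpenAI API key textbox in, the generated blog out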
with gr.Blocks() as demo:
    with gr.Row():
        gr.Markdown("<h1><center>Everybody Can Blog</center></h1>")
        gr.Markdown(
            """Everybody can blog. Just enter a topic/keyword and get a full blog from the ChatGPT model. See the examples for guidance. Experience the power of Prompt Engineering."""
        )
    with gr.Row():
        with gr.Column():
            textbox1 = gr.Textbox(placeholder="Enter a topic/keyword (1-3 words) to generate a blog...", lines=1, label='Topic')
            textbox2 = gr.Textbox(placeholder="Enter OpenAI API Key...", lines=1, label='OpenAI API Key')
        with gr.Column():
            btn = gr.Button("Generate")
            output1 = gr.Textbox(lines=10, label='Blog')

    btn.click(getblog, inputs=[textbox1, textbox2], outputs=[output1])
    examples = gr.Examples(examples=['5G', 'Minimalism', 'Rock music', 'Electric Vehicles'],
                           inputs=[textbox1])


demo.launch()