piusanalytics committed on
Commit
6f17e06
·
1 Parent(s): 82eef58

completed app

Browse files
Files changed (1) hide show
  1. app.py +82 -0
app.py CHANGED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # import google.generativeai as palm
2
+ import os
3
+ import langchain
4
+ from langchain.chains import LLMChain
5
+ from langchain.prompts import (
6
+ ChatPromptTemplate,
7
+ PromptTemplate,
8
+ SystemMessagePromptTemplate,
9
+ AIMessagePromptTemplate,
10
+ HumanMessagePromptTemplate,
11
+ )
12
+ from langchain.schema import (
13
+ AIMessage,
14
+ HumanMessage,
15
+ SystemMessage
16
+ )
17
+
18
+ from langchain.llms import GooglePalm
19
+ from langchain.memory import ConversationBufferWindowMemory
20
+ import gradio as gr
21
+
# Map the Space secret `api_key` onto the env var that LangChain's GooglePalm
# wrapper reads. Fix: os.getenv is a function — the original subscripted it
# (os.getenv['api_key']), which raises TypeError at startup. Default to "" so
# a missing secret fails later with an auth error instead of crashing here
# (os.environ values must be str, never None).
os.environ["GOOGLE_API_KEY"] = os.getenv("api_key", "")
23
+
# Configure the Google PaLM LLM: deterministic decoding (temperature 0),
# top-k / nucleus sampling caps, a single candidate, and a generous
# output-token budget.
llm = GooglePalm(
    n=1,
    temperature=0.0,
    max_output_tokens=5000,
    top_p=0.95,
    top_k=40,
)
30
+
# Better Prompts
# System/instruction template for the enhancement chain. {human_input} is the
# single variable the chain fills in with the user's raw prompt.
# Fix: the "Provide context" guideline contained a garbled sentence
# ("This to generate...") — repaired to "This helps to generate..." so the
# instruction text sent to the model reads correctly.
template = """Assistant is designed to enhance user provided prompts for large language models.
Use the following guidelines between backticks to improve prompts.

```
Be specific: Clearly state your desired outcome or the type of information you're seeking. The more specific and focused your prompt is, the better chances you have of obtaining accurate and useful responses.

Provide context: Offer relevant background information to set the stage and ensure that the model understands the context of your request. This helps to generate more informed and context-aware responses.

Use explicit instructions: If you want the model to perform a specific task, make your instructions explicit. Specify the format, structure, or steps required for the response. For example, instead of asking for a "description," you could write a "step-by-step description" or a "brief summary."

Ask for pros and cons: If you're seeking an evaluation or comparison, explicitly ask for the advantages and disadvantages of a particular topic or approach.
```

Return an enhanced version of the prompt provided by the human.

Human: {human_input}
Assistant:
"""
50
+
# Wrap the enhancement instructions in a one-variable LangChain prompt template.
prompt = PromptTemplate(template=template, input_variables=["human_input"])
55
+
# Build the enhancement chain: PaLM model + prompt, with a sliding-window
# memory of size 1 so only the most recent exchange is retained.
window_memory = ConversationBufferWindowMemory(k=1)
chatgpt_chain = LLMChain(
    memory=window_memory,
    prompt=prompt,
    llm=llm,
    verbose=False,
)
62
+
def process_input(input_text):
    """Run the user's raw prompt through the enhancement chain.

    Returns the model's enhanced version of `input_text` as a string.
    """
    return chatgpt_chain.predict(human_input=input_text)
67
+
# Create the input interface
# NOTE(review): gr.inputs / gr.outputs are the legacy Gradio 3.x component
# namespaces, removed in Gradio 4.x — confirm the Space pins gradio<4, or
# migrate both components to gr.Textbox(label=...).
input_textbox = gr.inputs.Textbox(label="Enter your Prompt")

# Create the output interface
output_textbox = gr.outputs.Textbox(label="Enhanced Prompt")
73
+
# Create the Gradio app
# Wire the enhancement function to a one-input/one-output web UI and serve it.
# NOTE(review): the string-valued `theme=` argument is deprecated in newer
# Gradio releases — confirm the pinned version before migrating to gr.themes.*.
gr.Interface(
    fn=process_input,
    inputs=input_textbox,
    outputs=output_textbox,
    title="Auto Prompt Engineer",
    # Fix: user-facing typo "enchanced" -> "enhanced".
    description="Enter your prompt and get it evaluated and enhanced for Free!",
    theme="huggingface",
).launch()