File size: 2,662 Bytes
6f17e06
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5d39c66
74a08fd
 
6f17e06
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cbbf95d
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
# import google.generativeai as palm
import os
import langchain
from langchain.chains import LLMChain
from langchain.prompts import (
    ChatPromptTemplate,
    PromptTemplate,
    SystemMessagePromptTemplate,
    AIMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain.schema import (
    AIMessage,
    HumanMessage,
    SystemMessage
)

from langchain.llms import GooglePalm
from langchain.memory import ConversationBufferWindowMemory
import gradio as gr

# Read the Google PaLM API key from the environment and fail fast with a
# clear message if it is missing — otherwise os.environ[...] = None would
# raise an opaque TypeError below.
g_api_key = os.getenv('api_key')
if not g_api_key:
    raise RuntimeError(
        "Environment variable 'api_key' is not set; "
        "it must contain a Google PaLM API key."
    )

os.environ["GOOGLE_API_KEY"] = g_api_key

# Establish LLM: temperature 0 for deterministic output, single candidate.
llm = GooglePalm(
    temperature=0.0,
    top_k=40,
    top_p=0.95,
    max_output_tokens=5000,
    n=1,  # number of candidate completions to generate
)

# System prompt instructing the model to act as a prompt-enhancement
# assistant; the guidelines between backticks are forwarded verbatim to
# the LLM, so their wording matters.
template = """Assistant is designed to enhance user provided prompts for large language models.
Use the following guidelines between backticks to improve prompts.

```
Be specific: Clearly state your desired outcome or the type of information you're seeking. The more specific and focused your prompt is, the better chances you have of obtaining accurate and useful responses.

Provide context: Offer relevant background information to set the stage and ensure that the model understands the context of your request. This helps the model generate more informed and context-aware responses.

Use explicit instructions: If you want the model to perform a specific task, make your instructions explicit. Specify the format, structure, or steps required for the response. For example, instead of asking for a "description," you could write a "step-by-step description" or a "brief summary."

Ask for pros and cons: If you're seeking an evaluation or comparison, explicitly ask for the advantages and disadvantages of a particular topic or approach.
```

Return an enhanced version of the prompt provided by the human.

Human: {human_input}
Assistant:
"""

# Single-variable template: the user's raw prompt is substituted for
# {human_input} at predict() time.
prompt = PromptTemplate(
    input_variables=['human_input'],
    template=template,
)

# Chain tying the PaLM LLM to the enhancement prompt.  A window memory of
# the last exchange (k=1) is attached; NOTE(review): the template above has
# no {history} placeholder, so the memory's stored history appears unused —
# confirm against the installed LangChain version whether LLMChain accepts
# this combination.
chatgpt_chain = LLMChain(
    llm=llm, 
    prompt=prompt, 
    verbose=False, 
    memory=ConversationBufferWindowMemory(k=1),
)

def process_input(input_text):
    """Run the user's raw prompt through the enhancement chain.

    Args:
        input_text: The prompt text entered by the user.

    Returns:
        The enhanced prompt produced by the LLM chain.
    """
    return chatgpt_chain.predict(human_input=input_text)

# Build the Gradio UI.  The gr.inputs / gr.outputs namespaces were
# deprecated and removed in Gradio 3.x; components now live at the
# package top level.
input_textbox = gr.Textbox(label="Enter your Prompt")
output_textbox = gr.Textbox(label="Enhanced Prompt")

# Create and launch the Gradio app (blocks until the server stops).
gr.Interface(
    fn=process_input,
    inputs=input_textbox,
    outputs=output_textbox,
    title="Auto Prompt Engineer",
    description="Enter your prompt and get it evaluated and enhanced for Free!",
    theme="huggingface",
).launch()