"""Auto Prompt Engineer.

A Gradio app that sends a user-supplied prompt through a LangChain LLMChain
backed by Google PaLM and returns an "enhanced" version of that prompt.
Requires the 'api_key' environment variable to hold a Google API key.
"""
# import google.generativeai as palm
import os

import gradio as gr
import langchain  # noqa: F401 -- kept from the original file; not referenced below
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferWindowMemory
from langchain_community.llms import GooglePalm
from langchain_core.messages import (  # noqa: F401 -- kept from the original file
    AIMessage,
    HumanMessage,
    SystemMessage,
)
from langchain_core.prompts import (  # noqa: F401 -- only PromptTemplate is used below
    AIMessagePromptTemplate,
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    PromptTemplate,
    SystemMessagePromptTemplate,
)

# Read the Google API key from the environment and re-export it under the
# name the Google client library expects. Fail fast with a clear message
# instead of the opaque TypeError the original raised when 'api_key' was
# unset (os.environ values must be strings, never None).
g_api_key = os.getenv("api_key")
if g_api_key is None:
    raise RuntimeError(
        "Environment variable 'api_key' is not set; it is required to call Google PaLM."
    )
os.environ["GOOGLE_API_KEY"] = g_api_key

# Establish the LLM. temperature=0.0 keeps the rewrites deterministic.
# NOTE(review): max_output_tokens=5000 may exceed the PaLM text model's
# documented output-token limit -- confirm against the current API docs.
llm = GooglePalm(
    temperature=0.0,
    top_k=40,
    top_p=0.95,
    max_output_tokens=5000,
    n=1,
)

# System prompt instructing the model to act as a prompt enhancer. The
# guidelines between backticks are part of the runtime prompt text.
template = """Assistant is designed to enhance user provided prompts for large language models. Use the following guidelines between backticks to improve prompts.
```
Be specific: Clearly state your desired outcome or the type of information you're seeking. The more specific and focused your prompt is, the better chances you have of obtaining accurate and useful responses.
Provide context: Offer relevant background information to set the stage and ensure that the model understands the context of your request. This helps the model generate more informed and context-aware responses.
Use explicit instructions: If you want the model to perform a specific task, make your instructions explicit. Specify the format, structure, or steps required for the response. For example, instead of asking for a "description," you could write a "step-by-step description" or a "brief summary."
Ask for pros and cons: If you're seeking an evaluation or comparison, explicitly ask for the advantages and disadvantages of a particular topic or approach.
```
Return an enhanced version of the prompt provided by the human.
Human: {human_input} Assistant: """

prompt = PromptTemplate(
    input_variables=["human_input"],
    template=template,
)

# NOTE(review): the window memory below records the previous exchange under
# its default "history" key, but the template has no {history} placeholder,
# so the remembered turn never reaches the model. Kept for construction
# compatibility; consider removing it or adding {history} to the template.
chatgpt_chain = LLMChain(
    llm=llm,
    prompt=prompt,
    verbose=False,
    memory=ConversationBufferWindowMemory(k=1),
)


def process_input(input_text: str) -> str:
    """Run the user's raw prompt through the enhancement chain.

    Args:
        input_text: The prompt the user wants improved.

    Returns:
        The model's enhanced version of the prompt.
    """
    return chatgpt_chain.predict(human_input=input_text)


# --- Gradio UI (launches at import time, as in the original script) ---
input_textbox = gr.Textbox(label="Enter your Prompt")
output_textbox = gr.Textbox(label="Enhanced Prompt")

gr.Interface(
    fn=process_input,
    inputs=input_textbox,
    outputs=output_textbox,
    title="Auto Prompt Engineer",
    description="Enter your prompt and get it evaluated and enhanced for Free!",
    theme="huggingface",
).launch()