# hwhelper / app.py
import gradio as gr

# Quick test of URL query parameters: gr.Request exposes the incoming
# request; query_params is a dict-like view of the query string.
def test(x, request: gr.Request):
    return request.query_params

gr.Interface(test, "textbox", "textbox").launch()
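
# Example: opening the app at <app-url>/?question=What+is+torque and
# submitting any input should echo the parsed params in the output box.
# A sketch (get_question is a hypothetical helper) for pulling out one
# parameter server-side; query_params supports dict-style .get, so a
# missing key falls back cleanly:
#
# def get_question(x, request: gr.Request):
#     return request.query_params.get("question", "no question passed")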
# from openai import OpenAI
# import gradio as gr
# import os, json
# # Attempt to load configuration from config.json
# try:
#     with open('config.json') as config_file:
#         config = json.load(config_file)
#     OPENAI_API_KEY = config.get("OPENAI_API_KEY")
#     SYSTEM_PROMPT = config.get("SYSTEM_PROMPT")
# except FileNotFoundError:
#     # If config.json is not found, fall back to environment variables
#     OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
#     SYSTEM_PROMPT = os.getenv("SYSTEM_PROMPT")
# # Fall back to a default prompt if necessary; the API key has no safe default
# if not OPENAI_API_KEY:
#     raise ValueError("OPENAI_API_KEY is not set in config.json or as an environment variable.")
# if not SYSTEM_PROMPT:
#     SYSTEM_PROMPT = "This is a default system prompt."
# client = OpenAI(api_key=OPENAI_API_KEY)
# MODEL = "gpt-3.5-turbo"
# def predict(message):
#     system_prompt = {
#         "role": "system",
#         "content": SYSTEM_PROMPT
#     }
#     history_openai_format = [system_prompt]
#     history_openai_format.append({"role": "user", "content": message})
#     response = client.chat.completions.create(
#         model=MODEL,
#         messages=history_openai_format,
#         temperature=1.0,
#         max_tokens=150,
#         stream=False,
#     )
#     # openai>=1.0 returns message objects, not dicts: use .content,
#     # not ['content']
#     return response.choices[0].message.content
# # JavaScript function to read the question from the URL on page load
# js_on_load = """
# function() {
#     const params = new URLSearchParams(window.location.search);
#     const question = params.get("question") || "Enter your question here";
#     return [question];
# }
# """
# with gr.Blocks() as app:
#     with gr.Row():
#         question_input = gr.Textbox(label="Your Question", placeholder="Enter your question here")
#         submit_button = gr.Button("Submit")
#     answer_output = gr.Textbox(label="Answer", interactive=False)
#     submit_button.click(fn=predict, inputs=question_input, outputs=answer_output)
#     # Route the js return value into the textbox via outputs; a js-only
#     # event takes fn=None
#     app.load(fn=None, inputs=None, outputs=[question_input], js=js_on_load)
# app.launch(share=True, debug=True)
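#
# # An alternative sketch to the js_on_load approach: read the parameter
# # server-side via gr.Request (the same mechanism as the live test at the
# # top of this file). load_question here is a hypothetical name:
# def load_question(request: gr.Request):
#     return request.query_params.get("question", "")
# app.load(fn=load_question, outputs=[question_input])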
###__________________________________________
# V0
# from openai import OpenAI
# import gradio as gr
# import os, json
# # Attempt to load configuration from config.json
# try:
#     with open('config.json') as config_file:
#         config = json.load(config_file)
#     OPENAI_API_KEY = config.get("OPENAI_API_KEY")
#     SYSTEM_PROMPT = config.get("SYSTEM_PROMPT")
# except FileNotFoundError:
#     # If config.json is not found, fall back to environment variables
#     OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
#     SYSTEM_PROMPT = os.getenv("SYSTEM_PROMPT")
# # Fall back to a default prompt if necessary; the API key has no safe default
# if not OPENAI_API_KEY:
#     raise ValueError("OPENAI_API_KEY is not set in config.json or as an environment variable.")
# if not SYSTEM_PROMPT:
#     SYSTEM_PROMPT = "This is a default system prompt."
# client = OpenAI(api_key=OPENAI_API_KEY)
# system_prompt = {
#     "role": "system",
#     "content": SYSTEM_PROMPT
# }
# MODEL = "gpt-3.5-turbo"
# def predict(message, history):
#     history_openai_format = [system_prompt]
#     # Rebuild the OpenAI-format message list from Gradio's (user, bot)
#     # history pairs
#     for human, assistant in history:
#         history_openai_format.append({"role": "user", "content": human})
#         history_openai_format.append({"role": "assistant", "content": assistant})
#     history_openai_format.append({"role": "user", "content": message})
#     response = client.chat.completions.create(
#         model=MODEL,
#         messages=history_openai_format,
#         temperature=1.0,
#         stream=True,
#     )
#     # Accumulate streamed tokens and yield the growing reply so the chat
#     # window updates incrementally
#     partial_message = ""
#     for chunk in response:
#         if chunk.choices[0].delta.content:
#             partial_message += chunk.choices[0].delta.content
#             yield partial_message
# gr.ChatInterface(predict).launch(share=True)
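# # Note: gr.ChatInterface streams automatically when the fn is a
# # generator; each yielded string replaces the in-progress reply.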