Spaces:
Runtime error
Runtime error
File size: 6,671 Bytes
67a07af 3718635 67a07af 3718635 67a07af 5cb32a7 ea3c78a 12a0383 67a07af a7ba934 67a07af a7ba934 67a07af a7ba934 67a07af 7c73f96 7d81e9f 12a0383 67a07af ea3c78a 67a07af ea3c78a 67a07af ea3c78a f8c3650 67a07af 923401d 67a07af ccb61fd |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 |
import time
import os
import json
import gradio as gr
from openai import OpenAI
# API key is read from the OPEN_AI_API_KEY environment variable.
# NOTE(review): the name differs from OpenAI's conventional OPENAI_API_KEY —
# confirm the deployment actually sets OPEN_AI_API_KEY.
OPENAI_API_KEY = os.getenv("OPEN_AI_API_KEY")
client = OpenAI(api_key=OPENAI_API_KEY)
# Model used for every assistant created in this module.
OPEN_AI_MODEL = "gpt-4-1106-preview"
# thread = gr.State(client.beta.threads.create())
# thread_id = None
def wait_on_run(run, thread):
    """Block until an Assistants API run leaves its pending states.

    Args:
        run: Run object returned by ``client.beta.threads.runs.create``.
        thread: Thread the run belongs to (only ``thread.id`` is read).

    Returns:
        The final run object (status e.g. "completed" or "failed").
    """
    # Membership test instead of two chained equality comparisons.
    while run.status in ("queued", "in_progress"):
        run = client.beta.threads.runs.retrieve(
            thread_id=thread.id,
            run_id=run.id,
        )
        # Throttle polling so we don't hammer the API.
        time.sleep(0.5)
    return run
def get_openai_assistant(assistant_name, instructions, model=OPEN_AI_MODEL):
    """Create a fresh OpenAI assistant and return its id.

    Note: a brand-new assistant is created on every call rather than
    reusing an existing one.
    """
    create_kwargs = {
        "name": assistant_name,
        "instructions": instructions,
        "model": model,
    }
    return client.beta.assistants.create(**create_kwargs).id
def show_json(obj):
    """Parse *obj*'s JSON dump into plain Python data and return it.

    Bug fix: the original parsed the JSON but discarded the result, making
    the function a no-op; the parsed structure is now returned so callers
    can actually inspect it.

    Args:
        obj: Any object exposing ``model_dump_json()`` (pydantic models,
            OpenAI SDK response objects).

    Returns:
        The parsed JSON structure (typically a dict).
    """
    return json.loads(obj.model_dump_json())
def abstract_assistant(pre_context_to_the_instruction, instruction, name, thread_id, thread, query):
    """Create an assistant, run *query* on the thread, and return the reply text.

    The assistant is built from the concatenated context + instruction, the
    user's query is appended to the thread, a run is started and polled to
    completion, and the first message posted after ours is returned.
    """
    assistant_id = get_openai_assistant(
        name, pre_context_to_the_instruction + instruction
    )
    user_message = client.beta.threads.messages.create(
        thread_id=thread_id, role="user", content=query
    )
    pending_run = client.beta.threads.runs.create(
        thread_id=thread_id,
        assistant_id=assistant_id,
    )
    wait_on_run(pending_run, thread)
    # Only messages posted after ours, oldest first — index 0 is the reply.
    reply_page = client.beta.threads.messages.list(
        thread_id=thread_id, order="asc", after=user_message.id
    )
    payload = json.loads(reply_page.model_dump_json())
    return payload["data"][0]["content"][0]["text"]["value"]
def get_response_for_case_t0(thread_id, query, question_text, input, output, example, thread):
    """Mentor reply for case t0: the student has not written any code yet.

    NOTE(review): ``input`` shadows the builtin; kept to preserve the
    caller-visible signature.
    """
    pre_context_to_the_instruction = f"""```QUESTION:{question_text}```\n```INPUT:{input}```\n```OUTPUT:{output}```\n```EXAMPLE:{example}```"""
    instruction = f"""Act as a Coding Mentor for a student who is currently learning Data Structures and algorithms(DSA). The student is asking a QUERY: {query} based on the
coding question : QUESTION which have input : INPUT output: OUTPUT and examples: EXAMPLE. The student have not implemented any code. The student might be asking
how to solve the question or what should be the coding approach or how to write the code/logic. Your task is to :
1.) Ask the student about what he/she is thinking about the problem statement or what logic the student will implement or what approach he/she will follow
to code the solution he/she is thinking.
2.) Please be in a motivational tone and never give student a solution approach just ask the students approach.
3.) Always answer in short and crunch way not more than 200 words. Always be to the point."""
    return abstract_assistant(
        pre_context_to_the_instruction, instruction, "GPT_t0", thread_id, thread, query
    )
def opening_statement(thread_id, question_text, input, output, example, thread):
    """One-line nudge shown when the student is stuck with no query and no code."""
    pre_context_to_the_instruction = f"""```QUESTION:{question_text}```\n```INPUT:{input}```\n```OUTPUT:{output}```\n```EXAMPLE:{example}```"""
    instruction = f"""Act as a Coding Mentor for a student who is currently learning Data Structures and algorithms(DSA). Now the student
is stuck in the question for long amount of time so Ask him in a gentle motivational tone to tell or to start think where
he/she might be stuck or might be thinking. Output only one line not more than that be short and crunch!"""
    # No user query in this case — the assistant opens the conversation.
    return abstract_assistant(
        pre_context_to_the_instruction, instruction, "GPT_open", thread_id, thread, ""
    )
def response_evaluation_for_case_tx(thread_id, query, question_text, input, output, example, user_code, thread):
    """Mentor reply for case tx: the student has written some code to evaluate.

    Fixes typos in the prompt text itself ("instrutions" -> "instructions",
    "st\\ep" -> "step", "explaination" -> "explanation") — the prompt is sent
    to the model verbatim, so these were live defects.

    NOTE(review): ``input`` shadows the builtin; kept to preserve the
    caller-visible signature.
    """
    pre_context_to_the_instruction = f"""```QUESTION:{question_text}```\n```INPUT:{input}```\n```OUTPUT:{output}```\n```EXAMPLE:{example}```"""
    instruction = f"""Act as a Coding Mentor for a student who is currently learning Data Structures and algorithms(DSA). The student is asking a QUERY: {query} based on the
coding question : QUESTION which have input : INPUT output: OUTPUT and examples: EXAMPLE. Now follow the following instructions:
NEVER PROVIDE COMPLETE SOLUTION CODE AND ALL THE STEPS TO SOLVE IN ONE RESPONSE. BE SELECTIVE AND GIVE IMMEDIATE STEP ONLY ONE
* Always answer in short and crunch way not more than 200 words. Always be to the point
1.) Understand what user is thinking about to code.
2.) Take time to think and understand
3.) Analyse the Code written by the user : {user_code}
4.) Again take time to think and understand
5.) Now check if the explanation of the user is aligning with the code or not
6.) If not then suggest the user to align, by providing unblocking hints only!
7.) Never give complete solution logic or code to the student , never ! Always talk with the student let the student write code with small hints only and reach to a solution
8.) If user is doing wrong approach suggest correct approach slowly with step by step hints only!
9.) At the end also suggest user about how to improve code and logic at the end"""
    response = abstract_assistant(pre_context_to_the_instruction, instruction, "GPT_tx", thread_id, thread, query)
    return response
def run_chat_in_all_cases(message, history, question_text, input, output, examples, code_written):
    """Gradio ChatInterface callback: route the turn to the right mentor prompt.

    Cases:
      * no message AND no code  -> opening statement nudge
      * message but no code     -> case t0 (discuss approach)
      * code present            -> case tx (evaluate the code)

    NOTE(review): a new thread is created on every turn, so the assistant
    has no memory of earlier turns — confirm whether that is intended.
    """
    thread = client.beta.threads.create()
    thread_id = thread.id
    print(thread_id)
    # Bug fix: the original used two independent ``if`` statements, so the
    # opening statement (empty message, no code) was always overwritten by
    # the t0 branch. An if/elif/else chain makes the cases exclusive.
    if not message and not code_written:
        ai_message = opening_statement(thread_id, question_text, input, output, examples, thread)
    elif not code_written:
        ai_message = get_response_for_case_t0(thread_id, message, question_text, input, output, examples, thread)
    else:
        ai_message = response_evaluation_for_case_tx(thread_id, message, question_text, input, output, examples, code_written, thread)
    # Debug trace of the full exchange context.
    print({"question_text": question_text, "input": input, "output": output, "examples": examples,
           "user_code": code_written, "query": message, "ai_message": ai_message})
    return ai_message
# Extra inputs shown under the chat: four free-text fields describing the
# problem, plus a code editor for the student's attempt.
additional_inputs = [
    gr.Textbox(label=field_label, max_lines=10, interactive=True)
    for field_label in ("Question Text", "Input", "Output", "Examples")
] + [
    gr.Code(label="Code", interactive=True)
]
# Build and launch the mentor chatbot UI.
# SECURITY: the basic-auth credentials below are hard-coded in source and
# look like a real token — move them to environment variables / secrets.
# (Also removed a stray trailing "|" artifact that made this line invalid.)
gr.ChatInterface(
    fn=run_chat_in_all_cases,
    chatbot=gr.Chatbot(show_label=True, show_share_button=True, show_copy_button=True, likeable=True, layout="panel"),
    additional_inputs=additional_inputs,
    title="Mentor Mode",
    concurrency_limit=20,
).launch(debug=True, auth=("issac_user", "hf_198ghgkap34"))