Spaces:
Runtime error
Runtime error
Upload folder using huggingface_hub
Browse files- gradio_app.py +8 -7
gradio_app.py
CHANGED
@@ -16,7 +16,6 @@ client = Client("http://20.83.177.108:8080")
|
|
16 |
# print(text)
|
17 |
|
18 |
|
19 |
-
|
20 |
def run_generation(user_text, top_p, temperature, top_k, max_new_tokens):
|
21 |
# Get the model and tokenizer, and tokenize the user text.
|
22 |
user_text = f"""You are an expert legal assistant with extensive knowledge about Indian law. Your task is to respond to the given query in a concise and factually correct manner. Also mention the relevant sections of the law wherever applicable.
|
@@ -24,7 +23,7 @@ def run_generation(user_text, top_p, temperature, top_k, max_new_tokens):
|
|
24 |
### Response: """
|
25 |
|
26 |
text = ""
|
27 |
-
for response in client.generate_stream(user_text, max_new_tokens=max_new_tokens,repetition_penalty=1.05):
|
28 |
if not response.token.special:
|
29 |
text += response.token.text
|
30 |
yield text
|
@@ -43,7 +42,8 @@ with gr.Blocks() as demo:
|
|
43 |
placeholder="What is the punishment for taking dowry. explain in detail.",
|
44 |
label="Question"
|
45 |
)
|
46 |
-
model_output = gr.Textbox(
|
|
|
47 |
button_submit = gr.Button(value="Submit")
|
48 |
|
49 |
with gr.Column(scale=1):
|
@@ -60,8 +60,9 @@ with gr.Blocks() as demo:
|
|
60 |
minimum=0.1, maximum=1.0, value=0.8, step=0.1, interactive=True, label="Temperature",
|
61 |
)
|
62 |
|
63 |
-
user_text.submit(run_generation, [
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
|
|
|
|
16 |
# print(text)
|
17 |
|
18 |
|
|
|
19 |
def run_generation(user_text, top_p, temperature, top_k, max_new_tokens):
|
20 |
# Get the model and tokenizer, and tokenize the user text.
|
21 |
user_text = f"""You are an expert legal assistant with extensive knowledge about Indian law. Your task is to respond to the given query in a concise and factually correct manner. Also mention the relevant sections of the law wherever applicable.
|
|
|
23 |
### Response: """
|
24 |
|
25 |
text = ""
|
26 |
+
for response in client.generate_stream(user_text, max_new_tokens=max_new_tokens, repetition_penalty=1.05):
|
27 |
if not response.token.special:
|
28 |
text += response.token.text
|
29 |
yield text
|
|
|
42 |
placeholder="What is the punishment for taking dowry. explain in detail.",
|
43 |
label="Question"
|
44 |
)
|
45 |
+
model_output = gr.Textbox(
|
46 |
+
label="AI Response", lines=10, interactive=False)
|
47 |
button_submit = gr.Button(value="Submit")
|
48 |
|
49 |
with gr.Column(scale=1):
|
|
|
60 |
minimum=0.1, maximum=1.0, value=0.8, step=0.1, interactive=True, label="Temperature",
|
61 |
)
|
62 |
|
63 |
+
user_text.submit(run_generation, [
|
64 |
+
user_text, top_p, temperature, top_k, max_new_tokens], model_output)
|
65 |
+
button_submit.click(run_generation, [
|
66 |
+
user_text, top_p, temperature, top_k, max_new_tokens], model_output)
|
67 |
|
68 |
+
demo.queue(max_size=32).launch(enable_queue=True)
|