Abhishekcr448 committed on
Commit
2e320f4
·
verified ·
1 Parent(s): 778912a

Added application file

Browse files
Files changed (1) hide show
  1. app.py +72 -0
app.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import gradio as gr
3
+ from transformers import AutoTokenizer, AutoModelForCausalLM
4
+
5
# Load the pretrained Tiny-Hinglish chat model and its matching tokenizer.
model = AutoModelForCausalLM.from_pretrained("Abhishekcr448/Tiny-Hinglish-Chat-21M")
tokenizer = AutoTokenizer.from_pretrained("Abhishekcr448/Tiny-Hinglish-Chat-21M")
10
+
11
# Function to generate text (suggestions)
def generate_text(prompt, output_length):
    """Generate a short Hinglish continuation of *prompt*.

    Args:
        prompt: Text to continue.
        output_length: Number of new tokens to generate beyond the prompt.

    Returns:
        The decoded text, which includes the original prompt as a prefix
        (callers strip it off themselves).
    """
    inputs = tokenizer(prompt, return_tensors='pt').to(model.device)
    # Inference only — disabling gradient tracking saves memory and time.
    with torch.no_grad():
        generated_output = model.generate(
            inputs['input_ids'],
            # Pass the attention mask explicitly; omitting it triggers a
            # transformers warning and can mis-handle padded inputs.
            attention_mask=inputs.get('attention_mask'),
            # Allow exactly `output_length` new tokens past the prompt.
            max_length=inputs['input_ids'].shape[-1] + output_length,
            no_repeat_ngram_size=2,
            temperature=0.8,
            top_k=50,
            top_p=0.9,
            do_sample=True,
        )
    output_text = tokenizer.decode(generated_output[0], skip_special_tokens=True)
    return output_text
25
+
26
+
27
# Set up the Gradio interface
with gr.Blocks() as demo:

    # Chat history display above the input/response widgets.
    with gr.Column(scale=4):
        chatbox = gr.Chatbot(label="Chat", type="messages")

    with gr.Row():
        # Column holding the two text boxes.
        with gr.Column(scale=3):
            # Input text box for user input (first column)
            input_text = gr.Textbox(label="Enter your message", interactive=True)

            # Read-only box showing the model's suggested continuation.
            response_text = gr.Textbox(label="Response", interactive=False)

        # Separate column so the button sits beside the text boxes.
        with gr.Column(scale=1):
            replace_button = gr.Button("Replace Text", elem_id="replace-btn")

    # Live suggestion as the user types. Returns "" instead of falling
    # through to an implicit None when the prompt is blank, so the
    # response box is cleared deterministically.
    def validate_and_generate(prompt, output_length=4):
        if prompt.strip():
            return generate_text(prompt, output_length)
        return ""

    input_text.input(validate_and_generate, inputs=input_text, outputs=response_text)
    replace_button.click(lambda x: x, inputs=response_text, outputs=input_text)

    def chat_interaction(prompt, history):
        """Handle a submitted message.

        Appends the user/assistant exchange to the chat history,
        pre-computes the next typing suggestion, and clears the input box.

        Bug fix vs. the original: a blank prompt previously fell through
        with no return statement, sending None to all three outputs;
        the current state is now returned unchanged instead.
        """
        if not prompt.strip():
            return history, "", ""

        response = generate_text(prompt, output_length=10)

        # Exclude the input prompt text from the response
        response = response[len(prompt):].strip()
        history.append({"role": "user", "content": prompt})
        history.append({"role": "assistant", "content": response})

        # Call validate_and_generate with the response to pre-fill the
        # suggestion box based on the bot's reply.
        response_text_value = validate_and_generate(response, output_length=10)
        return history, response_text_value[len(response):].strip(), ""

    input_text.submit(chat_interaction, inputs=[input_text, chatbox], outputs=[chatbox, response_text, input_text])

# Launch the interface
demo.launch()