redael committed on
Commit
f7537f3
·
verified ·
1 Parent(s): af20c63

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -39
app.py CHANGED
@@ -1,48 +1,42 @@
 
 
1
  import gradio as gr
2
- from transformers import pipeline, set_seed
3
 
4
- # Initialize the Hugging Face pipeline
5
- model = GPT2LMHeadModel.from_pretrained('redael/model_udc')
6
 
7
- def generate_response(prompt, max_length=100, num_beams=5, temperature=0.5, top_p=0.9, repetition_penalty=4.0):
8
- # Prepare the prompt
9
- prompt = f"User: {prompt}\nAssistant:"
10
- responses = generator(prompt, max_length=max_length, num_return_sequences=1, num_beams=num_beams, temperature=temperature, top_p=top_p, repetition_penalty=repetition_penalty)
11
- response = responses[0]['generated_text']
12
-
13
- # Post-processing to clean up the response
14
- response = response.split("Assistant:")[-1].strip()
15
- response_lines = response.split('\n')
16
- clean_response = []
17
- for line in response_lines:
18
- if "User:" not in line and "Assistant:" not in line:
19
- clean_response.append(line)
20
- response = ' '.join(clean_response)
21
- return response.strip()
22
 
23
- def chat_interface(user_input, history, system_message, max_tokens, temperature, top_p):
24
- response = generate_response(user_input, max_length=max_tokens, temperature=temperature, top_p=top_p)
25
- history.append((user_input, response))
26
- return history, history
27
 
28
- with gr.Blocks() as demo:
29
- gr.Markdown("# Chatbot using GPT")
 
 
30
 
31
- chatbot = gr.Chatbot()
32
- user_input = gr.Textbox(placeholder="Type your question here...", label="User Input")
33
- system_message = gr.Textbox(value="You are a friendly chatbot.", label="System Message")
34
- max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max New Tokens")
35
- temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
36
- top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (Nucleus Sampling)")
37
- state = gr.State([])
 
 
 
 
 
 
 
38
 
39
- with gr.Row():
40
- clear = gr.Button("Clear")
41
- submit = gr.Button("Send")
42
 
43
- submit.click(chat_interface, [user_input, state, system_message, max_tokens, temperature, top_p], [chatbot, state])
44
- clear.click(lambda: None, None, chatbot)
45
- clear.click(lambda: [], None, state)
46
 
47
- if __name__ == "__main__":
48
- demo.launch()
 
1
+
2
import torch
import gradio as gr
# BUG FIX: AutoTokenizer / AutoModelForCausalLM are used below but were never
# imported in this revision (the old `from transformers import pipeline` line
# was removed without a replacement), so the app crashed with NameError.
from transformers import AutoTokenizer, AutoModelForCausalLM

# Hugging Face Hub id of the fine-tuned conversational model.
model_name = 'redael/model_udc'

# Load the tokenizer and model weights from the Hub (downloads on first run).
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Run inference on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
 
 
14
 
15
def generate_response(prompt, model=model, tokenizer=tokenizer, max_length=100, num_beams=5, temperature=0.5, top_p=0.9, repetition_penalty=4.0):
    """Generate an assistant reply for a single user message.

    Args:
        prompt: Raw user message (no role prefix; it is added here).
        model, tokenizer: Default to the module-level instances loaded at import.
        max_length: Upper bound on total sequence length (prompt + reply tokens).
        num_beams: Beam-search width.
        temperature, top_p: Kept for interface compatibility. NOTE(review):
            with beam search and no do_sample=True these have no effect —
            confirm whether sampling was intended before relying on them.
        repetition_penalty: Values > 1.0 discourage repeated tokens.

    Returns:
        The generated reply text with the prompt/role scaffolding removed.
    """
    # Wrap the raw message in the chat template the model expects.
    prompt = f"User: {prompt}\nAssistant:"

    inputs = tokenizer(prompt, return_tensors='pt', padding=True, truncation=True, max_length=512).to(device)
    outputs = model.generate(
        inputs['input_ids'],
        # Pass the attention mask explicitly so padded positions are ignored
        # (also silences the transformers warning about a missing mask).
        attention_mask=inputs['attention_mask'],
        max_length=max_length,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
        num_beams=num_beams,
        temperature=temperature,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        early_stopping=True
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # BUG FIX: generate() returns prompt + continuation, so the decoded text
    # begins with "User: ...\nAssistant:". The previous revision stripped this
    # scaffolding; this revision dropped that and leaked the prompt back to the
    # user. Keep only the text after the last "Assistant:" marker and drop any
    # hallucinated extra turns.
    response = response.split("Assistant:")[-1].strip()
    clean_lines = [
        line for line in response.split('\n')
        if "User:" not in line and "Assistant:" not in line
    ]
    return ' '.join(clean_lines).strip()
34
 
35
def chatbot_interface(user_input):
    """Gradio callback: map one user message to the model's text reply."""
    reply = generate_response(user_input)
    return reply
38
 
39
# Wire the callback into a simple single-turn text-in / text-out UI.
iface = gr.Interface(fn=chatbot_interface, inputs="text", outputs="text", title="Chatbot", description="Ask anything to the chatbot.")

# Launch only when executed as a script (the previous revision had this guard;
# importing this module should not start a web server as a side effect).
if __name__ == "__main__":
    iface.launch()