sbicy committed
Commit 3e9ef1a · verified · 1 Parent(s): faaf2dc

Update app.py

Files changed (1)
  1. app.py +22 -39
app.py CHANGED
@@ -1,62 +1,45 @@
-# -*- coding: utf-8 -*-
-"""Untitled6.ipynb
-
-Automatically generated by Colab.
-
-Original file is located at
-    https://colab.research.google.com/drive/1F6f_vJbssO7C2FM6FILWljFYacDmbVBY
-"""
-
-# Import necessary libraries
+import os
 from transformers import AutoModelForCausalLM, AutoTokenizer
+import gradio as gr
 
-# Load model and tokenizer
-model_name = "distilgpt2"  # A lightweight, CPU-friendly model
+# Load the model and tokenizer
+model_name = "distilgpt2"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 
 # Define the function to generate a response
 def generate_response(prompt):
-    # Tokenize the input prompt
     inputs = tokenizer(prompt, return_tensors="pt")
-    # Generate a response
     outputs = model.generate(
-        inputs.input_ids,
-        max_length=50,
-        do_sample=True,  # Enable sampling
-        temperature=0.7,  # Controls randomness
-        top_p=0.9,  # Nucleus sampling
-        pad_token_id=tokenizer.eos_token_id
-    )
-
-    # Decode the output and set clean_up_tokenization_spaces to True to avoid warnings
+        inputs.input_ids,
+        max_length=70,
+        do_sample=True,
+        temperature=0.6,
+        top_p=0.9,
+        repetition_penalty=1.2,
+        pad_token_id=tokenizer.eos_token_id
+    )
     response = tokenizer.decode(outputs[0], skip_special_tokens=True, clean_up_tokenization_spaces=True)
     return response
 
-# Example usage
-prompt = "I went to Safeway and I bought a"
-response = generate_response(prompt)
-print(response)
-
-def persona_response(prompt, persona="I am a helpful assistant"):
-    full_prompt = f"{persona}. {prompt}"
+# Persona-based response function
+def persona_response(prompt, persona="You are a helpful talking dog that answers in short, simple phrases."):
+    full_prompt = f"{persona}: {prompt}"
     return generate_response(full_prompt)
 
-# Import Gradio
-import gradio as gr
-
 # Define Gradio interface function
-def chat_interface(user_input, persona="I am a helpful assistant"):
+def chat_interface(user_input, persona="You are a helpful talking dog that answers in short, simple phrases."):
     return persona_response(user_input, persona)
 
-# Set up Gradio interface
+# Gradio interface setup
 interface = gr.Interface(
     fn=chat_interface,
-    inputs=["text", "text"],  # Allows input for both prompt and persona
+    inputs=["text", "text"],
     outputs="text",
     title="Simple Chatbot",
-    description="Type something to chat with the bot! Add a persona to change its style, like 'I am a shopping assistant.'"
+    description="Chat with the bot! Add a persona like 'I am a shopping assistant.'"
 )
 
-# Launch the Gradio interface in Colab
-interface.launch(share=True)  # share=True creates a public link
+# Launch the Gradio app
+if __name__ == "__main__":
+    interface.launch()
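
In summary, the revision drops the Colab scaffolding (notebook header, example usage, `share=True`), moves the imports to the top, tightens sampling (temperature 0.7 → 0.6, `repetition_penalty=1.2`, `max_length` 50 → 70), switches the default persona, and guards the launch behind `if __name__ == "__main__"` so importing app.py no longer starts the server. A minimal smoke test of the updated functions might look like the sketch below; it assumes the new app.py sits on the import path as `app`, and the prompts are illustrative only.

# Hypothetical smoke test for the updated app.py; assumes it is importable as `app`.
from app import generate_response, persona_response

# Plain generation with the new settings
# (max_length=70, do_sample=True, temperature=0.6, top_p=0.9, repetition_penalty=1.2).
print(generate_response("I went to Safeway and I bought a"))

# Persona-prefixed generation: persona_response builds f"{persona}: {prompt}"
# with the default talking-dog persona and forwards it to generate_response.
print(persona_response("What should I buy for dinner?"))

Because the launch call is now guarded, the import above only loads distilgpt2 and builds the Gradio interface; nothing is served until app.py is run directly.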