Spaces:
Runtime error
Runtime error
Upload app.py
Browse files
app.py
ADDED
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# -*- coding: utf-8 -*-
"""Untitled6.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1F6f_vJbssO7C2FM6FILWljFYacDmbVBY
"""

# NOTE(review): the notebook export contained the IPython line magic
#     !pip install transformers gradio
# which is a SyntaxError in a plain .py file and crashes a Hugging Face
# Space at startup ("Runtime error"). On Spaces, declare dependencies in a
# requirements.txt file next to app.py instead:
#     transformers
#     gradio

# The CSS injection below is cosmetic and only meaningful inside a notebook;
# guard the IPython import so the script still runs where IPython is absent.
try:
    from IPython.display import display, HTML

    # Inject CSS to enable wrapping of long notebook outputs.
    display(HTML('''
<style>
.output_area pre {
white-space: pre-wrap;
}
</style>
'''))
except ImportError:
    pass  # Not running in a notebook; output wrapping is irrelevant.
22 |
+
|
23 |
+
import os

# Resolve the Hugging Face token. `google.colab.userdata` exists only inside
# Colab; importing it on Hugging Face Spaces (or any other host) raises
# ModuleNotFoundError and previously crashed the app. Fall back to the
# HF_TOKEN environment variable (a Space secret surfaces there).
try:
    from google.colab import userdata  # Colab-only module
    api_key = userdata.get('HF_TOKEN')
except ImportError:
    api_key = os.environ.get("HF_TOKEN")

if api_key is None:
    # distilgpt2 is a public model, so a token is optional. Warn instead of
    # raising ValueError, which previously killed the Space whenever the
    # HF_TOKEN secret was not configured.
    print("Warning: Hugging Face API key not found; continuing without authentication.")
else:
    # Hugging Face libraries pick the token up from this environment variable.
    os.environ["HF_TOKEN"] = api_key
    print("Hugging Face API key successfully loaded! You're good to go!")

# Now you can continue with your Hugging Face-related code
|
40 |
+
|
41 |
+
# Model setup. distilgpt2 is a small causal LM that runs comfortably on CPU,
# which suits the free tier on Hugging Face Spaces.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "distilgpt2"

# Tokenizer and model must come from the same checkpoint so their
# vocabularies agree.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
48 |
+
|
49 |
+
# Define the function to generate a response
def generate_response(prompt):
    """Generate a sampled text continuation of *prompt* with distilgpt2.

    Args:
        prompt: Input text for the model to continue.

    Returns:
        The decoded generation as a string. Causal LMs echo their input, so
        the returned text includes the prompt itself.
    """
    # Tokenize the input prompt.
    inputs = tokenizer(prompt, return_tensors="pt")
    # Generate a response. Passing attention_mask explicitly fixes the
    # transformers warning about unreliable results when the pad token is
    # set to the eos token (as done below).
    outputs = model.generate(
        inputs.input_ids,
        attention_mask=inputs.attention_mask,
        max_length=50,       # total cap: prompt tokens + generated tokens
        do_sample=True,      # enable sampling
        temperature=0.7,     # controls randomness
        top_p=0.9,           # nucleus sampling
        pad_token_id=tokenizer.eos_token_id,
    )

    # Decode; clean_up_tokenization_spaces=True avoids a deprecation warning.
    response = tokenizer.decode(outputs[0], skip_special_tokens=True, clean_up_tokenization_spaces=True)
    return response
|
66 |
+
|
67 |
+
# Example usage: run a single generation at import time as a smoke test.
prompt = "I went to Safeway and I bought a"
response = generate_response(prompt)
print(response)
|
71 |
+
|
72 |
+
def persona_response(prompt, persona="I am a helpful assistant"):
    """Prefix *prompt* with a persona statement, then generate a reply."""
    combined = ". ".join([persona, prompt])
    return generate_response(combined)
|
75 |
+
|
76 |
+
# Import Gradio
|
77 |
+
import gradio as gr
|
78 |
+
|
79 |
+
# Gradio callback: forwards the user's message and chosen persona to the model.
def chat_interface(user_input, persona="I am a helpful assistant"):
    """Bridge Gradio's two text inputs to persona_response."""
    reply = persona_response(user_input, persona)
    return reply
|
82 |
+
|
83 |
+
# Set up Gradio interface
interface = gr.Interface(
    fn=chat_interface,
    inputs=["text", "text"],  # Allows input for both prompt and persona
    outputs="text",
    title="Simple Chatbot",
    description="Type something to chat with the bot! Add a persona to change its style, like 'I am a shopping assistant.'"
)

# Launch the app. NOTE(review): the original passed share=True, which is a
# Colab convenience; on Hugging Face Spaces it is unsupported (Spaces already
# serves a public URL) and emits a startup warning. A plain launch() works
# both locally and on Spaces. The __main__ guard keeps the server from
# starting as a side effect if this module is ever imported.
if __name__ == "__main__":
    interface.launch()
|
94 |
+
|
95 |
+
"""### Uploading to Hugging Face Spaces
|
96 |
+
|
97 |
+
Now that we have our chatbot working, here’s how to upload it to Hugging Face Spaces:
|
98 |
+
|
99 |
+
1. Go to [Hugging Face Spaces](https://huggingface.co/spaces).
|
100 |
+
2. Create a new Space, choose "Gradio" as the app type, and name it (e.g., "My Simple Chatbot").
|
101 |
+
3. Upload this notebook, along with any necessary files or model assets.
|
102 |
+
4. Set up your Space and click "Deploy." Your Gradio chatbot will now be live!
|
103 |
+
|
104 |
+
**Tips for Deployment:**
|
105 |
+
- Ensure you include any persona settings or customizations.
|
106 |
+
- Test the app after deployment to confirm it works as expected.
|
107 |
+
|
108 |
+
Once deployed, students can share the link with family and friends!
|
109 |
+
|
110 |
+
"""
|