# Streamlit demo app for the Llama-3.2-180M-Amharic-Instruct chatbot.
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from threading import Thread
# Model Initialization
# Hugging Face repository id of the fine-tuned 180M-parameter Amharic Llama 3.2
# instruct model; read by load_model() below.
model_id = "rasyosef/Llama-3.2-180M-Amharic-Instruct"

# Page title and short description rendered at the top of the app.
st.title("Llama 3.2 180M Amharic Chatbot Demo")
st.write("""
This chatbot was created using [Llama-3.2-180M-Amharic-Instruct](https://huggingface.co/rasyosef/Llama-3.2-180M-Amharic-Instruct),
a finetuned version of the 180 million parameter Llama 3.2 Amharic transformer model.
""")
# Load the tokenizer and model
@st.cache_resource
def load_model(repo_id: str = model_id):
    """Load the tokenizer and model and build a text-generation pipeline.

    Decorated with ``st.cache_resource`` so the slow download/initialization
    runs only once per session; Streamlit reruns reuse the cached objects.

    Args:
        repo_id: Hugging Face model repository to load. Defaults to the
            module-level ``model_id`` (kept as a default so existing
            ``load_model()`` callers are unaffected).

    Returns:
        A ``(tokenizer, pipeline)`` tuple ready for text generation.
    """
    tokenizer = AutoTokenizer.from_pretrained(repo_id)
    model = AutoModelForCausalLM.from_pretrained(repo_id)
    llama_pipeline = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        pad_token_id=tokenizer.pad_token_id,
        eos_token_id=tokenizer.eos_token_id,
    )
    return tokenizer, llama_pipeline


tokenizer, llama_pipeline = load_model()
# Generate text
def generate_response(prompt, max_new_tokens, repetition_penalty=1.15):
    """Generate a model reply for *prompt* using the module-level pipeline.

    Args:
        prompt: User message passed to the text-generation pipeline.
        max_new_tokens: Upper bound on tokens generated for the reply.
        repetition_penalty: Penalty discouraging repeated tokens. Defaults to
            1.15 (the previously hard-coded value), so existing callers are
            unaffected.

    Returns:
        The pipeline's generated text. NOTE(review): ``text-generation``
        pipelines typically include the prompt in ``generated_text`` by
        default — confirm whether the prompt should be stripped for display.
    """
    response = llama_pipeline(
        prompt,
        max_new_tokens=max_new_tokens,
        repetition_penalty=repetition_penalty,
    )
    return response[0]['generated_text']
# Sidebar: Configuration
st.sidebar.header("Chatbot Configuration")
# Per-reply cap on generated tokens; passed to generate_response() on "Send".
max_tokens = st.sidebar.slider("Maximum new tokens", min_value=8, max_value=256, value=64, help="Larger values result in longer responses.")
# Examples
examples = [
"แฐแแแฃ แฅแแดแต แแ
?",
"แจแขแตแฎแตแซ แแ แจแฐแ แตแ แแแตแ แแ?",
"แจแขแตแฎแตแซ แจแแจแจแปแ แแแต แแ แแ แฉ?",
"แจแ แแญแ แแฅแ แแแแ",
"แฐแจแต แแแจแ\n\nแ
แฅแ แ แแ แณ",
"แ แแต แ แตแแ แแแต แแแจแ",
"แจแแจแแณแญ แแ แจแฐแ แตแ แแแตแ แแ?",
"แ แแ แจแ แแชแซ แแฌแแณแแต แแ แแ?",
]
st.subheader("Chat with the Amharic Chatbot")

# Conversation history must survive Streamlit reruns, so it lives in
# session_state; initialize it on the first run only.
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

# Example selector — the sentinel entry lets the user type a custom message.
example = st.selectbox("Choose an example:", ["Type your own message"] + examples)

# User Input — pre-filled with the chosen example unless the sentinel is selected.
prefill = "" if example == "Type your own message" else example
user_input = st.text_input(
    "Your message:",
    value=prefill,
    placeholder="Type your message here...",
)
# On "Send", generate a reply for any non-empty input and record the turn.
if st.button("Send") and user_input:
    with st.spinner("Generating response..."):
        bot_reply = generate_response(user_input, max_tokens)
    st.session_state.chat_history.append((user_input, bot_reply))

# Display Chat History
st.write("### Chat History")
for idx, (user_msg, bot_response) in enumerate(st.session_state.chat_history):
    st.write(f"**User {idx+1}:** {user_msg}")
    st.write(f"**Bot:** {bot_response}")