import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer, pipeline
from threading import Thread

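# 400M-parameter Llama 3.2 model fine-tuned on Amharic instructions, poems, stories, and Wikipedia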
model_id = "rasyosef/Llama-3.2-400M-Amharic-Instruct-Poems-Stories-Wikipedia"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float32,
    # device_map="cuda" if torch.cuda.is_available() else "cpu"
  )

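# Wrap the model and tokenizer in a Transformers text-generation pipeline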
llama3_am = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    eos_token_id=tokenizer.eos_token_id,
    # device_map="cuda" if torch.cuda.is_available() else "cpu"
  )

# Generator that takes a message and the chat history and streams back the model's reply
def generate(message, chat_history, max_new_tokens=64):

  history = []

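  # Convert Gradio's (user, assistant) history tuples into chat-template messages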
  for sent, received in chat_history:
    history.append({"role": "user", "content": sent})
    history.append({"role": "assistant", "content": received})

  history.append({"role": "user", "content": message})

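  # Refuse to generate once the templated conversation exceeds 512 tokens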
  if len(tokenizer.apply_chat_template(history)) > 512:
    yield "Chat history is too long."
  else:
    # Run generation in a background thread and stream tokens through TextIteratorStreamer
    streamer = TextIteratorStreamer(tokenizer=tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=300.0)
    thread = Thread(
        target=llama3_am,
        kwargs={
            "text_inputs":history,
            "max_new_tokens":max_new_tokens,
            "repetition_penalty":1.1,
            "streamer":streamer
            }
        )
    thread.start()

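    # Accumulate streamed tokens and yield the growing response so the UI updates live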
    generated_text = ""
    for word in streamer:
      generated_text += word
      response = generated_text.strip()

      yield response

# Chat interface with Gradio
with gr.Blocks() as demo:
  gr.Markdown("""
  # Llama 3.2 400M Amharic Chatbot Demo
  """)

  tokens_slider = gr.Slider(8, 256, value=64, label="Maximum new tokens", info="Larger values of `max_new_tokens` produce longer responses at the cost of slower generation.")

  chatbot = gr.ChatInterface(
    chatbot=gr.Chatbot(height=400),
    fn=generate,
    additional_inputs=[tokens_slider],
    stop_btn=None,
    cache_examples=False,
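    # Sample Amharic prompts: greetings, requests for poems, stories, and jokes, and factual questions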
    examples=[
        ["แˆฐแˆ‹แˆ"],
        ["แˆฐแˆ‹แˆแฃ แŠฅแŠ•แ‹ดแ‰ต แŠแˆ…?"],
        ["แŠ แŠ•แ‰ฐ แˆ›แŠแˆ…?"],
        ["แŒแŒฅแˆ แƒแแˆแŠ"],
        ["แˆตแˆˆ แ‹ญแ‰…แˆญแ‰ณ แŒแŒฅแˆ แŒปแแˆแŠ"],
        ["แŠ แŠ•แ‹ต แ‰ฐแˆจแ‰ต แŠ แŒซแ‹แ‰ฐแŠ"],
        ["แˆตแˆˆ แŒ…แ‰ฅแŠ“ แŠ แŠ•แ‰ แˆณ แ‰ฐแˆจแ‰ต แŠ•แŒˆแˆจแŠ"],
        ["แ‰€แˆแ‹ต แŠ•แŒˆแˆจแŠ"],
        ["แˆตแˆˆ แˆตแˆซ แŠ แŒฅแŠแ‰ต แŠ แŠ•แ‹ต แ‰€แˆแ‹ต แŠ•แŒˆแˆจแŠ"],
        ["แ‹ณแŒแˆ›แ‹Š แ‰ดแ‹Žแ‹ตแˆฎแˆต แˆ›แŠ• แŠแ‹?"],
        ["แ‹ณแŒแˆ›แ‹Š แˆแŠ’แˆแŠญ แˆ›แŠ• แŠแ‹?"],
        ["แˆตแˆˆ แŠ แ‹ฒแˆต แŠ แ‰ แ‰ฃ แ‹ฉแŠ’แ‰จแˆญแˆตแ‰ฒ แŒฅแ‰‚แ‰ต แŠฅแ‹แŠแ‰ณแ‹Žแ‰ฝแŠ• แŠ แŒซแ‹แ‰ฐแŠ"],
        ["แˆตแˆˆ แŒƒแ“แŠ• แŒฅแ‰‚แ‰ต แŠฅแ‹แŠแ‰ณแ‹Žแ‰ฝแŠ• แŠ•แŒˆแˆจแŠ"],
        ["แˆตแˆˆ แˆ›แ‹ญแŠญแˆฎแˆถแแ‰ต แŒฅแ‰‚แ‰ต แŠฅแ‹แŠแ‰ณแ‹Žแ‰ฝแŠ• แŠ•แŒˆแˆจแŠ"],
        ["แŒ‰แŒแˆ แˆแŠ•แ‹ตแŠ• แŠแ‹?"],
        ["แ‰ขแ‰ตแŠฎแ‹ญแŠ• แˆแŠ•แ‹ตแŠ• แŠแ‹?"],
      ]
  )

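# Queuing enables streamed (generator) responses; debug=True surfaces errors in the console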
demo.queue().launch(debug=True)