Switch to float16.
app.py CHANGED
@@ -12,23 +12,23 @@ DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 DESCRIPTION = """\
-#
+# Turkish LLaMA 8B Chat
 
-This Space demonstrates
+This Space demonstrates [Turkish-Llama-8b-DPO-v0.1](https://huggingface.co/ytu-ce-cosmos/Turkish-Llama-8b-DPO-v0.1) by YTU COSMOS Research Group, an 8B parameter model fine-tuned for Turkish language understanding and generation. Feel free to play with it, or duplicate to run generations without a queue!
 
-🔎
+🔎 This model is the newest and most advanced iteration of CosmosLLama, developed by merging two distinctly trained CosmosLLaMa-Instruct DPO models.
 
-
-🐇 For a smaller model that you can run on many GPUs, check our [7B model demo](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat).
+🤖 The model is optimized for Turkish language tasks and can handle various text generation scenarios including conversations, instructions, and general text completion.
 
+💡 You can also try the model on the official demo page: [cosmos.yildiz.edu.tr/cosmosllama](https://cosmos.yildiz.edu.tr/cosmosllama)
 """
 
 LICENSE = """
 <p/>
 
 ---
-
-
+This demo uses [Turkish-Llama-8b-DPO-v0.1](https://huggingface.co/ytu-ce-cosmos/Turkish-Llama-8b-DPO-v0.1) by YTU COSMOS Research Group,
+and is governed by the original llama3 license.
 """
 
 if not torch.cuda.is_available():
@@ -37,7 +37,11 @@ if not torch.cuda.is_available():
 
 if torch.cuda.is_available():
     model_id = "ytu-ce-cosmos/Turkish-Llama-8b-DPO-v0.1"
-    model = AutoModelForCausalLM.from_pretrained(
+    model = AutoModelForCausalLM.from_pretrained(
+        model_id,
+        device_map="auto",
+        torch_dtype=torch.float16,
+    )
     tokenizer = AutoTokenizer.from_pretrained(model_id)
     tokenizer.use_default_system_prompt = False
 
@@ -128,11 +132,11 @@ chat_interface = gr.ChatInterface(
     ],
     stop_btn=None,
     examples=[
-        ["
-        ["
-        ["
-        ["
-        ["
+        ["Merhaba! Nasılsın?"],
+        ["Python programlama dilini kısaca açıklayabilir misin?"],
+        ["Külkedisi masalının özetini bir cümlede anlat."],
+        ["Yapay zeka alanında açık kaynak kodun faydaları nelerdir?"],
+        ["İstanbul'un en ünlü turistik yerlerini sıralar mısın?"],
     ],
     cache_examples=False,
     type="messages",
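Taken together, the new load path reads as the minimal standalone sketch below, assuming the same torch and transformers imports the app already uses (device_map="auto" additionally requires the accelerate package); the CPU-only warning branch is left out. In float16 the weights take 2 bytes per parameter, roughly 16 GB for this 8B model versus about 32 GB in float32, and float16 also runs on older GPUs that lack native bfloat16 support.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "ytu-ce-cosmos/Turkish-Llama-8b-DPO-v0.1"

if torch.cuda.is_available():
    # Same call as in the diff: shard layers across the available GPU(s)
    # and load the weights in half precision (2 bytes per parameter).
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        device_map="auto",
        torch_dtype=torch.float16,
    )
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    # As in the app: do not inject the tokenizer's default system prompt.
    tokenizer.use_default_system_prompt = False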
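The refreshed example prompts are in Turkish; the sketch below adds English glosses as comments and shows how they slot into gr.ChatInterface. The echo stub and the trimmed argument list are placeholders for illustration only, not the Space's actual generate function or full configuration.

import gradio as gr

def echo(message, history):
    # Stand-in for the Space's real streaming generate() function.
    return message

chat_interface = gr.ChatInterface(
    fn=echo,
    stop_btn=None,
    examples=[
        ["Merhaba! Nasılsın?"],  # "Hello! How are you?"
        ["Python programlama dilini kısaca açıklayabilir misin?"],  # "Can you briefly explain the Python programming language?"
        ["Külkedisi masalının özetini bir cümlede anlat."],  # "Summarize the tale of Cinderella in one sentence."
        ["Yapay zeka alanında açık kaynak kodun faydaları nelerdir?"],  # "What are the benefits of open source in artificial intelligence?"
        ["İstanbul'un en ünlü turistik yerlerini sıralar mısın?"],  # "Could you list Istanbul's most famous tourist sights?"
    ],
    cache_examples=False,
    type="messages",
)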