ysdede committed
Commit b327205 · Parent(s): 814dd6c

Switch to float16.
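This switches model loading from 4-bit quantization (`load_in_4bit=True`) to half-precision weights (`torch_dtype=torch.float16`), and also updates the Space's description, license note, and example prompts for the Turkish model.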

Files changed (1): app.py (+17 -13)
app.py CHANGED
@@ -12,23 +12,23 @@ DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 DESCRIPTION = """\
-# Llama-2 13B Chat
+# Turkish LLaMA 8B Chat
 
-This Space demonstrates model [Llama-2-13b-chat](https://huggingface.co/meta-llama/Llama-2-13b-chat) by Meta, a Llama 2 model with 13B parameters fine-tuned for chat instructions. Feel free to play with it, or duplicate to run generations without a queue! If you want to run your own service, you can also [deploy the model on Inference Endpoints](https://huggingface.co/inference-endpoints).
+This Space demonstrates [Turkish-Llama-8b-DPO-v0.1](https://huggingface.co/ytu-ce-cosmos/Turkish-Llama-8b-DPO-v0.1) by YTU COSMOS Research Group, an 8B parameter model fine-tuned for Turkish language understanding and generation. Feel free to play with it, or duplicate to run generations without a queue!
 
-🔎 For more details about the Llama 2 family of models and how to use them with `transformers`, take a look [at our blog post](https://huggingface.co/blog/llama2).
+🔎 This model is the newest and most advanced iteration of CosmosLLama, developed by merging two distinctly trained CosmosLLaMa-Instruct DPO models.
 
-🔨 Looking for an even more powerful model? Check out the large [**70B** model demo](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI).
-🐇 For a smaller model that you can run on many GPUs, check our [7B model demo](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat).
+🤖 The model is optimized for Turkish language tasks and can handle various text generation scenarios including conversations, instructions, and general text completion.
 
+💡 You can also try the model on the official demo page: [cosmos.yildiz.edu.tr/cosmosllama](https://cosmos.yildiz.edu.tr/cosmosllama)
 """
 
 LICENSE = """
 <p/>
 
 ---
-As a derivate work of [Llama-2-13b-chat](https://huggingface.co/meta-llama/Llama-2-13b-chat) by Meta,
-this demo is governed by the original [license](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat/blob/main/USE_POLICY.md).
+This demo uses [Turkish-Llama-8b-DPO-v0.1](https://huggingface.co/ytu-ce-cosmos/Turkish-Llama-8b-DPO-v0.1) by YTU COSMOS Research Group,
+and is governed by the original llama3 license.
 """
 
 if not torch.cuda.is_available():
@@ -37,7 +37,11 @@ if not torch.cuda.is_available():
 
 if torch.cuda.is_available():
     model_id = "ytu-ce-cosmos/Turkish-Llama-8b-DPO-v0.1"
-    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
+    model = AutoModelForCausalLM.from_pretrained(
+        model_id,
+        device_map="auto",
+        torch_dtype=torch.float16,
+    )
     tokenizer = AutoTokenizer.from_pretrained(model_id)
     tokenizer.use_default_system_prompt = False
 
@@ -128,11 +132,11 @@ chat_interface = gr.ChatInterface(
     ],
     stop_btn=None,
     examples=[
-        ["Hello there! How are you doing?"],
-        ["Can you explain briefly to me what is the Python programming language?"],
-        ["Explain the plot of Cinderella in a sentence."],
-        ["How many hours does it take a man to eat a Helicopter?"],
-        ["Write a 100-word article on 'Benefits of Open-Source in AI research'"],
+        ["Merhaba! Nasılsın?"],
+        ["Python programlama dilini kısaca açıklayabilir misin?"],
+        ["Külkedisi masalının özetini bir cümlede anlat."],
+        ["Yapay zeka alanında açık kaynak kodun faydaları nelerdir?"],
+        ["İstanbul'un en ünlü turistik yerlerini sıralar mısın?"],
     ],
     cache_examples=False,
     type="messages",
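
For reference, a minimal standalone sketch of the loading and generation path this commit switches to. It assumes a CUDA GPU with roughly 16 GB of free memory (8B parameters × 2 bytes in float16); the prompt is taken from the new examples, and the generation settings (`max_new_tokens`, `temperature`) are illustrative rather than the Space's actual defaults:

# Minimal sketch of the float16 loading path adopted by this commit.
# Assumes a CUDA GPU with ~16 GB free memory for the fp16 weights.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "ytu-ce-cosmos/Turkish-Llama-8b-DPO-v0.1"
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",          # place layers across available GPUs
    torch_dtype=torch.float16,  # half precision instead of 4-bit quantization
)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Illustrative single-turn generation using the model's chat template.
messages = [{"role": "user", "content": "Merhaba! Nasılsın?"}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output = model.generate(input_ids, max_new_tokens=256, do_sample=True, temperature=0.7)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))

Compared with the previous `load_in_4bit=True` path, float16 weights take roughly four times the memory (on the order of 16 GB versus ~5 GB for an 8B model) but avoid quantization error and drop the `bitsandbytes` dependency.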