FabioSantos committed
Commit 91bfedf · verified · Parent: bda2282

Update app.py

Files changed (1)
app.py  +45 -45
app.py CHANGED
@@ -1,45 +1,45 @@
- import gradio as gr
- from huggingface_hub import hf_hub_download
- from llama_cpp import Llama
-
- # download model
- model_name_or_path = "FabioSantos/llama3Finetune_unsloth"  # repo id
- # Q8_0 quantized GGUF
- model_basename = "llama3Finetune_unsloth-unsloth.Q8_0.gguf"  # file name
-
- model_path = hf_hub_download(repo_id=model_name_or_path, filename=model_basename)
- print(model_path)
-
- lcpp_llm = Llama(
-     model_path=model_path,
-     n_threads=2,  # CPU cores
-     n_batch=512,  # Should be between 1 and n_ctx, consider the amount of VRAM in your GPU.
-     n_gpu_layers=43,  # Change this value based on your model and your GPU VRAM pool.
-     n_ctx=4096,  # Context window
- )
-
- prompt_template = "Responda as questões.\nHuman: {prompt}\nAssistant:\n"
-
- def get_response(text):
-     prompt = prompt_template.format(prompt=text)
-     response = lcpp_llm(
-         prompt=prompt,
-         max_tokens=256,
-         temperature=0.5,
-         top_p=0.95,
-         top_k=50,
-         stop=[''],  # Dynamic stopping when such token is detected.
-         echo=True  # return the prompt
-     )
-     return response['choices'][0]['text'].split('Assistant:\n')[1]
-
- interface = gr.Interface(
-     fn=get_response,
-     inputs="text",
-     outputs="text",
-     title="Assistente Virtual",
-     description="Forneça uma questão e visualize a resposta do assistente."
- )
-
- if __name__ == "__main__":
-     interface.launch()
+ import gradio as gr
+ from huggingface_hub import hf_hub_download
+ from llama_cpp import Llama
+
+ # download model
+ model_name_or_path = "FabioSantos/llama3Finetune_unsloth"  # repo id
+ # Q8_0 quantized GGUF
+ model_basename = "llama3Finetune_unsloth-unsloth.Q8_0.gguf"  # file name
+
+ model_path = hf_hub_download(repo_id=model_name_or_path, filename=model_basename)
+ print(model_path)
+
+ lcpp_llm = Llama(
+     model_path=model_path,
+     n_threads=2,  # CPU cores
+     n_batch=512,  # Should be between 1 and n_ctx, consider the amount of VRAM in your GPU.
+     n_gpu_layers=43,  # Change this value based on your model and your GPU VRAM pool.
+     n_ctx=4096,  # Context window
+ )
+
+ prompt_template = "Responda as questões.\nHuman: {prompt}\nAssistant:\n"
+
+ def get_response(text):
+     prompt = prompt_template.format(prompt=text)
+     response = lcpp_llm(
+         prompt=prompt,
+         max_tokens=256,
+         temperature=0.5,
+         top_p=0.95,
+         top_k=50,
+         stop=[''],  # Dynamic stopping when such token is detected.
+         echo=True  # return the prompt
+     )
+     return response
+
+ interface = gr.Interface(
+     fn=get_response,
+     inputs="text",
+     outputs="text",
+     title="Assistente Virtual",
+     description="Forneça uma questão e visualize a resposta do assistente."
+ )
+
+ if __name__ == "__main__":
+     interface.launch()
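
With echo=True, the completion dict returned by llama-cpp-python carries the prompt plus the generated continuation in choices[0]['text']; the previous get_response split that text on 'Assistant:\n' and returned only the answer, while the updated version returns the whole dict. A minimal sketch, assuming that same response layout, of how a caller could still recover just the answer from the dict the new get_response returns (the extract_answer name is hypothetical, not part of the commit):

    def extract_answer(response: dict) -> str:
        # With echo=True the completion text is "<prompt><answer>", so keep only
        # what follows the "Assistant:\n" marker from the prompt template.
        full_text = response["choices"][0]["text"]
        return full_text.split("Assistant:\n", 1)[-1].strip()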