FabioSantos committed
Commit bda2282 · verified · 1 Parent(s): 5068edb

Upload 2 files

Files changed (2)
  1. app.py +45 -0
  2. requirements.txt +0 -0
app.py ADDED
@@ -0,0 +1,45 @@
+ import gradio as gr
+ from huggingface_hub import hf_hub_download
+ from llama_cpp import Llama
+
+ # download the GGUF model file from the Hugging Face Hub
+ model_name_or_path = "FabioSantos/llama3Finetune_unsloth"  # repo id
+ # 8-bit quantized GGUF (Q8_0)
+ model_basename = "llama3Finetune_unsloth-unsloth.Q8_0.gguf"  # file name
+
+ model_path = hf_hub_download(repo_id=model_name_or_path, filename=model_basename)
+ print(model_path)
+
+ lcpp_llm = Llama(
+     model_path=model_path,
+     n_threads=2,      # CPU cores
+     n_batch=512,      # should be between 1 and n_ctx; consider your GPU's VRAM
+     n_gpu_layers=43,  # adjust based on your model and your GPU VRAM pool
+     n_ctx=4096,       # context window
+ )
+
+ prompt_template = "Responda as questões.\nHuman: {prompt}\nAssistant:\n"  # "Answer the questions."
+
+ def get_response(text):
+     prompt = prompt_template.format(prompt=text)
+     response = lcpp_llm(
+         prompt=prompt,
+         max_tokens=256,
+         temperature=0.5,
+         top_p=0.95,
+         top_k=50,
+         stop=["</s>"],  # stop at an end-of-sequence marker (assumed; the original stop string did not survive the page rendering)
+         echo=True,  # include the prompt in the returned text
+     )
+     return response['choices'][0]['text'].split('Assistant:\n', 1)[-1]  # drop the echoed prompt
+
+ interface = gr.Interface(
+     fn=get_response,
+     inputs="text",
+     outputs="text",
+     title="Assistente Virtual",  # "Virtual Assistant"
+     description="Forneça uma questão e visualize a resposta do assistente."  # "Provide a question and view the assistant's answer."
+ )
+
+ if __name__ == "__main__":
+     interface.launch()
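
For reference, a minimal sketch of querying the app once it is running, assuming a local launch on Gradio's default port (7860). The gradio_client package is separate from gradio, "/predict" is the default endpoint name Gradio assigns to an Interface's fn, and the example question is hypothetical:

from gradio_client import Client

client = Client("http://127.0.0.1:7860/")
answer = client.predict(
    "O que é aprendizado de máquina?",  # hypothetical example question
    api_name="/predict",
)
print(answer)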
requirements.txt ADDED
Binary file (218 Bytes).
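
The diff view does not render requirements.txt. Judging from the imports in app.py, a plausible sketch of its contents (an assumption, not the actual file) would be:

gradio
huggingface_hub
llama-cpp-python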