Commit 9c51c0d
Parent(s): 24ec0d4

adding HF pipeline for llm model

Files changed:
- app.py +4 -11
- llm_ops.py +20 -9
app.py
CHANGED
@@ -35,8 +35,8 @@ def clear_chat():
     return []
 
 def create_prompt():
-    prompt_template = """
-    Use the following context to answer.
+    prompt_template = """You are a chatbot that answers questions regarding the conversation in given context .
+    Use the following context to answer in sentences and points.
     If you don't know the answer, just say I don't know.
 
     {context}
@@ -78,14 +78,7 @@ def audio_processor(wav_file,API_key,wav_model='small',llm='HuggingFace',temperature
     global qa
 
     if llm == 'HuggingFace':
-        chat = llm_ops.get_hugging_face_model(
-            model_id="VMware/open-llama-7B-open-instruct",
-            API_key=API_key,
-            temperature=temperature,
-            max_tokens=max_tokens
-        )
-    else:
-        chat = llm_ops.get_openai_chat_model(API_key=API_key)
+        chat = llm_ops.get_llama_model()
 
     chain_type_kwargs = {"prompt": create_prompt()}
     qa = RetrievalQA.from_chain_type(llm=chat,
@@ -133,7 +126,7 @@ css="""
 
 title = """
 <div style="text-align: center;max-width: 2048px;">
-    <h1>Q&A
+    <h1>Q&A with LLAMA on Audio files</h1>
     <p style="text-align: center;">Upload a Audio file/link and query LLAMA-chatbot.
     <i> Tools uses State of the Art Models from HuggingFace/OpenAI so, make sure to add your key.</i>
     </p>
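Only the changed lines of create_prompt() appear in this hunk. Below is a minimal sketch of what the full helper plausibly looks like, assuming the template also takes a {question} variable and is wrapped in a LangChain PromptTemplate; the closing lines of the template and the return value are not shown in the diff and are assumptions, not part of the commit.

from langchain.prompts import PromptTemplate

def create_prompt():
    # Template text as committed above; {context} and {question} are filled
    # in by the RetrievalQA "stuff" chain at query time.
    prompt_template = """You are a chatbot that answers questions regarding the conversation in given context .
    Use the following context to answer in sentences and points.
    If you don't know the answer, just say I don't know.

    {context}

    Question: {question}
    Answer:"""
    return PromptTemplate(template=prompt_template,
                          input_variables=["context", "question"])

# Quick check of the assumed template variables:
print(create_prompt().format(context="(transcript snippet)", question="Who spoke first?"))

The returned prompt is then handed to RetrievalQA.from_chain_type through chain_type_kwargs = {"prompt": create_prompt()}, as the unchanged lines of audio_processor above show.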
llm_ops.py
CHANGED
@@ -1,4 +1,8 @@
 import os
+import torch
+from langchain import HuggingFacePipeline
+from transformers import AutoTokenizer
+import transformers
 
 def get_openai_chat_model(API_key):
     try:
@@ -9,13 +13,20 @@ def get_openai_chat_model(API_key):
     llm = OpenAI()
     return llm
 
-def get_hugging_face_model(model_id,API_key,temperature=0.1,max_tokens=4096):
-    try:
-        from langchain import HuggingFaceHub
-    except ImportError as err:
-        raise "{}, unable to load openAI. Please install openai and add OPENAIAPI_KEY"
-    chat_llm = HuggingFaceHub(huggingfacehub_api_token=API_key,
-                              repo_id=model_id,
-                              model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens})
-    return chat_llm
 
+def get_llama_model():
+    model = "meta-llama/Llama-2-7b-chat-hf"
+
+    tokenizer = AutoTokenizer.from_pretrained(model)
+
+    pipeline = transformers.pipeline("text-generation",
+                                     model=model,
+                                     tokenizer=tokenizer,
+                                     torch_dtype=torch.bfloat16,
+                                     trust_remote_code=True,
+                                     device_map="auto",
+                                     max_length=1000,
+                                     eos_token_id=tokenizer.eos_token_id
+                                     )
+    llm = HuggingFacePipeline(pipeline = pipeline, model_kwargs = {'temperature':0})
+    return llm
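A minimal smoke test for the new helper, not part of the commit. It assumes a machine with enough GPU memory to hold Llama-2-7B in bfloat16 and an already-configured Hugging Face token (meta-llama/Llama-2-7b-chat-hf is a gated checkpoint, so a prior huggingface-cli login or an HF token environment variable is needed before AutoTokenizer and transformers.pipeline can download it).

import llm_ops

# Loads the tokenizer and weights and builds the text-generation pipeline;
# the returned object is a LangChain LLM, so it can be called with a plain prompt string.
llm = llm_ops.get_llama_model()
print(llm("Summarize the key points of the conversation in two sentences."))

Unlike the removed get_hugging_face_model, the new helper takes no arguments, so the API_key, temperature, and max_tokens values collected in app.py are no longer forwarded to the Hugging Face model.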