Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -1,20 +1,25 @@
|
|
1 |
import os
|
|
|
|
|
2 |
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
|
3 |
-
from accelerate import Accelerator # Accelerate๋ฅผ ๋ณ๋๋ก ์ํฌํธ
|
4 |
from sentence_transformers import SentenceTransformer
|
5 |
from datasets import load_dataset
|
6 |
-
import faiss
|
7 |
import gradio as gr
|
|
|
8 |
|
9 |
-
|
|
|
10 |
|
|
|
11 |
model_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
|
12 |
tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_api_key)
|
13 |
accelerator = Accelerator() # Accelerator ์ธ์คํด์ค ์์ฑ
|
|
|
|
|
14 |
model = AutoModelForCausalLM.from_pretrained(
|
15 |
model_id,
|
16 |
token=hf_api_key,
|
17 |
-
torch_dtype=torch.bfloat16,
|
18 |
quantization_config=BitsAndBytesConfig(
|
19 |
load_in_4bit=True,
|
20 |
bnb_4bit_use_double_quant=True,
|
@@ -24,16 +29,21 @@ model = AutoModelForCausalLM.from_pretrained(
|
|
24 |
)
|
25 |
model = accelerator.prepare(model) # ๋ชจ๋ธ์ Accelerator์ ์ค๋น์ํด
|
26 |
|
|
|
27 |
ST = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
|
28 |
dataset = load_dataset("not-lain/wikipedia", revision="embedded")
|
29 |
data = dataset["train"]
|
30 |
data = data.add_faiss_index("embeddings")
|
31 |
|
|
|
32 |
def search(query: str, k: int = 3):
|
33 |
embedded_query = ST.encode(query)
|
34 |
scores, retrieved_examples = data.get_nearest_examples("embeddings", embedded_query, k=k)
|
35 |
return scores, retrieved_examples
|
36 |
|
|
|
|
|
|
|
37 |
def format_prompt(prompt, retrieved_documents, k):
|
38 |
PROMPT = f"Question:{prompt}\nContext:"
|
39 |
for idx in range(k):
|
|
|
1 |
import os

import faiss
import gradio as gr
import torch
from accelerate import Accelerator
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

# Read the Hugging Face API key from the environment (None when unset).
hf_api_key = os.getenv('HF_API_KEY')
|
12 |
|
13 |
+
# Accelerator instance and tokenizer for the Mixtral instruct model.
accelerator = Accelerator()
model_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_api_key)
|
17 |
+
|
18 |
+
# ๋ชจ๋ธ ๋ก๋ฉ
|
19 |
model = AutoModelForCausalLM.from_pretrained(
|
20 |
model_id,
|
21 |
token=hf_api_key,
|
22 |
+
torch_dtype=torch.bfloat16, # torch๋ฅผ ์ฌ์ฉํด ๋ฐ์ดํฐ ํ์
์ง์
|
23 |
quantization_config=BitsAndBytesConfig(
|
24 |
load_in_4bit=True,
|
25 |
bnb_4bit_use_double_quant=True,
|
|
|
29 |
)
|
30 |
model = accelerator.prepare(model) # ๋ชจ๋ธ์ Accelerator์ ์ค๋น์ํด
|
31 |
|
32 |
+
# Embedding model plus the pre-embedded wikipedia split, with a FAISS
# index attached over the "embeddings" column for nearest-neighbour search.
ST = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
dataset = load_dataset("not-lain/wikipedia", revision="embedded")
data = dataset["train"]
data = data.add_faiss_index("embeddings")
|
37 |
|
38 |
+
def search(query: str, k: int = 3):
    """Embed *query* and look up its nearest neighbours in the FAISS index.

    Returns a ``(scores, retrieved_examples)`` pair for the top *k* matches
    from the "embeddings" column of the module-level ``data`` dataset.
    """
    query_vector = ST.encode(query)
    scores, retrieved_examples = data.get_nearest_examples(
        "embeddings", query_vector, k=k
    )
    return scores, retrieved_examples
|
43 |
|
44 |
+
# ๋๋จธ์ง ์ฝ๋๋ ์ด์ ๊ณผ ๋์ผํ๊ฒ ์ ์ง
|
45 |
+
|
46 |
+
|
47 |
def format_prompt(prompt, retrieved_documents, k):
|
48 |
PROMPT = f"Question:{prompt}\nContext:"
|
49 |
for idx in range(k):
|