Spaces: Runtime error
Update app.py
app.py CHANGED
```diff
@@ -3,26 +3,31 @@ import torch
 from fastapi import FastAPI
 from pydantic import BaseModel
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+from peft import PeftModel, PeftConfig
 import uvicorn
 
-# Define a Pydantic model for request validation
 class Query(BaseModel):
     text: str
 
-# Initialize FastAPI app
 app = FastAPI(title="Financial Chatbot API")
 
-# Load
-…
+# Load base model
+base_model_name = "meta-llama/Meta-Llama-3-8B"  # Update this if different base model
 model = AutoModelForCausalLM.from_pretrained(
-    …
+    base_model_name,
     device_map="auto",
     trust_remote_code=True
 )
-…
+
+# Load adapter from your checkpoint
+peft_model_id = "Phoenix21/llama-3-2-3b-finetuned-finance_checkpoint2"
+model = PeftModel.from_pretrained(model, peft_model_id)
+
+# Load tokenizer from base model
+tokenizer = AutoTokenizer.from_pretrained(base_model_name, trust_remote_code=True)
 tokenizer.pad_token = tokenizer.eos_token
 
-#…
+# Rest of your code remains the same...
 chat_pipe = pipeline(
     "text-generation",
     model=model,
@@ -32,14 +37,12 @@ chat_pipe = pipeline(
     top_p=0.95,
 )
 
-# Define an endpoint for generating responses
 @app.post("/generate")
 def generate(query: Query):
     prompt = f"Question: {query.text}\nAnswer: "
     response = chat_pipe(prompt)[0]["generated_text"]
     return {"response": response}
 
-# Run the app using uvicorn; default port is 7860 (as expected by Hugging Face Spaces)
 if __name__ == "__main__":
     port = int(os.environ.get("PORT", 7860))
-    uvicorn.run(app, host="0.0.0.0", port=port)
+    uvicorn.run(app, host="0.0.0.0", port=port)
```
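A note on the base checkpoint: the adapter id, `Phoenix21/llama-3-2-3b-finetuned-finance_checkpoint2`, suggests it was trained on a Llama 3.2 3B base, while the code hard-codes `meta-llama/Meta-Llama-3-8B`. Loading a LoRA adapter onto a base with different hidden dimensions typically fails at startup, which would be consistent with this Space's "Runtime error" status. The diff imports `PeftConfig` but never uses it; a minimal sketch of resolving the base model from the adapter's own config instead of hard-coding it, assuming the adapter repo ships a standard `adapter_config.json`:

```python
from peft import PeftConfig

peft_model_id = "Phoenix21/llama-3-2-3b-finetuned-finance_checkpoint2"

# adapter_config.json records the checkpoint the adapter was trained against;
# reading it avoids pairing the adapter with a mismatched base model.
peft_config = PeftConfig.from_pretrained(peft_model_id)
base_model_name = peft_config.base_model_name_or_path
```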
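Also worth checking: the `__main__` block calls `os.environ.get`, but no `import os` is visible in the changed hunks. It may well sit in the unchanged opening lines next to `import torch` (the hunk header shows that context); if not, the app would raise a `NameError` at launch. Once the Space boots, a hypothetical local smoke test for the endpoint, with the route and payload shape taken straight from the diff and the host/port assumed from the `PORT` default:

```python
import requests

# POST a question to the /generate route defined in app.py;
# {"text": ...} matches the Query model, and 7860 is the default port.
r = requests.post(
    "http://localhost:7860/generate",
    json={"text": "What is a good debt-to-income ratio?"},
)
print(r.json()["response"])
```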
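Since the Space only serves inference, the adapter could optionally be folded into the base weights after loading, so generation runs without the PEFT indirection. A sketch using PEFT's `merge_and_unload`; whether it measurably helps this Space is untested:

```python
# Merge the LoRA weights into the base model in place; the returned model
# behaves like a plain transformers model inside the pipeline.
model = PeftModel.from_pretrained(model, peft_model_id)
model = model.merge_and_unload()
```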