Update app.py
app.py CHANGED
@@ -15,7 +15,7 @@ if HF_TOKEN:
 else:
     raise ValueError("Hugging Face token not found. Please set the HF_TOKEN environment variable.")
 
-
+# Define a Pydantic model for request validation
 class Query(BaseModel):
     text: str
 
@@ -37,7 +37,7 @@ model = PeftModel.from_pretrained(model, peft_model_id)
 tokenizer = AutoTokenizer.from_pretrained(base_model_name, trust_remote_code=True)
 tokenizer.pad_token = tokenizer.eos_token
 
-#
+# Create a text-generation pipeline
 chat_pipe = pipeline(
     "text-generation",
     model=model,
@@ -55,4 +55,4 @@ def generate(query: Query):
 
 if __name__ == "__main__":
     port = int(os.environ.get("PORT", 7860))
-    uvicorn.run(app, host="0.0.0.0", port=port)
+    uvicorn.run(app, host="0.0.0.0", port=port)
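For context, the fragments visible in this commit (the HF_TOKEN check, the Query request model, the PeftModel adapter load, the tokenizer setup, the text-generation pipeline, and the uvicorn entry point) suggest app.py is a small FastAPI inference server. The sketch below is a hypothetical reconstruction under that assumption, not the Space's actual code: the model identifiers, the body of the `if HF_TOKEN:` branch, the route path, the pipeline keyword arguments, and the generation parameters are not shown in the diff and are placeholders.

import os

import uvicorn
from fastapi import FastAPI
from huggingface_hub import login
from peft import PeftModel
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

app = FastAPI()

# Token check visible at lines 15-16 of the diff; the body of the `if` branch is
# not shown, so the login() call here is an assumption.
HF_TOKEN = os.environ.get("HF_TOKEN")
if HF_TOKEN:
    login(token=HF_TOKEN)
else:
    raise ValueError("Hugging Face token not found. Please set the HF_TOKEN environment variable.")

# Define a Pydantic model for request validation
class Query(BaseModel):
    text: str

# Placeholder IDs: the real base model and PEFT adapter are not visible in the diff.
base_model_name = "org/base-model"
peft_model_id = "org/peft-adapter"

# Load the base model, then attach the PEFT adapter (per the second hunk's context line).
model = AutoModelForCausalLM.from_pretrained(base_model_name, trust_remote_code=True)
model = PeftModel.from_pretrained(model, peft_model_id)

tokenizer = AutoTokenizer.from_pretrained(base_model_name, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token

# Create a text-generation pipeline
chat_pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
)

@app.post("/generate")  # route path is an assumption; only `def generate(query: Query)` appears in the diff
def generate(query: Query):
    # Generation arguments are placeholders, not taken from the diff.
    output = chat_pipe(query.text, max_new_tokens=128, do_sample=True)
    return {"generated_text": output[0]["generated_text"]}

if __name__ == "__main__":
    port = int(os.environ.get("PORT", 7860))
    uvicorn.run(app, host="0.0.0.0", port=port)

Assuming the generate function is indeed exposed as a POST route, a local request against the default port 7860 (the PORT fallback at line 57) might look like:

import requests

# Hypothetical client call; the /generate route is an assumption made above.
resp = requests.post(
    "http://localhost:7860/generate",
    json={"text": "Hello, how are you?"},
    timeout=120,
)
print(resp.json())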