Build error
Update main.py
main.py CHANGED
@@ -15,11 +15,8 @@ llm = Llama(
 class Validation(BaseModel):
     user_prompt: str
     system_prompt: str
-    max_tokens = 1024
-    temperature = 0.
-    top_p = 0.9
-    repeat_penalty = 1.1
-    top_k = 40
+    max_tokens: int = 1024
+    temperature: float = 0.01
 
 # FastAPI application initialization
 app = FastAPI()
@@ -33,7 +30,7 @@ async def generate_response(item: Validation):
     { item.user_prompt }<|eot_id|> \n <|start_header_id|>assistant<|end_header_id|>"""
 
     # Call the Llama model to generate a response
-    output = llm(prompt, max_tokens = item.max_tokens,temperature = item.temperature
+    output = llm(prompt, max_tokens = item.max_tokens,temperature = item.temperature, echo=True)
 
     # Extract and return the text from the response
     return output['choices'][0]['text']
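
For reference, the removed code fails in two ways consistent with the build error: the old Validation attributes (max_tokens = 1024 and so on) carry no type annotations, which Pydantic v2 rejects as non-annotated attributes on a BaseModel, and the old llm(...) line, at least as rendered in the diff, never closes its parenthesis, which is a syntax error. Below is a minimal sketch of how the patched file plausibly fits together; the model path, the route path, and the opening lines of the prompt template are assumptions, since the diff shows only fragments of main.py:

from fastapi import FastAPI
from pydantic import BaseModel
from llama_cpp import Llama

# Hypothetical model path; the Llama(...) constructor arguments are not shown in the diff
llm = Llama(model_path="model.gguf")

class Validation(BaseModel):
    user_prompt: str
    system_prompt: str
    max_tokens: int = 1024       # annotated, so Pydantic treats it as a field with a default
    temperature: float = 0.01

# FastAPI application initialization
app = FastAPI()

@app.post("/generate")  # route path assumed; the decorator sits outside both hunks
async def generate_response(item: Validation):
    # Only the last line of this template appears in the diff; the system and
    # user header lines are assumed from the Llama 3 chat-tag style it uses
    prompt = f"""<|start_header_id|>system<|end_header_id|>
    { item.system_prompt }<|eot_id|> \n <|start_header_id|>user<|end_header_id|>
    { item.user_prompt }<|eot_id|> \n <|start_header_id|>assistant<|end_header_id|>"""

    # Call the Llama model to generate a response
    output = llm(prompt, max_tokens=item.max_tokens, temperature=item.temperature, echo=True)

    # Extract and return the text from the response
    return output['choices'][0]['text']

One side effect worth knowing: with echo=True, llama-cpp-python includes the prompt itself at the start of output['choices'][0]['text'], so the endpoint returns the prompt along with the completion.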
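Since both new fields have defaults, a client only needs to send the two required strings. A hedged request example, assuming the endpoint is mounted at /generate on a local uvicorn at port 8000 (neither appears in the diff):

import requests

# Only user_prompt and system_prompt are required; max_tokens and temperature
# fall back to the defaults (1024 and 0.01) declared on the Validation model
payload = {
    "user_prompt": "Summarize what this Space does in one sentence.",
    "system_prompt": "You are a concise assistant.",
}

response = requests.post("http://localhost:8000/generate", json=payload)
response.raise_for_status()
print(response.json())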