Trigger82 committed on
Commit 9bf139e · verified · 1 Parent(s): 7d7624b

Update app.py

Files changed (1)
app.py +3 -5
app.py CHANGED
@@ -1,12 +1,10 @@
-# app.py
-
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from fastapi import FastAPI
 
-# Model ID on Hugging Face
+
 MODEL_ID = "rasyosef/Phi-1_5-Instruct-v0.1"
 
-# Load tokenizer and model from local cache (pre-downloaded in Docker build)
+
 tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
 model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
 
@@ -18,7 +16,7 @@ def chat(query: str):
     GET /chat?query=Your+question
     Returns JSON: {"answer": "...model's reply..."}
     """
-    # Build the instruction-style prompt expected by Phi-1.5 Instruct
+    # prompt expected by Phi-1.5 Instruct
     prompt = (
         "<|im_start|>system\nYou are a helpful assistant.<|im_end|>"
         "<|im_start|>user\n" + query + "<|im_end|>"