Chengxb888 committed on
Commit
3cc14e8
·
verified ·
1 Parent(s): 7eb1730

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -3
app.py CHANGED
@@ -10,6 +10,7 @@ def greet_json():
10
 
11
  @app.get("/hello/{msg}")
12
  def say_hello(msg: str):
 
13
  torch.random.manual_seed(0)
14
  model = AutoModelForCausalLM.from_pretrained(
15
  "microsoft/Phi-3-mini-128k-instruct",
@@ -17,7 +18,7 @@ def say_hello(msg: str):
17
  torch_dtype="auto",
18
  trust_remote_code=True,
19
  )
20
-
21
  tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-128k-instruct")
22
 
23
  messages = [
@@ -26,13 +27,13 @@ def say_hello(msg: str):
26
  {"role": "assistant", "content": "Sure! Here are some ways to eat bananas and dragonfruits together: 1. Banana and dragonfruit smoothie: Blend bananas and dragonfruits together with some milk and honey. 2. Banana and dragonfruit salad: Mix sliced bananas and dragonfruits together with some lemon juice and honey."},
27
  {"role": "user", "content": msg},
28
  ]
29
-
30
  pipe = pipeline(
31
  "text-generation",
32
  model=model,
33
  tokenizer=tokenizer,
34
  )
35
-
36
  generation_args = {
37
  "max_new_tokens": 500,
38
  "return_full_text": False,
@@ -41,4 +42,5 @@ def say_hello(msg: str):
41
  }
42
 
43
  output = pipe(messages, **generation_args)
 
44
  return {"message": output[0]['generated_text']}
 
10
 
11
  @app.get("/hello/{msg}")
12
  def say_hello(msg: str):
13
+ print("model")
14
  torch.random.manual_seed(0)
15
  model = AutoModelForCausalLM.from_pretrained(
16
  "microsoft/Phi-3-mini-128k-instruct",
 
18
  torch_dtype="auto",
19
  trust_remote_code=True,
20
  )
21
+ print("token & msg")
22
  tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-128k-instruct")
23
 
24
  messages = [
 
27
  {"role": "assistant", "content": "Sure! Here are some ways to eat bananas and dragonfruits together: 1. Banana and dragonfruit smoothie: Blend bananas and dragonfruits together with some milk and honey. 2. Banana and dragonfruit salad: Mix sliced bananas and dragonfruits together with some lemon juice and honey."},
28
  {"role": "user", "content": msg},
29
  ]
30
+ print("pipe")
31
  pipe = pipeline(
32
  "text-generation",
33
  model=model,
34
  tokenizer=tokenizer,
35
  )
36
+ print("output")
37
  generation_args = {
38
  "max_new_tokens": 500,
39
  "return_full_text": False,
 
42
  }
43
 
44
  output = pipe(messages, **generation_args)
45
+ print("complete")
46
  return {"message": output[0]['generated_text']}