Pijush2023 committed
Commit 0726e7f (verified) · Parent: d118988

Update app.py

Files changed (1): app.py (+11, -3)
app.py CHANGED

@@ -96,13 +96,21 @@ embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-b
 
 # Initialize the models
 def initialize_phi_model():
+    # model = AutoModelForCausalLM.from_pretrained(
+    #     "microsoft/Phi-3.5-mini-instruct",
+    #     device_map="cuda",
+    #     torch_dtype="auto",
+    #     trust_remote_code=True,
+    # )
     model = AutoModelForCausalLM.from_pretrained(
-        "microsoft/Phi-3.5-mini-instruct",
+        "microsoft/Phi-3.5-MoE-instruct",
         device_map="cuda",
         torch_dtype="auto",
         trust_remote_code=True,
     )
-    tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-mini-instruct")
+    # tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-mini-instruct")
+    tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-MoE-instruct")
+
     return pipeline("text-generation", model=model, tokenizer=tokenizer)
 
 def initialize_gpt_model():
@@ -660,7 +668,7 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
 
     response = selected_model(prompt, **{
         "max_new_tokens": 300,
-        "return_full_text": True,
+        "return_full_text": False,
         "temperature": 0.0,
         "do_sample": False,
     })
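For context, a minimal sketch of the affected code as it stands after this commit. The function body is taken directly from the hunks above; the transformers import line and the trailing usage lines (including the phi_pipeline name) are illustrative assumptions, since app.py's import block and the full generate_answer() body are not shown here.

from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

def initialize_phi_model():
    # The previous "microsoft/Phi-3.5-mini-instruct" checkpoint is left
    # commented out in the source; this commit switches to the MoE variant.
    model = AutoModelForCausalLM.from_pretrained(
        "microsoft/Phi-3.5-MoE-instruct",
        device_map="cuda",
        torch_dtype="auto",
        trust_remote_code=True,
    )
    tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-MoE-instruct")
    return pipeline("text-generation", model=model, tokenizer=tokenizer)

# Illustrative call mirroring the generate_answer() hunk: with
# return_full_text=False the pipeline returns only the newly generated
# completion instead of echoing the prompt back in front of it.
# phi_pipeline = initialize_phi_model()
# response = phi_pipeline(prompt, max_new_tokens=300, return_full_text=False,
#                         temperature=0.0, do_sample=False)

Switching return_full_text to False lets downstream code use the pipeline output as the answer directly, without stripping the prompt from the front of the generated text.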