vakodiya committed
Commit c03ede8 · verified · 1 Parent(s): 6f8574e

Update generate_answer.py

Files changed (1):
  generate_answer.py (+8 -7)
generate_answer.py CHANGED
@@ -1,14 +1,15 @@
-# from transformers import AutoTokenizer, AutoModelForCausalLM
-from transformers import AutoModelForCausalLM, GemmaTokenizer
+from transformers import AutoTokenizer, AutoModelForCausalLM
+# from transformers import AutoModelForCausalLM, GemmaTokenizer
 from langchain.prompts import PromptTemplate
 import os
 
-os.environ["HF_TOKEN"] = os.getenv('HF_TOKEN')
-# model = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3-mini-4k-instruct", trust_remote_code=True)
-# tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct", trust_remote_code=True)
+# os.environ["HF_TOKEN"] = os.getenv('HF_TOKEN')
+model = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3-mini-4k-instruct", trust_remote_code=True)
+tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct", trust_remote_code=True)
 
-model = AutoModelForCausalLM.from_pretrained('google/codegemma-1.1-2b', trust_remote_code=True)
-tokenizer = GemmaTokenizer.from_pretrained('google/codegemma-1.1-2b', trust_remote_code=True)
+# Model used in code generation
+# model = AutoModelForCausalLM.from_pretrained('google/codegemma-1.1-2b', trust_remote_code=True)
+# tokenizer = GemmaTokenizer.from_pretrained('google/codegemma-1.1-2b', trust_remote_code=True)
 
 
 def generate_answer(question):
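
The hunk ends at the generate_answer signature, so the function body is not shown here. For context, a minimal sketch of how the newly loaded Phi-3 model and tokenizer might be used inside it; the generation parameters and decoding step below are assumptions for illustration, not the repository's actual code:

def generate_answer(question):
    # Hypothetical body: tokenize the question and let the instruct model complete it.
    inputs = tokenizer(question, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=256)
    # Decode the generated token ids, dropping special tokens.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)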