Update generate_answer.py
generate_answer.py CHANGED (+7 -3)
@@ -1,8 +1,12 @@
-from transformers import AutoTokenizer, AutoModelForCausalLM
+# from transformers import AutoTokenizer, AutoModelForCausalLM
+from transformers import AutoModelForCausalLM, GemmaTokenizer
 from langchain.prompts import PromptTemplate
 
-model = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3-mini-4k-instruct", trust_remote_code=True)
-tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct", trust_remote_code=True)
+# model = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3-mini-4k-instruct", trust_remote_code=True)
+# tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct", trust_remote_code=True)
+
+model = AutoModelForCausalLM.from_pretrained('google/codegemma-1.1-2b', trust_remote_code=True)
+tokenizer = GemmaTokenizer.from_pretrained('google/codegemma-1.1-2b', trust_remote_code=True)
 
 
 def generate_answer(question):
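The hunk cuts off at the generate_answer signature, so the function body is not part of this diff. As a rough illustration of how the swapped-in CodeGemma model and GemmaTokenizer would typically be driven, here is a minimal sketch; the function body, the max_new_tokens value, and the decoding step are assumptions for illustration, not this Space's actual code:

# Sketch only: the model/tokenizer lines mirror the diff above; the function
# body is a hypothetical illustration, not the code from this Space.
import torch
from transformers import AutoModelForCausalLM, GemmaTokenizer

model = AutoModelForCausalLM.from_pretrained('google/codegemma-1.1-2b', trust_remote_code=True)
tokenizer = GemmaTokenizer.from_pretrained('google/codegemma-1.1-2b', trust_remote_code=True)

def generate_answer(question):
    # Tokenize the question and generate a completion with CodeGemma.
    inputs = tokenizer(question, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=256)
    # Decode only the newly generated tokens, skipping the echoed prompt.
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

Note that AutoTokenizer.from_pretrained would likely resolve to the Gemma tokenizer for this checkpoint as well, so the explicit GemmaTokenizer import is a stylistic choice rather than a requirement.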