switched to hf_hub_download
backend.py  CHANGED  (+2 -2)
@@ -31,13 +31,13 @@ llm = Llama(
 )
 
 
-model_id = "google/gemma-2-2b-it"
+"""model_id = "google/gemma-2-2b-it"
 tokenizer = GemmaTokenizerFast.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(
     model_id,
     device_map="auto",
     torch_dtype= torch.float16 if torch.cuda.is_available() else torch.float32,
-)
+)"""
 # what models will be used by LlamaIndex:
 Settings.embed_model = InstructorEmbedding(model_name="hkunlp/instructor-base")
 #Settings.llm = GemmaLLMInterface(model=model, tokenizer=tokenizer)
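For context: this hunk only comments out the transformers-based load of google/gemma-2-2b-it, while the commit title says the Space switched to hf_hub_download, presumably to fetch a local model file for the llama-cpp-python Llama instance referenced in the hunk header. The snippet below is a minimal sketch of that pattern, not code from this diff; the repo_id and filename values are hypothetical placeholders.

# Minimal sketch (assumption, not taken from this diff): download a GGUF
# checkpoint with hf_hub_download and pass the local path to llama-cpp-python.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# repo_id and filename are illustrative placeholders.
model_path = hf_hub_download(
    repo_id="bartowski/gemma-2-2b-it-GGUF",
    filename="gemma-2-2b-it-Q4_K_M.gguf",
)

llm = Llama(
    model_path=model_path,  # local file path returned by hf_hub_download
    n_ctx=2048,             # context window; adjust to the Space's needs
)

One practical reason for this switch is that a CPU-only Space can cache a quantized GGUF file once and reload it quickly, instead of pulling the full transformers checkpoint on every restart.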