Update app.py
app.py CHANGED
@@ -10,6 +10,12 @@ from langchain.chains import RetrievalQA
 from langchain.llms import HuggingFacePipeline
 from transformers import AutoTokenizer, TextStreamer, pipeline, BitsAndBytesConfig, AutoModelForCausalLM
 
+TORCH_VERSION = torch.__version__
+SUPPORTED_TORCH_VERSIONS = ['2.0.1', '2.1.2', '2.2.2', '2.4.0']
+if TORCH_VERSION.rsplit('+')[0] not in SUPPORTED_TORCH_VERSIONS:
+    print(f"Warning: Current PyTorch version {TORCH_VERSION} may not be compatible with ZeroGPU. "
+          f"Supported versions are: {', '.join(SUPPORTED_TORCH_VERSIONS)}")
+
 # Model initialization
 model_id = "meta-llama/Llama-3.2-3B-Instruct"
 token = os.environ.get("HF_TOKEN")