Zwounds committed
Commit 74654a8 · verified · 1 Parent(s): d8057b0

Upload folder using huggingface_hub

Files changed (2):
  1. demo.py +5 -3
  2. requirements.txt +3 -109
demo.py CHANGED
@@ -8,13 +8,15 @@ logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 
 def load_model():
-    """Load fine-tuned model."""
+    """Load fine-tuned model without quantization for CPU compatibility."""
     logger.info("Loading model...")
     tokenizer = AutoTokenizer.from_pretrained("Zwounds/boolean-search-model")
+
+    # Load model in the most compatible way for Spaces
     model = AutoModelForCausalLM.from_pretrained(
         "Zwounds/boolean-search-model",
-        torch_dtype="auto",
-        device_map="auto",
+        low_cpu_mem_usage=True,
+        torch_dtype=torch.float32  # Use standard floating point for CPU
     )
     return model, tokenizer
 
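For anyone who wants to exercise the CPU-only loader outside the Space, a short smoke test could look like the sketch below. It reuses the exact from_pretrained arguments introduced in this hunk; the prompt string and generation settings are illustrative assumptions, not code from demo.py. Note that torch_dtype=torch.float32 requires torch to be imported in demo.py, which the hunk does not show.

import logging

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

logging.basicConfig(level=logging.INFO)

# Same loading arguments as the updated load_model():
# float32 weights and low_cpu_mem_usage for a CPU-only Space.
tokenizer = AutoTokenizer.from_pretrained("Zwounds/boolean-search-model")
model = AutoModelForCausalLM.from_pretrained(
    "Zwounds/boolean-search-model",
    low_cpu_mem_usage=True,
    torch_dtype=torch.float32,
)

# Illustrative prompt; the real demo may format queries differently.
prompt = "Convert to a boolean search query: recent papers on retrieval augmented generation"
inputs = tokenizer(prompt, return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))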
requirements.txt CHANGED
@@ -1,109 +1,3 @@
-accelerate==1.4.0
-aiofiles==23.2.1
-aiohappyeyeballs==2.6.0
-aiohttp==3.11.13
-aiosignal==1.3.2
-annotated-types==0.7.0
-anyio==4.8.0
-attrs==25.1.0
-bitsandbytes==0.45.3
-certifi==2025.1.31
-charset-normalizer==3.4.1
-click==8.1.8
-cut-cross-entropy==25.1.1
-datasets==3.3.2
-diffusers==0.32.2
-dill==0.3.8
-docstring-parser==0.16
-fastapi==0.115.11
-ffmpy==0.5.0
-filelock==3.17.0
-frozenlist==1.5.0
-fsspec==2024.12.0
-gradio==5.21.0
-gradio-client==1.7.2
-groovy==0.1.2
-h11==0.14.0
-hf-transfer==0.1.9
-httpcore==1.0.7
-httpx==0.28.1
-huggingface-hub==0.29.3
-idna==3.10
-importlib-metadata==8.6.1
-jinja2==3.1.6
-markdown-it-py==3.0.0
-markupsafe==2.1.5
-mdurl==0.1.2
-mpmath==1.3.0
-multidict==6.1.0
-multiprocess==0.70.16
-networkx==3.4.2
-numpy==2.2.3
-nvidia-cublas-cu12==12.4.5.8
-nvidia-cuda-cupti-cu12==12.4.127
-nvidia-cuda-nvrtc-cu12==12.4.127
-nvidia-cuda-runtime-cu12==12.4.127
-nvidia-cudnn-cu12==9.1.0.70
-nvidia-cufft-cu12==11.2.1.3
-nvidia-curand-cu12==10.3.5.147
-nvidia-cusolver-cu12==11.6.1.9
-nvidia-cusparse-cu12==12.3.1.170
-nvidia-cusparselt-cu12==0.6.2
-nvidia-nccl-cu12==2.21.5
-nvidia-nvjitlink-cu12==12.4.127
-nvidia-nvtx-cu12==12.4.127
-orjson==3.10.15
-packaging==24.2
-pandas==2.2.3
-peft==0.14.0
-pillow==11.1.0
-propcache==0.3.0
-protobuf==3.20.3
-psutil==7.0.0
-pyarrow==19.0.1
-pydantic==2.10.6
-pydantic-core==2.27.2
-pydub==0.25.1
-pygments==2.19.1
-python-dateutil==2.9.0.post0
-python-multipart==0.0.20
-pytz==2025.1
-pyyaml==6.0.2
-regex==2024.11.6
-requests==2.32.3
-rich==13.9.4
-ruff==0.10.0
-safehttpx==0.1.6
-safetensors==0.5.3
-semantic-version==2.10.0
-sentencepiece==0.2.0
-setuptools==76.0.0
-shellingham==1.5.4
-shtab==1.7.1
-six==1.17.0
-sniffio==1.3.1
-starlette==0.46.1
-sympy==1.13.1
-tokenizers==0.21.0
-tomlkit==0.13.2
-torch==2.6.0
-torchvision==0.21.0
-tqdm==4.67.1
-transformers==4.49.0
-triton==3.2.0
-trl==0.15.2
-typeguard==4.4.2
-typer==0.15.2
-typing-extensions==4.12.2
-tyro==0.9.16
-tzdata==2025.1
-unsloth==2025.3.9
-unsloth-zoo==2025.3.8
-urllib3==2.3.0
-uvicorn==0.34.0
-websockets==15.0.1
-wheel==0.45.1
-xformers==0.0.29.post3
-xxhash==3.5.0
-yarl==1.18.3
-zipp==3.21.0
+torch==2.0.1
+transformers==4.31.0
+gradio>=4.0.0
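As a sanity check that the three remaining pins (torch, transformers, gradio) cover what the demo needs, a minimal Gradio wiring of the loader could look like the sketch below. The gr.Interface layout, the labels, and the convert() helper are assumptions for illustration, not code from demo.py.

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Same CPU-friendly loading as the demo.py change above.
tokenizer = AutoTokenizer.from_pretrained("Zwounds/boolean-search-model")
model = AutoModelForCausalLM.from_pretrained(
    "Zwounds/boolean-search-model", low_cpu_mem_usage=True, torch_dtype=torch.float32
)

def convert(query: str) -> str:
    # Hypothetical helper: turn a natural-language query into a boolean search string.
    inputs = tokenizer(query, return_tensors="pt")
    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=64)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)

demo = gr.Interface(
    fn=convert,
    inputs=gr.Textbox(label="Natural-language query"),
    outputs=gr.Textbox(label="Boolean search query"),
    title="Boolean search demo",
)

if __name__ == "__main__":
    demo.launch()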