Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -238,14 +238,14 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
|
238 |
|
239 |
# تحميل النموذج والمحول  (Load the model and the tokenizer)
|
240 |
model_id = "wasmdashai/Seed-Coder-8B-Instruct-V1"
|
|
|
241 |
# model_id = "Qwen/Qwen2.5-Coder-32B-Instruct"
|
242 |
|
243 |
-
tokenizer = AutoTokenizer.from_pretrained(model_id
|
244 |
model = AutoModelForCausalLM.from_pretrained(
|
245 |
model_id,
|
246 |
-
torch_dtype=
|
247 |
-
device_map="auto"
|
248 |
-
trust_remote_code=True
|
249 |
)
|
250 |
|
251 |
# @spaces.GPU
|
|
|
238 |
|
239 |
# تحميل النموذج والمحول  (Load the model and the tokenizer)
|
240 |
model_id = "wasmdashai/Seed-Coder-8B-Instruct-V1"
|
241 |
+
model_id = "wasmdashai/wasm-32B-Instruct-V1"
|
242 |
# model_id = "Qwen/Qwen2.5-Coder-32B-Instruct"
|
243 |
|
244 |
+
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
245 |
model = AutoModelForCausalLM.from_pretrained(
|
246 |
model_id,
|
247 |
+
torch_dtype="auto",
|
248 |
+
device_map="auto"
|
|
|
249 |
)
|
250 |
|
251 |
# @spaces.GPU
|