Zwounds committed on
Commit d8057b0 · verified · 1 parent: a966745

Upload folder using huggingface_hub
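The commit itself comes from the Hub upload API. A minimal sketch of the kind of call that produces a commit like this, assuming an illustrative local folder path and Space repo id (neither is taken from the commit):

# Sketch only: folder upload to the Hub with huggingface_hub.
# folder_path and repo_id are assumptions for illustration.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="./demo",                   # local folder containing demo.py (assumed path)
    repo_id="Zwounds/boolean-search-demo",  # target Space repo (assumed name)
    repo_type="space",
    commit_message="Upload folder using huggingface_hub",
)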

Files changed (1): demo.py (+6, -7)
demo.py CHANGED
@@ -1,6 +1,6 @@
 import gradio as gr
 import torch
-from unsloth import FastLanguageModel
+from transformers import AutoModelForCausalLM, AutoTokenizer
 import logging
 
 # Setup logging
@@ -10,13 +10,12 @@ logger = logging.getLogger(__name__)
 def load_model():
     """Load fine-tuned model."""
     logger.info("Loading model...")
-    model, tokenizer = FastLanguageModel.from_pretrained(
-        "boolean_model_merged",
-        max_seq_length=2048,
-        dtype=None,  # Auto-detect
-        load_in_4bit=True
+    tokenizer = AutoTokenizer.from_pretrained("Zwounds/boolean-search-model")
+    model = AutoModelForCausalLM.from_pretrained(
+        "Zwounds/boolean-search-model",
+        torch_dtype="auto",
+        device_map="auto",
     )
-    FastLanguageModel.for_inference(model)
     return model, tokenizer
 
 def format_prompt(query):
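For reference, a minimal sketch of how the reworked load_model output might be driven for a single query; the prompt text and generation settings below are illustrative assumptions, not taken from demo.py:

# Sketch: running one query through the transformers-loaded model.
# Prompt wording and generation settings are assumptions for illustration.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Zwounds/boolean-search-model")
model = AutoModelForCausalLM.from_pretrained(
    "Zwounds/boolean-search-model",
    torch_dtype="auto",   # use the dtype stored in the checkpoint
    device_map="auto",    # place weights on available GPU/CPU automatically
)

query = "machine learning for medical imaging"
inputs = tokenizer(query, return_tensors="pt").to(model.device)
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))

Compared with the unsloth path it replaces, this drops the 4-bit load_in_4bit loading and the FastLanguageModel.for_inference call, relying instead on device_map="auto" and the checkpoint's native dtype.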