Mustehson committed on
Commit
902da82
·
verified ·
1 Parent(s): bb75103

Change Model

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -49,7 +49,7 @@ except:
49
  #---------------------------------------
50
 
51
  #-------LOAD HUGGINGFACE PIPELINE-------
52
- tokenizer = AutoTokenizer.from_pretrained("motherduckdb/DuckDB-NSQL-7B-v0.1")
53
 
54
  quantization_config = BitsAndBytesConfig(
55
  load_in_4bit=True,
@@ -57,7 +57,7 @@ quantization_config = BitsAndBytesConfig(
57
  bnb_4bit_use_double_quant=True,
58
  bnb_4bit_quant_type= "nf4")
59
 
60
- model = AutoModelForCausalLM.from_pretrained("motherduckdb/DuckDB-NSQL-7B-v0.1", quantization_config=quantization_config,
61
  device_map="auto", torch_dtype=torch.bfloat16)
62
 
63
  pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=1024, return_full_text=False)
 
49
  #---------------------------------------
50
 
51
  #-------LOAD HUGGINGFACE PIPELINE-------
52
+ tokenizer = AutoTokenizer.from_pretrained("defog/llama-3-sqlcoder-8b")
53
 
54
  quantization_config = BitsAndBytesConfig(
55
  load_in_4bit=True,
 
57
  bnb_4bit_use_double_quant=True,
58
  bnb_4bit_quant_type= "nf4")
59
 
60
+ model = AutoModelForCausalLM.from_pretrained("defog/llama-3-sqlcoder-8b", quantization_config=quantization_config,
61
  device_map="auto", torch_dtype=torch.bfloat16)
62
 
63
  pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=1024, return_full_text=False)