Mustehson committed on
Commit 64e7633 · 1 Parent(s): 8829cfd

Running on CPU

Files changed (1)
  1. app.py +15 -15
app.py CHANGED
@@ -33,20 +33,20 @@ custom_css = """
 """
 print('Loading Model...')
 # Load Model
-@spaces.GPU
-def load_model():
-    llama = Llama(
-        model_path=hf_hub_download(
-            repo_id="motherduckdb/DuckDB-NSQL-7B-v0.1-GGUF",
-            filename="DuckDB-NSQL-7B-v0.1-q8_0.gguf",
-            local_dir='.'
-        ),
-        n_ctx=2048,
-        n_gpu_layers=-1
-    )
-    return llama
-
-llama = load_model()
+# @spaces.GPU
+# def load_model():
+llama = Llama(
+    model_path=hf_hub_download(
+        repo_id="motherduckdb/DuckDB-NSQL-7B-v0.1-GGUF",
+        filename="DuckDB-NSQL-7B-v0.1-q8_0.gguf",
+        local_dir='.'
+    ),
+    n_ctx=2048,
+    n_gpu_layers=0
+)
+# return llama
+
+# llama = load_model()
 print('Model Loaded...')
 
 # Get Databases
@@ -88,7 +88,7 @@ def get_prompt(schema, query_input):
     return text
 
 # Generate SQL
-@spaces.GPU
+# @spaces.GPU
 def generate_sql(prompt):
 
     result = llama(prompt, temperature=0.1, max_tokens=1000)
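For context, a minimal standalone sketch of the patched startup path, assuming the same `llama_cpp` and `huggingface_hub` packages app.py already imports: `n_gpu_layers=0` keeps every layer on the CPU, whereas the removed `-1` asked llama.cpp to offload all layers to the GPU (hence the dropped `@spaces.GPU` decorators). The prompt string below is a placeholder, not taken from the repo.

from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Download the same GGUF weights into the working directory, as in app.py.
model_path = hf_hub_download(
    repo_id="motherduckdb/DuckDB-NSQL-7B-v0.1-GGUF",
    filename="DuckDB-NSQL-7B-v0.1-q8_0.gguf",
    local_dir=".",
)

# n_gpu_layers=0 runs entirely on the CPU; the previous value -1 would have
# offloaded all layers to the GPU when llama.cpp is built with GPU support.
llama = Llama(model_path=model_path, n_ctx=2048, n_gpu_layers=0)

# Same call shape as generate_sql(): low temperature for stable SQL output.
result = llama("-- hypothetical prompt text --", temperature=0.1, max_tokens=1000)
print(result["choices"][0]["text"])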