0Tick committed on
Commit 8c6742d · 1 Parent(s): 04bc708

Update app.py

Files changed (1)
app.py +1 -16
app.py CHANGED
@@ -32,23 +32,11 @@ available_models = ["0Tick/e621TagAutocomplete","0Tick/danbooruTagAutocomplete"]
 current = Model()
 job_count = 1
 
-base_dir = scripts.basedir()
-models_dir = os.path.join(base_dir, "models")
-
 
 def device():
     return devices.cpu
 
 
-
-def get_model_path(name):
-    dirname = os.path.join(models_dir, name)
-    if not os.path.isdir(dirname):
-        return name
-
-    return dirname
-
-
 def generate_batch(input_ids, min_length, max_length, num_beams, temperature, repetition_penalty, length_penalty, sampling_mode, top_k, top_p):
     top_p = float(top_p) if sampling_mode == 'Top P' else None
     top_k = int(top_k) if sampling_mode == 'Top K' else None
@@ -87,7 +75,7 @@ def generate(id_task, model_name, batch_count, batch_size, text, *args):
     current.name = None
 
     if model_name != 'None':
-        path = get_model_path(model_name)
+        path = model_name
         current.tokenizer = transformers.AutoTokenizer.from_pretrained(path)
         current.model = transformers.AutoModelForCausalLM.from_pretrained(path)
         current.name = model_name
@@ -126,9 +114,6 @@ def generate(id_task, model_name, batch_count, batch_size, text, *args):
     return markup, ''
 
 
-
-list_available_models()
-
 with gr.Blocks(analytics_enabled=False) as space:
     with gr.Row():
         with gr.Column(scale=80):
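
With get_model_path removed, the name chosen from available_models is handed straight to transformers, so repo ids such as "0Tick/e621TagAutocomplete" are resolved through from_pretrained's normal Hub download/cache lookup instead of a local models/ directory. A minimal sketch of the resulting loading path outside the Gradio app (the prompt string is illustrative, not taken from the Space):

import transformers

# After this commit the selected name is used as-is; a Hub repo id like
# "0Tick/e621TagAutocomplete" is downloaded (or read from the local cache)
# by from_pretrained -- no models/ directory lookup is involved.
model_name = "0Tick/e621TagAutocomplete"
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
model = transformers.AutoModelForCausalLM.from_pretrained(model_name)

# Illustrative use: the Space's generate_batch() builds input_ids from the
# user's text and calls model.generate() with the chosen sampling settings.
input_ids = tokenizer("1girl, ", return_tensors="pt").input_ids
output = model.generate(input_ids, max_length=32, do_sample=True, top_k=50)
print(tokenizer.decode(output[0], skip_special_tokens=True))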