pandora-s committed (verified)
Commit e4d88e1 · Parent: bd20052

Update app.py

Files changed (1):
  1. app.py  +12 -8
app.py CHANGED
@@ -8,7 +8,7 @@ HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN")
 def get_available_free(use_cache = False):
     if use_cache:
         if os.path.exists(str(os.getcwd())+"/data.csv"):
-            print("Loading data from file...")
+            # print("Loading data from file...")
             return pd.read_csv("data.csv").to_dict(orient='list')
     models_dict = InferenceClient(token=HUGGINGFACE_TOKEN).list_deployed_models("text-generation-inference")
     models = models_dict['text-generation'] + models_dict['text2text-generation']
@@ -22,37 +22,41 @@ def get_available_free(use_cache = False):
         "Chat Completion": [],
         "Vision": []
     }
-    for m in list(set(models + models_vision + models_others)):
+
+    all_models = list(set(models + models_vision + models_others))
+    print(all_models)
+    for m in all_models:
         text_available = False
         chat_available = False
         vision_available = False
         if m in models_vision:
             vision_available = True
         pro_sub = False
+        print(m)
         try:
             InferenceClient(m, timeout=10, token=HUGGINGFACE_TOKEN).text_generation("Hi.", max_new_tokens=1)
             text_available = True
         except Exception as e:
-            print(e)
+            # print(e)
             if e and "Model requires a Pro subscription" in str(e):
                 pro_sub = True
             if e and "Rate limit reached" in str(e):
-                print("Rate Limited!!")
+                # print("Rate Limited!!")
                 if os.path.exists(str(os.getcwd())+"/data.csv"):
-                    print("Loading data from file...")
+                    # print("Loading data from file...")
                     return pd.read_csv(str(os.getcwd())+"/data.csv").to_dict(orient='list')
                 return []
         try:
             InferenceClient(m, timeout=10).chat_completion(messages=[{'role': 'user', 'content': 'Hi.'}], max_tokens=1)
             chat_available = True
         except Exception as e:
-            print(e)
+            # print(e)
             if e and "Model requires a Pro subscription" in str(e):
                 pro_sub = True
             if e and "Rate limit reached" in str(e):
-                print("Rate Limited!!")
+                # print("Rate Limited!!")
                 if os.path.exists("data.csv"):
-                    print("Loading data from file...")
+                    # print("Loading data from file...")
                     return pd.read_csv(str(os.getcwd())+"/data.csv").to_dict(orient='list')
                 return []
         models_conclusion["Model"].append(m)
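For context, the commit only changes logging: the per-exception print calls are commented out, and the candidate model list is materialized into all_models and printed once up front, plus a print(m) per iteration. The availability check itself is untouched: each model is probed with the cheapest possible request (a single token) against the text-generation and chat-completion endpoints, and the error message is inspected for the Pro-subscription and rate-limit cases. Below is a minimal, self-contained sketch of that probing pattern, assuming huggingface_hub is installed and HUGGINGFACE_TOKEN is set as in app.py; the probe_model helper and the dict it returns are illustrative and not part of the commit.

import os
from huggingface_hub import InferenceClient

HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN")

def probe_model(model_id: str) -> dict:
    """Hypothetical helper: report which serverless endpoints respond for model_id."""
    status = {"text": False, "chat": False, "pro_only": False, "rate_limited": False}
    try:
        # Cheapest possible text-generation call: request a single new token.
        InferenceClient(model_id, timeout=10, token=HUGGINGFACE_TOKEN).text_generation(
            "Hi.", max_new_tokens=1
        )
        status["text"] = True
    except Exception as e:
        # Classify the failure the same way app.py does: by error message substring.
        if "Model requires a Pro subscription" in str(e):
            status["pro_only"] = True
        if "Rate limit reached" in str(e):
            status["rate_limited"] = True
    try:
        # Same idea for the chat-completion endpoint.
        InferenceClient(model_id, timeout=10, token=HUGGINGFACE_TOKEN).chat_completion(
            messages=[{"role": "user", "content": "Hi."}], max_tokens=1
        )
        status["chat"] = True
    except Exception as e:
        if "Model requires a Pro subscription" in str(e):
            status["pro_only"] = True
        if "Rate limit reached" in str(e):
            status["rate_limited"] = True
    return status

Usage would be a plain loop, e.g. results = {m: probe_model(m) for m in all_models}; in app.py the rate-limit branch additionally falls back to the cached data.csv (or an empty list) instead of continuing the loop.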