pandora-s committed · verified
Commit 12d93d2 · 1 Parent(s): 21882d6

Update app.py

Files changed (1): app.py (+5 -5)
app.py CHANGED
@@ -7,7 +7,7 @@ from threading import Timer
 HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN")
 def get_available_free(use_cache = False):
     if use_cache:
-        if os.path.exists("data.csv"):
+        if os.path.exists("./data.csv"):
             print("Loading data from file...")
             return pd.read_csv("data.csv").to_dict(orient='list')
     models_dict = InferenceClient(token=HUGGINGFACE_TOKEN).list_deployed_models("text-generation-inference")
@@ -30,7 +30,7 @@ def get_available_free(use_cache = False):
         vision_available = True
         pro_sub = False
         try:
-            InferenceClient(m, timeout=60, token=HUGGINGFACE_TOKEN).text_generation("Hi.", max_new_tokens=1)
+            InferenceClient(m, timeout=120, token=HUGGINGFACE_TOKEN).text_generation("Hi.", max_new_tokens=1)
             text_available = True
         except Exception as e:
             print(e)
@@ -40,10 +40,10 @@ def get_available_free(use_cache = False):
                 print("Rate Limited!!")
                 if os.path.exists("data.csv"):
                     print("Loading data from file...")
-                    return pd.read_csv("data.csv").to_dict(orient='list')
+                    return pd.read_csv("./data.csv").to_dict(orient='list')
                 return []
         try:
-            InferenceClient(m, timeout=60).chat_completion(messages=[{'role': 'user', 'content': 'Hi.'}], max_tokens=1)
+            InferenceClient(m, timeout=120).chat_completion(messages=[{'role': 'user', 'content': 'Hi.'}], max_tokens=1)
             chat_available = True
         except Exception as e:
             print(e)
@@ -60,7 +60,7 @@ def get_available_free(use_cache = False):
         models_conclusion["Chat Completion"].append("---" if (pro_sub or (not chat_available and not text_available)) else ("✓" if chat_available else "⌀"))
         models_conclusion["Text Completion"].append("---" if (pro_sub or (not chat_available and not text_available)) else ("✓" if text_available else "⌀"))
         models_conclusion["Vision"].append("✓" if vision_available else "⌀")
-    pd.DataFrame(models_conclusion).to_csv("data.csv", index=False)
+    pd.DataFrame(models_conclusion).to_csv("./data.csv", index=False)
     return models_conclusion
 
 def update_data(use_cache = False):
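
For reference, a minimal standalone sketch of the probing pattern app.py relies on: each model id gets a one-token text_generation call and a one-token chat_completion call through huggingface_hub's InferenceClient, and any exception is treated as "endpoint not available". The probe_model helper and the "gpt2" model id below are illustrative assumptions, not part of the Space's code; the 120-second timeout mirrors the value introduced by this commit.

import os
from huggingface_hub import InferenceClient  # assumes huggingface_hub is installed

HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN")

def probe_model(model_id, timeout=120):
    """Hypothetical helper: send one-token requests to see which endpoints answer."""
    client = InferenceClient(model_id, timeout=timeout, token=HUGGINGFACE_TOKEN)
    status = {"text": False, "chat": False}
    try:
        # Same probe as app.py: one generated token is enough to confirm availability.
        client.text_generation("Hi.", max_new_tokens=1)
        status["text"] = True
    except Exception as e:
        print(e)
    try:
        client.chat_completion(messages=[{"role": "user", "content": "Hi."}], max_tokens=1)
        status["chat"] = True
    except Exception as e:
        print(e)
    return status

if __name__ == "__main__":
    print(probe_model("gpt2"))  # "gpt2" is only a placeholder model id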