Hjgugugjhuhjggg committed on
Commit
c0c18e3
1 Parent(s): 3db1f6f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -4
app.py CHANGED
@@ -56,7 +56,13 @@ class ModelManager:
56
  def load_model(self, model_config):
57
  if model_config['name'] not in self.models:
58
  try:
59
- self.models[model_config['name']] = Llama.from_pretrained(repo_id=model_config['repo_id'], filename=model_config['filename'], use_auth_token=HUGGINGFACE_TOKEN)
 
 
 
 
 
 
60
  except Exception as e:
61
  print(f"Error loading model {model_config['name']}: {e}")
62
 
@@ -91,11 +97,13 @@ def remove_duplicates(text):
91
  @spaces.GPU(queue=False, allow_gpu_memory=True, timeout=0, duration=0)
92
  def generate_model_response(model, inputs):
93
  try:
 
94
  response = model(inputs)
 
95
  return remove_duplicates(response['choices'][0]['text'])
96
  except Exception as e:
97
- print(f"Error generating model response: {e}")
98
- return ""
99
 
100
  def remove_repetitive_responses(responses):
101
  unique_responses = {}
@@ -111,7 +119,15 @@ async def process_message(message):
111
  executor.submit(generate_model_response, model, inputs)
112
  for model in global_data['models'].values()
113
  ]
114
- responses = [{'model': model_name, 'response': future.result()} for model_name, future in zip(global_data['models'].keys(), as_completed(futures))]
 
 
 
 
 
 
 
 
115
  unique_responses = remove_repetitive_responses(responses)
116
  formatted_response = ""
117
  for model, response in unique_responses.items():
 
56
def load_model(self, model_config):
    """Fetch and cache the Llama model described by *model_config*.

    Expects the keys ``'name'``, ``'repo_id'`` and ``'filename'``.
    A name already present in ``self.models`` is left untouched.
    Load failures are reported on stdout and swallowed, so the model
    simply stays absent from the cache afterwards.
    """
    name = model_config['name']
    if name in self.models:
        return
    try:
        print(f"Loading model {name}...")
        loaded = Llama.from_pretrained(
            repo_id=model_config['repo_id'],
            filename=model_config['filename'],
            use_auth_token=HUGGINGFACE_TOKEN,
        )
        self.models[name] = loaded
        print(f"Model {name} loaded successfully.")
    except Exception as e:
        # NOTE(review): error is only printed; callers must tolerate a
        # missing entry in self.models after a failed load.
        print(f"Error loading model {name}: {e}")
68
 
 
97
@spaces.GPU(queue=False, allow_gpu_memory=True, timeout=0, duration=0)
def generate_model_response(model, inputs):
    """Run *inputs* through *model* and return its de-duplicated text.

    Any failure — the model call itself or an unexpected response
    shape — is printed and mapped to the fixed string
    "Error generating response.".

    NOTE(review): the sentinel return value is indistinguishable from a
    genuine model answer; callers cannot tell success from failure.
    """
    try:
        print(f"Generating response for model: {model}")
        result = model(inputs)
        print(f"Response from {model}: {result}")
        text = result['choices'][0]['text']
        return remove_duplicates(text)
    except Exception as e:
        print(f"Error generating model response from {model}: {e}")
        return "Error generating response."
107
 
108
  def remove_repetitive_responses(responses):
109
  unique_responses = {}
 
119
  executor.submit(generate_model_response, model, inputs)
120
  for model in global_data['models'].values()
121
  ]
122
+ responses = []
123
+ for future in as_completed(futures):
124
+ try:
125
+ response = future.result()
126
+ responses.append(response)
127
+ except Exception as e:
128
+ print(f"Error with model: {e}")
129
+ responses.append("Error generating response.") # O un mensaje predeterminado de error
130
+
131
  unique_responses = remove_repetitive_responses(responses)
132
  formatted_response = ""
133
  for model, response in unique_responses.items():