Muhammad541 committed on
Commit
f3434ed
·
verified ·
1 Parent(s): 2f4c67f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -1
app.py CHANGED
@@ -1,9 +1,9 @@
 
1
  import pandas as pd
2
  import torch
3
  from sentence_transformers import SentenceTransformer, util
4
  import faiss
5
  import numpy as np
6
- import os
7
  import pickle
8
  from transformers import AutoTokenizer, AutoModelForSequenceClassification
9
  import scipy.special
@@ -18,6 +18,9 @@ import logging
18
  logging.basicConfig(level=logging.INFO)
19
  logger = logging.getLogger(__name__)
20
 
 
 
 
21
  # Paths for saving artifacts
22
  MODEL_DIR = "./saved_models" # Primary location in /app/saved_models
23
  FALLBACK_MODEL_DIR = "/tmp/saved_models" # Fallback if ./saved_models fails
@@ -167,6 +170,7 @@ def evaluate_response(args):
167
  penalty = min(1.0, max(0.5, skill_relevance))
168
  score *= penalty
169
 
 
170
  return skill, round(max(0, score), 2), is_ai_generated
171
 
172
  # Recommend Courses
@@ -299,6 +303,7 @@ def assess_skills():
299
  eval_args = [(skill, user_code, question) for skill, user_code, question in user_responses if user_code]
300
  logger.info(f"Evaluating {len(eval_args)} answers using multiprocessing pool.")
301
  results = pool.map(evaluate_response, eval_args)
 
302
  except Exception as e:
303
  logger.error(f"Error in evaluate_response: {str(e)}", exc_info=True)
304
  return jsonify({"error": "Failed to evaluate answers due to an internal error."}), 500
 
1
+ import os
2
  import pandas as pd
3
  import torch
4
  from sentence_transformers import SentenceTransformer, util
5
  import faiss
6
  import numpy as np
 
7
  import pickle
8
  from transformers import AutoTokenizer, AutoModelForSequenceClassification
9
  import scipy.special
 
18
  logging.basicConfig(level=logging.INFO)
19
  logger = logging.getLogger(__name__)
20
 
21
+ # Disable tokenizers parallelism to avoid fork-related deadlocks
22
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
23
+
24
  # Paths for saving artifacts
25
  MODEL_DIR = "./saved_models" # Primary location in /app/saved_models
26
  FALLBACK_MODEL_DIR = "/tmp/saved_models" # Fallback if ./saved_models fails
 
170
  penalty = min(1.0, max(0.5, skill_relevance))
171
  score *= penalty
172
 
173
+ logger.debug(f"Evaluated {skill}: score={score:.2f}, is_ai={is_ai_generated}")
174
  return skill, round(max(0, score), 2), is_ai_generated
175
 
176
  # Recommend Courses
 
303
  eval_args = [(skill, user_code, question) for skill, user_code, question in user_responses if user_code]
304
  logger.info(f"Evaluating {len(eval_args)} answers using multiprocessing pool.")
305
  results = pool.map(evaluate_response, eval_args)
306
+ logger.info(f"Evaluation results: {results}")
307
  except Exception as e:
308
  logger.error(f"Error in evaluate_response: {str(e)}", exc_info=True)
309
  return jsonify({"error": "Failed to evaluate answers due to an internal error."}), 500