import os

import torch
from flask import Flask, request, jsonify
from langchain_community.llms import LlamaCpp
from sentence_transformers import SentenceTransformer

app = Flask(__name__)
n_gpu_layers = 0  # run fully on CPU; raise this to offload layers with a GPU build of llama.cpp
n_batch = 1024    # number of tokens processed per batch

llm = LlamaCpp(
    model_path="Phi-3-mini-4k-instruct-q4.gguf",  # path to the local GGUF file
    temperature=0.1,
    n_gpu_layers=n_gpu_layers,
    n_batch=n_batch,
    verbose=True,
    n_ctx=4096,  # context window of the Phi-3 mini 4k model
)
# Embedding model used to compare skill texts by cosine similarity
model = SentenceTransformer('sentence-transformers/paraphrase-TinyBERT-L6-v2')

file_size = os.stat('Phi-3-mini-4k-instruct-q4.gguf')
print("model size:", file_size.st_size, "bytes")

@app.route('/cv', methods=['POST'])
def get_skills():
    cv_body = request.json.get('cv_body')
    # Simple inference example: ask the model to list the skills mentioned in the CV
    output = llm(
        f"\n{cv_body}\nCan you list the skills mentioned in the CV?",
        max_tokens=256,  # generate up to 256 tokens
        stop=[""],
        echo=True,       # whether to echo the prompt in the output
    )
    return jsonify({'skills': output})
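
# Illustrative request to the /cv endpoint, a sketch assuming the Flask dev server
# is running locally on its default port; the CV text below is made up:
#
#   curl -X POST http://127.0.0.1:5000/cv \
#        -H "Content-Type: application/json" \
#        -d '{"cv_body": "Python developer with experience in Flask and PyTorch."}'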

@app.get('/')
def health():
    # Simple health-check endpoint
    return jsonify({'status': 'Worked'})

@app.route('/compare', methods=['POST'])
def compare():
    jobs_skills = request.json.get('job_skills')
    employee_skills = request.json.get('employee_skills')
    # Validation: job_skills must be a list of strings; employee_skills is treated
    # as a single string, so it is encoded into one embedding vector
    if not isinstance(jobs_skills, list) or not all(isinstance(skill, str) for skill in jobs_skills):
        return jsonify({'error': 'job_skills must be a list of strings'}), 400
    # Encoding skills into embeddings
    employee_embeddings = model.encode(employee_skills)  # shape: (embedding_dim,)
    job_embeddings = model.encode(jobs_skills)           # shape: (num_jobs, embedding_dim)
    # Computing cosine similarity between the employee skills and each job
    similarity_scores = []
    employee_embeddings_tensor = torch.from_numpy(employee_embeddings).unsqueeze(0)  # (1, embedding_dim)
    for i, job_e in enumerate(job_embeddings):
        job_e_tensor = torch.from_numpy(job_e).unsqueeze(0)  # (1, embedding_dim)
        similarity_score = torch.nn.functional.cosine_similarity(
            employee_embeddings_tensor, job_e_tensor, dim=1
        )
        similarity_scores.append({"job": jobs_skills[i], "similarity_score": similarity_score.item()})
    return jsonify(similarity_scores)
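
# Illustrative request to the /compare endpoint, again a sketch against a local dev
# server; the skill values are made up. The response is a list of objects of the form
# {"job": "<skill>", "similarity_score": <float>}, one per entry in job_skills:
#
#   curl -X POST http://127.0.0.1:5000/compare \
#        -H "Content-Type: application/json" \
#        -d '{"employee_skills": "Python, Flask, PyTorch", "job_skills": ["Python", "SQL", "Docker"]}'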

if __name__ == '__main__':
    app.run()