Commit 021d971 · 1 Parent(s): e1cd0d3
test cv extraction
app.py
CHANGED
@@ -23,8 +23,13 @@ from dotenv import load_dotenv
 from huggingface_hub import InferenceClient
 import huggingface_hub
 #zero = torch.Tensor([0]).cuda()
+
 load_dotenv()
 api_token = os.getenv("HF_TOKEN")
+huggingface_hub.login(token=api_token)
+device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+tokenizer = AutoTokenizer.from_pretrained('google/gemma-2-2b-it')
+model = AutoModelForCausalLM.from_pretrained('google/gemma-2-2b-it').to(device)
 
 #@spaces.GPU
 def read_pdf(file_path):
@@ -54,11 +59,6 @@ def read_pdf(file_path):
 
 @spaces.GPU(duration=15)
 def LLM_Inference(cv_text):
-    huggingface_hub.login(token=api_token)
-    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-    tokenizer = AutoTokenizer.from_pretrained('google/gemma-2-2b-it')
-    model = AutoModelForCausalLM.from_pretrained('google/gemma-2-2b-it').to(device)
-
     text = f'''
     You are an AI designed to extract structured information from unstructured text. Your task is to analyze the content of a candidate's CV and extract the following details:
 
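The commit only relocates the login and Gemma model/tokenizer loading from the body of LLM_Inference to module scope; the rest of the function (how the prompt is actually run) is outside this hunk. As a minimal sketch, assuming the module-level tokenizer, model, and device defined above, inference inside the @spaces.GPU window would typically look like the hypothetical helper below (generation parameters are illustrative, not taken from the commit):

    # Hypothetical usage sketch, not part of the commit.
    # Assumes tokenizer, model, and device are the module-level objects added above.
    def run_extraction(prompt_text):
        inputs = tokenizer(prompt_text, return_tensors="pt").to(device)
        outputs = model.generate(**inputs, max_new_tokens=512, do_sample=False)
        # Decode only the newly generated tokens, skipping the prompt.
        generated = outputs[0][inputs["input_ids"].shape[-1]:]
        return tokenizer.decode(generated, skip_special_tokens=True)

Loading the model once at import time keeps the @spaces.GPU(duration=15) call focused on generation instead of spending part of the GPU allocation on downloading and moving weights.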