Hugging Face Space (status: Sleeping) — commit c0b29e2, parent c0cbe6c: "test cv extraction".
Changed file: app.py
@@ -28,8 +28,8 @@ load_dotenv()
|
|
28 |
api_token = os.getenv("HF_TOKEN")
|
29 |
huggingface_hub.login(token=api_token)
|
30 |
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
31 |
-
tokenizer = AutoTokenizer.from_pretrained('google/gemma-2-
|
32 |
-
model = AutoModelForCausalLM.from_pretrained('google/gemma-2-
|
33 |
|
34 |
#@spaces.GPU
|
35 |
def read_pdf(file_path):
|
@@ -57,7 +57,7 @@ def read_pdf(file_path):
|
|
57 |
return output
|
58 |
|
59 |
|
60 |
-
@spaces.GPU(duration=
|
61 |
def LLM_Inference(cv_text):
|
62 |
text = f'''
|
63 |
You are an AI designed to extract structured information from unstructured text. Your task is to analyze the content of a candidate's CV and extract the following details:
|
|
|
28 |
api_token = os.getenv("HF_TOKEN")
|
29 |
huggingface_hub.login(token=api_token)
|
30 |
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
31 |
+
tokenizer = AutoTokenizer.from_pretrained('google/gemma-2-9b')
|
32 |
+
model = AutoModelForCausalLM.from_pretrained('google/gemma-2-9b').to(device)
|
33 |
|
34 |
#@spaces.GPU
|
35 |
def read_pdf(file_path):
|
|
|
57 |
return output
|
58 |
|
59 |
|
60 |
+
@spaces.GPU(duration=30)
|
61 |
def LLM_Inference(cv_text):
|
62 |
text = f'''
|
63 |
You are an AI designed to extract structured information from unstructured text. Your task is to analyze the content of a candidate's CV and extract the following details:
|