# Print the installed packages at startup (debug aid)
try:
    from pip._internal.operations import freeze
except ImportError:  # pip < 10.0
    from pip.operations import freeze

pkgs = freeze.freeze()
for pkg in pkgs:
    print(pkg)
import os
from fastapi import FastAPI, HTTPException, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from PyPDF2 import PdfReader
import google.generativeai as genai
import json
import base64
from io import BytesIO
from PIL import Image
import requests
secret = os.environ["key"]
genai.configure(api_key=secret)
model_vision = genai.GenerativeModel('gemini-pro-vision')
model_text = genai.GenerativeModel('gemini-pro')
app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
def encode_image(image):
    # Convert image to a BytesIO object (in memory)
    buffered = BytesIO()
    image.save(buffered, format=image.format)  # Use the original image format (e.g., PNG, JPEG)
    img_bytes = buffered.getvalue()
    # Encode image bytes to a base64 string
    base64_image = base64.b64encode(img_bytes).decode('utf-8')
    return base64_image
def vision(image):
    # OpenAI API key, read from the environment instead of being hard-coded in source
    # (the original embedded the secret directly; the variable name OPENAI_API_KEY is illustrative)
    api_key = os.environ["OPENAI_API_KEY"]
    # Getting the base64 string
    base64_image = encode_image(image)
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
    payload = {
        "model": "gpt-4o-mini",
        "messages": [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": "extract all data from this image"
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{base64_image}"
                        }
                    }
                ]
            }
        ],
        "max_tokens": 300
    }
    response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
    # Return the extracted text so the caller can use it (the original only printed it)
    return response.json()['choices'][0]['message']['content']
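
# Illustrative usage of the helper above (not in the original file; the path
# "sample_scan.png" is a placeholder):
# with Image.open("sample_scan.png") as img:
#     print(vision(img))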
@app.post("/get_ocr_data/")
async def get_data(input_file: UploadFile = File(...)):
try:
# Determine the file type by reading the first few bytes
file_content = await input_file.read()
file_type = input_file.content_type
text = ""
if file_type == "application/pdf":
# Read PDF file using PyPDF2
pdf_reader = PdfReader(io.BytesIO(file_content))
for page in pdf_reader.pages:
text += page.extract_text()
elif file_type in ["image/jpeg", "image/png", "image/jpg"]:
# Read Image file using PIL and pytesseract
image = Image.open(io.BytesIO(file_content))
return encode_image(image)
text = vision(image)
else:
raise HTTPException(status_code=400, detail="Unsupported file type")
# Call Gemini (or another model) to extract required data
prompt = f"""This is CV data: {text.strip()}
I want only:
firstname, lastname, contact number, total years of experience, LinkedIn link, experience, skills
in JSON format only"""
response = model_text.generate_content(prompt)
data = json.loads(response.text.replace("```json", "").replace("```", ""))
return {"data": data}
except Exception as e:
raise HTTPException(status_code=500, detail=f"Error processing file: {str(e)}")
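
# Illustrative local entry point (not part of the original file; module name,
# host, and port 8000 are assumptions). Run the API with uvicorn, then call it e.g.:
#   curl -X POST -F "input_file=@resume.pdf;type=application/pdf" http://127.0.0.1:8000/get_ocr_data/
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)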