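"""Ask An Expert Proof Of Concept.

Gradio app (intended to run as a Hugging Face Space): the user uploads a ZIP archive of
PDF files, the text is extracted and used to build a persona via the OpenAI chat API,
and that persona then answers the user's prompt.
"""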
import gradio as gr
from PyPDF2 import PdfReader
import zipfile
import os
import io
import nltk
import openai
import time
import subprocess
import sys

# Install required libraries at startup
subprocess.check_call([sys.executable, "-m", "pip", "install", "-r", "requirements.txt"])

# Download the required NLTK tokenizer data
nltk.download('punkt')

# Read the OpenAI API key from the 'OpenAPI' environment variable (e.g. a Space secret)
openai.api_key = os.getenv('OpenAPI')
def create_persona(text):
    max_retries = 5
    for attempt in range(max_retries):
        try:
            response = openai.ChatCompletion.create(
                model="gpt-3.5-turbo",
                messages=[
                    {"role": "system", "content": "You are a knowledgeable assistant."},
                    {"role": "user", "content": f"Create a persona based on this text: {text}"},
                ]
            )
            return response['choices'][0]['message']['content']
        except Exception as e:
            if attempt < max_retries - 1:  # not the last attempt yet
                time.sleep(1)  # wait for 1 second before retrying
                continue
            else:
                return str(e)  # return the exception message after the last attempt
def call_openai_api(persona, user_prompt):
    max_retries = 5
    for attempt in range(max_retries):
        try:
            response = openai.ChatCompletion.create(
                model="gpt-3.5-turbo",
                messages=[
                    {"role": "system", "content": f"You are {persona}"},
                    {"role": "user", "content": f"""Ignore all previous instructions. As a Cognitive AI Agent your persona is: {persona}
You will answer only as an expert in the cognitive workplace and HumanCentric leadership.
All answers must relate to HumanCentric services or the cognitive workplace. {user_prompt}"""},
                ]
            )
            return response['choices'][0]['message']['content']
        except Exception as e:
            if attempt < max_retries - 1:  # not the last attempt yet
                time.sleep(1)  # wait for 1 second before retrying
                continue
            else:
                return str(e)  # return the exception message after the last attempt
def pdf_to_text(file, user_prompt):
    # Extract text from every PDF inside the uploaded ZIP archive
    z = zipfile.ZipFile(file.name, 'r')
    aggregated_text = ''
    for filename in z.namelist():
        if filename.endswith('.pdf'):
            pdf_file_data = z.read(filename)
            pdf_file_io = io.BytesIO(pdf_file_data)
            pdf = PdfReader(pdf_file_io)
            for page in pdf.pages:
                aggregated_text += page.extract_text()

    # Tokenize the aggregated text with NLTK (a rough proxy for the model's own tokens)
    tokens = nltk.word_tokenize(aggregated_text)

    # Truncate if there are more than 4096 tokens.
    # Other strategies (chunking, summarizing) are possible; here the first 4096 tokens are kept.
    if len(tokens) > 4096:
        tokens = tokens[:4096]

    # Create a single persona from all of the text
    persona = create_persona(' '.join(tokens))

    # Ask the OpenAI API to answer the user's prompt as that persona
    response = call_openai_api(persona, user_prompt)
    return response
iface = gr.Interface(
    fn=pdf_to_text,
    inputs=[
        gr.inputs.File(label="PDF File (Upload a ZIP file containing ONLY PDF files)"),
        gr.inputs.Textbox(label="User Prompt (Enter a prompt to interact with your persona)")
    ],
    outputs=gr.outputs.Textbox(label="Cognitive Agent Response"),
    title="Ask An Expert Proof Of Concept",
    description="This app extracts knowledge from the uploaded ZIP file. The Cognitive Agent will use this data to build your unique persona."
)

iface.launch(share=False)