import os
import warnings

import nltk
import openai
from dotenv import load_dotenv
from flask import Flask, jsonify, render_template, request
from werkzeug.utils import secure_filename

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.llms import OpenAI
from langchain.chains import VectorDBQA
from langchain.document_loaders import UnstructuredFileLoader

warnings.filterwarnings("ignore")

# The unstructured loaders rely on NLTK's sentence tokenizer.
nltk.download("punkt")

# Load variables from .env (including OPENAI_API_KEY) before reading them.
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
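# ---- Default knowledge base ----
# Load the bundled Jio.txt document, split it into ~1000-character chunks,
# embed the chunks with OpenAI embeddings, and index them in an in-memory
# Chroma store that backs the question-answering chain defined below.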
loader = UnstructuredFileLoader('Jio.txt', mode='elements')
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
doc_search = Chroma.from_documents(texts, embeddings)
chain = VectorDBQA.from_chain_type(llm=OpenAI(temperature=0.0), chain_type="stuff", vectorstore=doc_search)
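# ---- Flask application ----
# Templates (index.html, KBTrain.html) are served from the project root.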
app = Flask(__name__, template_folder="./")

# Create a directory in a known location to save files to.
uploads_dir = os.path.join(app.root_path, 'static', 'uploads')
os.makedirs(uploads_dir, exist_ok=True)
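# ---- HTTP routes ----
# NOTE: the paths on the @app.route decorators below are assumptions; adjust
# them to whatever the front-end templates actually call.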
@app.route('/')
def index():
    return render_template('index.html')
@app.route('/process_json', methods=['POST'])  # assumed path
def process_json():
    """Answer a JSON query of the form {"query": "..."} using the QA chain."""
    content_type = request.headers.get('Content-Type')
    if content_type == 'application/json':
        requestQuery = request.get_json()
        response = chain.run(requestQuery['query'])
        print("Ques:>>>>" + requestQuery['query'] + "\n Ans:>>>" + response)
        return jsonify(botMessage=response)
    else:
        return 'Content-Type not supported!'
@app.route('/file_upload', methods=['POST'])  # assumed path
def file_Upload():
    """Re-index the knowledge base from an uploaded file and rebuild the QA chain."""
    global chain
    file = request.files['file']
    print(uploads_dir)
    saved_path = os.path.join(uploads_dir, secure_filename(file.filename))
    file.save(saved_path)
    loader = UnstructuredFileLoader(saved_path, mode='elements')
    documents = loader.load()
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    texts = text_splitter.split_documents(documents)
    embeddings = OpenAIEmbeddings()
    doc_search = Chroma.from_documents(texts, embeddings)
    chain = VectorDBQA.from_chain_type(llm=OpenAI(), chain_type="stuff", vectorstore=doc_search)
    return render_template("index.html")
@app.route('/KBUpload')  # assumed path
def KBUpload():
    return render_template("KBTrain.html")
@app.route('/aiassist')  # assumed path
def aiassist():
    return render_template("index.html")
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=int(os.environ.get('PORT', 7860)))
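# Example requests against the assumed routes, once the server is running:
#   curl -X POST http://localhost:7860/process_json \
#        -H "Content-Type: application/json" \
#        -d '{"query": "What plans does Jio offer?"}'
#   curl -X POST http://localhost:7860/file_upload -F "file=@new_kb.txt"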