# -*- coding: utf-8 -*- """Deployment.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1RtXMnveLECPLSum0IJcSGtQTk1pGRjNE # Proof of Concept: Breakdown: 1. One must first load the dataset that our group created on Mockaroo based on the guidelines given to us by the client. This dataset models a food delivery business that has 4 tables: Driver, Customer, Orders and Customer support. Each table has various types of data spanning from strings, ints to unique ids. Tables are linked by ids as well. 2. Using the textblob library, we run spell checking on the user input in order to avoid any query generation issues due to misspelt words. 3. We use spacy in order to run named entity recognition; these entities will be used in step 4. 4. Using the named entities and a list of unique values from the dataset, we use tensorflow embeddings and cosine similarity to find the column value most likely being referenced in the user's query. For instance, an input of San Francisco Jail would have a strong cosine similarity with the actual value from the client's column: San Francisco Penitentiary. After the correct name has been found we use regex to substitute the corrected name in place of the user input. 5. Finally, we do the actual query translation from plain text. We first input the formatted query and send it to openai that has already been fed the schema for the query. We then receive the SQL query and call our own hand-crafted SQL-to-MongoDB method that converts into a final MongoDB query. ### User Instructions For the code to function, you need to load the four datasets (driver_data, cust_data, order_data, cust_service_data) from the github repo into your google drive as outlined in the following cells. Our main method first asks the user for their openai key. Then we have some test cases that may contain noun spelling issues, name spelling issues, etc. """ """### **Attention**: Upload all four datasets into your MyDrive directory in google drive""" import pandas as pd import spacy import en_core_web_sm import tensorflow_hub as hub from scipy.spatial import distance from numpy.core.fromnumeric import argmax import openai import re import gradio as gr driver = pd.read_csv('driver_data.csv') customer = pd.read_csv('customer_data.csv') order = pd.read_csv('order_data.csv') service = pd.read_csv('cust_service_data.csv') """# Entity Extraction""" # extract entities, label, label definition from natural language questions and append to dataframe nlp = spacy.load("en_core_web_sm") def EntityExtraction(text:str): # print(text) entities = [] entities_label = [] label_explanation = {} doc = nlp(text) for entity in doc.ents: entities.append(entity.text) entities_label.append(entity.label_) label_explanation[entity.label_] = spacy.explain(entity.label_) return entities, entities_label """# Column Cosine Similarity""" #creating a dictionary of unique values in the dataset #Used for cosine similarity unique_values = {} for column in driver: unique_values[column] = driver[column].unique() for column in customer: unique_values[column] = customer[column].unique() for column in order: if column in ['cust_id', 'driver_id']: unique_values[column] = order[column].unique() unique_values['sales_id'] = service['sales_id'].unique() embed = hub.load("https://tfhub.dev/google/universal-sentence-encoder/4") # Uses TF word embeddings to find the word/phrase in words[1:] most related # to words[0] def ClosestSimilarity(words): embeddings = embed(words) similarities = [1 - distance.cosine(embeddings[0],x) for x in embeddings[1:]] return max(similarities), argmax(similarities) def find_column(item, array = unique_values): best_similarity = 0 best_item = None best_key = None for key in array: values = [str(x) for x in unique_values[key]] values = [item] + values max_similarity, item_similar = ClosestSimilarity(values) if not best_similarity or max_similarity > best_similarity: best_similarity = max_similarity best_item = unique_values[key][item_similar] best_key = key if best_similarity < 0.2: return best_key, item return best_key, best_item """# Query to SQL to MongoDB""" def query_to_SQL_to_MongoDB(query, key, organization): openai.api_key = key # put in the unique key openai.organization = organization # sets the specific parameters of the openai var response = openai.Completion.create( # use the appropriate SQL model and set the parameters accordingly model="text-davinci-003", prompt="### Postgres SQL tables, with their properties:\n#\n# Customer_Support(sales_id, order_id, date)\n# Driver(driver_id, driver_name, driver_address, driver_experience)\n# Customer(cust_id, cust_name, cust_address)\n# Orders(order_id, cust_id, driver_id, date, amount)\n#\n### A query to " + query + ".\nSELECT", temperature=0, max_tokens=150, top_p=1.0, frequency_penalty=0.0, presence_penalty=0.0, stop=["#", ";"] ) SQL = response['choices'][0]['text'] # extract the outputted SQL Query return complex_SQL_to_MongoDB(SQL) def complex_SQL_to_MongoDB(query): keywords = {'INNER', 'FROM', 'WHERE', 'GROUP', 'BY', 'ON', 'SELECT', 'BETWEEN', 'LIMIT', 'AND', 'ORDER'} # keyword set used by my MongoDB function mapper = {} # maps SQL symbols to MongoDB functions mapper['<'] = '$lt' mapper['>'] = '$gt' mapper['!='] = '$ne' query = re.split(r' |\n', query) # split the query on spaces and turn in to array query = [ x for x in query if len(x) > 0] # remove empty strings in the array while query[0][:3] not in ['MAX', 'MIN'] and query[0][:5] != 'COUNT' and query[0] not in keywords: query = query[1:] if query[1] == 'AS': rename = query[2] for i in range(3, len(query)): if query[i] == rename: query[i] = query[0] if len(query[0]) > 3 and (query[0][:3] == 'MAX' or query[0][:3] == 'MIN'): # if the SQL contains a MAX or MIN select then we rewrite the SQL query in an easier format query += ['ORDER', 'BY', query[0][4:-1], 'DESC' if query[0][:3] == 'MAX' else 'ASC', 'LIMIT', '1'] count_str = '' # builds a MongoDB statement if there is a count in the select statement if len(query[0]) > 5 and query[0][:5] == 'COUNT': # if there is indeed a count count_str += ' {$count : ' # construct the count sequence if query[0][6] == '*': count_str += '{} }' # an asterisk means everything else: count_str += query[0][6:-1] + ' }' # otherwise write the actual field it wants count_str += ',' i = 0 # iterator variable while query[i] != 'FROM': # as long as we are still in the select continue because you cannot do select in db.Aggregate i += 1 i = i +1 # ignore the FROM collection = query[i] # table from which the information will be taken i = i + 1 if i < len(query) and query[i] not in keywords: # sometimes SQL queries rename tables but we ignore that in MongoDB i += 1 answer = 'db.' + collection + ".aggregate( " # MongoDB function for aggregation while i < len(query) and query[i] == 'INNER': # if there is an inner join i = i + 2 # ignore the keywords lookup = '{$lookup: { from : "' # MongoDB structure lookup += query[i] + '", localField: "' # specifies home key if query[i+1] not in keywords: # skip renaming of tables i += 1 i = i + 2 lookup += query[i].split('.')[1] + '", foreignField: "' # specifies foreign key i = i+2 lookup += query[i].split('.')[1] + '", as: "' + collection + '"} },' # rename final table to the original table i = i + 1 answer += lookup # add this to the MongoDB query if i < len(query) and query[i] == 'WHERE': # if there is a WHERE clause where = '{$match:' # MongoB keyword count = 0 # tells us if there is an AND in the where clause conditions = '' # stores the actual conditions required while i < len(query) and (query[i] == 'WHERE' or query[i] == 'AND'): count += 1 # add one every time you find a where matching i = i+1 conditions += '{' + (query[i].split('.')[1] if len(query[i].split('.')) > 1 else query[i] ) + " : " # format to MongoDB if query[i+1] == '=': # if there is an equality then use a colon conditions += query[i+2] i = i + 3 elif query[i+1] == 'BETWEEN': # if there are dates then use the specified date format conditions += '{$gt: ISODate(' + query[i+2] + '), $lt: ISODate(' + query[i+4] + ')}' i+= 5 else: # else use the mapper function to map the write symbol here conditions += '{ ' + mapper[query[i+1]] + ' : ' + query[i+2] + ' }' i = i+3 conditions += '},' # end the conditions if count > 1: # if you have been in there for more than once then where += '{ $and: [' + conditions[:-1] + ']}}' # use the AND version of MongoDB else: where += conditions[:-1] + '},' # otherwise end the clause answer += where # add this to the final query if i < len(query) and (query[i] == 'GROUP' or query[i] == 'ORDER'): # if there is a Group BY or Order BY i = i + 2 group = '{$group: { _id: "' + query[i] + '"' # in any case, you use group in MongoDB i += 1 i -= 3 if query[i -3 ] == 'ORDER' else 0 # depending on which one you continue if i < len(query) and query[i] == 'ORDER' and len(query[i+2]) > 5 and query[i+2][0:5] == 'COUNT': # if there is an order by with count group += ', count: {$count: ' + ('{}' if query[i+2].split('(')[1][:-1] == '*' else ('{' + query[i+2].split('(')[1][:-1].split('.')[1] + '}') ) + '} }}, { $sort: {count : ' + ('1' if query[i+3] == 'ASC' else '-1') + '}},' elif i < len(query) and query[i] == 'ORDER': # if there is an order by without count group += '} }, { $sort: {' + query[i+2] + ' : ' + ('1' if query[i+3] == 'ASC' else '-1') + '}},' else:group += '} },' # if there is no orde by and only group i += 4 answer += group # add answer to group if i < len(query) and query[i] == 'LIMIT': # if there is a limit then add that too answer += '{ $limit : ' + query[i+1] + ' },' answer += '' if count_str == ',' else count_str# finally add back any count command answer = answer[:-1] answer += ')' # end the whole query return answer # return """# Main method""" def query_creator(key, organization, plain_query): # find named entities in text, e.g. names, addresses, etc. entities, entities_label = EntityExtraction(plain_query) modified_query = plain_query #print(entities) #print(entities_label) #For each named entity in the query for i in range(len(entities)): if entities_label[i] in ['ORDINAL', 'CARDINAL', 'DATE']: continue #Use cosine similarity on each entity to find closest matching string from tables. col, best_match = find_column(entities[i]) #substitute table string in place of partial match found in previous step modified_query = re.sub(entities[i],best_match,modified_query) print("Modified input: ", modified_query) #Convert adjusted plain text query to SQL, then MongoDB MongoDB_query = query_to_SQL_to_MongoDB(modified_query, key, organization) return MongoDB_query iface = gr.Interface(fn=query_creator, inputs= [gr.Textbox(label = "API Key"), gr.Textbox(label = "Organization Key"), gr.Textbox(label = "Plain Text Query")], outputs=gr.Textbox(label = "MongoDB Query"), ) iface.launch()