import json
import re

import requests


class VectaraQuery:
    def __init__(self, api_key: str, customer_id: str, corpus_id: str, prompt_name: str = None):
        self.customer_id = customer_id
        self.corpus_id = corpus_id
        self.api_key = api_key
        self.prompt_name = prompt_name if prompt_name else "vectara-experimental-summary-ext-2023-12-11-large"
        self.conv_id = None

    def get_body(self, user_response: str):
        corpora_key_list = [{
            'customer_id': self.customer_id,
            'corpus_id': self.corpus_id,
            'lexical_interpolation_config': {'lambda': 0.025},
        }]

        # Escape double quotes so the interpolated text keeps the prompt valid JSON.
        user_response = user_response.replace('"', '\\"')
        prompt = f'''
        [
            {{
                "role": "system",
                "content": "You are an assistant that provides information about drink names based on a given corpus."
            }},
            {{
                "role": "user",
                "content": "{user_response}"
            }}
        ]
        '''
        return {
            'query': [
                {
                    'query': user_response,
                    'start': 0,
                    'numResults': 10,
                    'corpusKey': corpora_key_list,
                    'context_config': {
                        'sentences_before': 2,
                        'sentences_after': 2,
                        'start_tag': "%START_SNIPPET%",
                        'end_tag': "%END_SNIPPET%",
                    },
                    # Pass the chat-style prompt to the summarizer so it is actually
                    # sent with the request (it was previously built but never used).
                    # Field names are assumed to follow Vectara's v1 query schema;
                    # verify them against the API version you are targeting.
                    'summary': [
                        {
                            'summarizerPromptName': self.prompt_name,
                            'promptText': prompt,
                            'responseLang': 'eng',
                            'maxSummarizedResults': 5,
                        }
                    ],
                }
            ]
        }

    def get_headers(self):
        return {
            "Content-Type": "application/json",
            "Accept": "application/json",
            "customer-id": self.customer_id,
            "x-api-key": self.api_key,
            "grpc-timeout": "60S",
        }

    def submit_query(self, query_str: str):
        endpoint = "https://api.vectara.io/v1/stream-query"
        body = self.get_body(query_str)

        response = requests.post(
            endpoint,
            data=json.dumps(body),
            verify=True,
            headers=self.get_headers(),
            stream=True,
        )
        if response.status_code != 200:
            print(f"Query failed with code {response.status_code}, reason {response.reason}, text {response.text}")
            return "Sorry, something went wrong. Please try again later."

        for line in response.iter_lines():
            if not line:  # skip keep-alive newlines
                continue
            data = json.loads(line.decode('utf-8'))
            response_set = data['result'].get('responseSet')
            if not response_set:
                continue
            for result in response_set:
                text = result['text']
                # Pull the structured fields out of the snippet text; skip results
                # that do not match the expected "Reason/Alternative/Notes" layout
                # instead of crashing on a failed regex match.
                reason = re.search(r"Reason Why it Can't be Used: (.*?)\n", text)
                alternative = re.search(r"Alternative: (.*?)\n", text)
                notes = re.search(r"Notes: (.*?)\n", text)
                if not (reason and alternative and notes):
                    continue
                return (
                    f"Reason: {reason.group(1)}\n"
                    f"Alternative: {alternative.group(1)}\n"
                    f"Notes: {notes.group(1)}"
                )

        return "No relevant information found."