import os
from datetime import datetime
import json
from typing import Any, Dict, List, Tuple, Union
import requests
import numpy as np
import pandas as pd
import PyPDF2
from openai import OpenAI
from together import Together
# Credit
def current_year():
    """Return the current calendar year."""
    now = datetime.now()
    return now.year
def read_and_textify(
    files: List[Any], chunk_size: int = 2  # Number of sentences per chunk
) -> Tuple[List[str], List[str]]:
    """
    Reads PDF files and extracts text from each page, breaking the text into fixed-size segments.

    This function iterates over a list of uploaded PDF files, extracts the text from each page,
    splits it on sentence boundaries (". "), and compiles a list of chunks of approximately
    'chunk_size' sentences each, together with corresponding source identifiers.

    Args:
        files (List[Any]): A list of uploaded PDF files (file-like objects, e.g. Streamlit
            UploadedFile instances, accepted by PyPDF2.PdfReader).
        chunk_size (int): The number of sentences per text segment. Defaults to 2.

    Returns:
        Tuple[List[str], List[str]]: A tuple containing two lists:
            1. A list of strings, where each string is a segment of text extracted from a PDF page.
            2. A list of strings identifying the source of each segment (file name, page number, and chunk number).
    """
text_list = [] # List to store extracted text segments
sources_list = [] # List to store source information
# Iterate over each file
for file in files:
pdfReader = PyPDF2.PdfReader(file) # Create a PDF reader object
# Iterate over each page in the PDF
for i in range(len(pdfReader.pages)):
pageObj = pdfReader.pages[i] # Get the page object
text = pageObj.extract_text() # Extract text from the page
if text:
                # Split the page text into sentence-like units and group them into chunks
                sentences = text.split(". ")
                for j in range(0, len(sentences), chunk_size):
                    chunk = ". ".join(sentences[j : j + chunk_size]) + "."
                    text_list.append(chunk)
# Create a source identifier for each chunk and add it to the list
sources_list.append(f"{file.name}_page_{i}_chunk_{j // chunk_size}")
else:
# If no text extracted, still add a placeholder
text_list.append("")
sources_list.append(f"{file.name}_page_{i}_chunk_0")
pageObj.clear() # Clear the page object (optional, for memory management)
return text_list, sources_list
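# Usage sketch (hypothetical input): `uploaded_files` stands in for the file-like
# PDF objects the caller supplies, e.g. from Streamlit's st.file_uploader with
# accept_multiple_files=True.
#   texts, sources = read_and_textify(uploaded_files, chunk_size=2)
#   # sources[0] -> e.g. "report.pdf_page_0_chunk_0"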
def read_and_textify_advanced(
    files: List[Any], chunk_size: int = 2  # Number of context sentences on each side
) -> Tuple[List[str], List[str]]:
    """
    Reads PDF files and extracts text from each page, breaking the text into overlapping segments.

    Unlike 'read_and_textify', this variant builds a sliding window around every sentence:
    each chunk contains the sentence itself plus up to 'chunk_size' sentences of context on
    either side, so consecutive chunks overlap.

    Args:
        files (List[Any]): A list of uploaded PDF files (file-like objects accepted by
            PyPDF2.PdfReader).
        chunk_size (int): The number of context sentences on each side of the center sentence.
            Defaults to 2.

    Returns:
        Tuple[List[str], List[str]]: A tuple containing two lists:
            1. A list of strings, where each string is an overlapping segment of text from a PDF page.
            2. A list of strings identifying the source of each segment (file name, page number, and chunk number).
    """
text_list = [] # List to store extracted text segments
sources_list = [] # List to store source information
# Iterate over each file
for file in files:
pdfReader = PyPDF2.PdfReader(file) # Create a PDF reader object
# Iterate over each page in the PDF
for i in range(len(pdfReader.pages)):
pageObj = pdfReader.pages[i] # Get the page object
text = pageObj.extract_text() # Extract text from the page
if text:
                # Split the page text into sentence-like units
                sentences = text.split(". ")
                for j in range(len(sentences)):
                    # Build a window from j-chunk_size to j+chunk_size around sentence j
                    start = max(0, j - chunk_size)
                    end = min(len(sentences), j + chunk_size + 1)
                    chunk = ". ".join(sentences[start:end]) + "."
text_list.append(chunk)
# Create a source identifier for each chunk and add it to the list
sources_list.append(f"{file.name}_page_{i}_chunk_{j}")
else:
# If no text extracted, still add a placeholder
text_list.append("")
sources_list.append(f"{file.name}_page_{i}_chunk_0")
pageObj.clear() # Clear the page object (optional, for memory management)
return text_list, sources_list
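# Usage sketch: with chunk_size=2 each sentence j becomes the center of a window
# spanning sentences j-2..j+2, so consecutive chunks overlap and every sentence
# appears in several chunks (one window per sentence).
#   texts, sources = read_and_textify_advanced(uploaded_files, chunk_size=2)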
openai_client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
def list_to_nums(sentences: List[str]) -> List[List[float]]:
"""
Converts a list of sentences into a list of numerical embeddings using OpenAI's embedding model.
Args:
- sentences (List[str]): A list of sentences (strings).
Returns:
- List[List[float]]: A list of lists of numerical embeddings.
"""
# Initialize the list to store embeddings
embeddings = []
# Loop through each sentence to convert to embeddings
for sentence in sentences:
# Use the OpenAI API to get embeddings for the sentence
response = openai_client.embeddings.create(
input=sentence, model="text-embedding-3-small"
)
embeddings.append(response.data[0].embedding)
return embeddings
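# Usage sketch: text-embedding-3-small returns 1536-dimensional vectors, so the
# result is one 1536-float list per input sentence.
#   vectors = list_to_nums(["What is quantization?", "Mapping values to discrete levels."])
#   # len(vectors) == 2; len(vectors[0]) == 1536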
def call_gpt(prompt: str, content: str) -> str:
"""
Sends a structured conversation context including a system prompt, user prompt,
and additional background content to the GPT-3.5-turbo model for a response.
This function is responsible for generating an AI-powered response by interacting
with the OpenAI API. It puts together a preset system message, a formatted user query,
and additional background information before requesting the completion from the model.
Args:
prompt (str): The main question or topic that the user wants to address.
content (str): Additional background information or details relevant to the prompt.
Returns:
str: The generated response from the GPT model based on the given prompts and content.
    Note: 'openai_client' is assumed to be an already created and authenticated
        OpenAI client instance, set up prior to calling this function.
"""
# Generates a response from the model based on the interactive messages provided
response = openai_client.chat.completions.create(
model="gpt-3.5-turbo", # The AI model being queried for a response
messages=[
# System message defining the assistant's role
{"role": "system", "content": "You are a helpful assistant."},
# User message containing the prompt
{"role": "user", "content": f"I want to ask you a question: {prompt}"},
# Assistant message asking for background content
{"role": "assistant", "content": "What is the background content?"},
# User providing the background content
{"role": "user", "content": content},
],
)
# Extracts and returns the response content from the model's completion
return response.choices[0].message.content
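# Usage sketch: in this module, `content` is typically the top-ranked chunks
# returned by query_search (see below).
#   answer = call_gpt(
#       prompt="What does the report conclude?",
#       content="<concatenated top-ranked chunks>",
#   )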
together_client = Together(api_key=os.environ["TOGETHER_API_KEY"])
def call_llama(prompt: str) -> str:
"""
Send a prompt to the Llama model and return the response.
Args:
prompt (str): The input prompt to send to the Llama model.
Returns:
str: The response from the Llama model.
"""
# Create a completion request with the prompt
response = together_client.chat.completions.create(
# Use the Llama-3-8b-chat-hf model
model="meta-llama/Llama-3-8b-chat-hf",
# Define the prompt as a user message
messages=[{"role": "user", "content": prompt}], # Use the input prompt
)
# Return the content of the first response message
return response.choices[0].message.content
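# Usage sketch:
#   reply = call_llama("Summarize the benefits of 4-bit quantization in two sentences.")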
def call_llama2(prompt: str, max_new_tokens: int = 50, temperature: float = 0.9) -> str:
"""
Calls the Llama API to generate text based on a given prompt, controlling the length and randomness.
Args:
prompt (str): The prompt text to send to the Llama model for text generation.
max_new_tokens (int, optional): The maximum number of tokens that the model should generate. Defaults to 50.
temperature (float, optional): Controls the randomness of the output. Lower values make the model more deterministic.
A higher value increases randomness. Defaults to 0.9.
Returns:
str: The generated text response from the Llama model.
Raises:
Exception: If the API call fails and returns a non-200 status code, it raises an exception with the error details.
"""
# API endpoint for the Llama model
api_url = "https://v6rkdcyir7.execute-api.us-east-1.amazonaws.com/beta"
# Configuration for the request body
json_body = {
"body": {
"inputs": f"<s>[INST] {prompt} [/INST]",
"parameters": {
"max_new_tokens": max_new_tokens,
"top_p": 0.9, # Fixed probability cutoff to select tokens with cumulative probability above this threshold
"temperature": temperature
}
}
}
# Headers to indicate that the payload is JSON
headers = {"Content-Type": "application/json"}
    # Perform the POST request to the Llama API
    response = requests.post(api_url, headers=headers, json=json_body)
    # Fail fast if the API call did not succeed (otherwise parsing below would
    # raise a confusing KeyError instead of the intended exception)
    if response.status_code != 200:
        raise Exception(f"Error calling Llama API: {response.status_code}")
    # Parse the JSON response
    response_body = response.json()["body"]
    # The body is itself a JSON-encoded string; decode it into a list
    body_list = json.loads(response_body)
    # Extract the 'generated_text' from the first item in the list
    generated_text = body_list[0]["generated_text"]
    # Separate the answer from the instruction wrapper
    answer = generated_text.split("[/INST]")[-1].strip()
    return answer  # Return the text generated by the model
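# Usage sketch (the API Gateway endpoint above is private, so this only works
# where that deployment is reachable):
#   reply = call_llama2("Explain retrieval-augmented generation.", max_new_tokens=100)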
def quantize_to_kbit(arr: Union[np.ndarray, Any], k: int = 16) -> np.ndarray:
"""Converts an array to a k-bit representation by normalizing and scaling its values.
Args:
arr (Union[np.ndarray, Any]): The input array to be quantized.
k (int): The number of levels to quantize to. Defaults to 16 for 4-bit quantization.
Returns:
np.ndarray: The quantized array with values scaled to 0 to k-1.
"""
if not isinstance(arr, np.ndarray): # Check if input is not a numpy array
arr = np.array(arr) # Convert input to a numpy array
    arr_min = arr.min()  # Minimum value in the array
    arr_max = arr.max()  # Maximum value in the array
    if arr_max == arr_min:
        # Guard against division by zero when the array is constant
        return np.zeros_like(arr, dtype=int)
    # Normalize array values to [0, 1]
    normalized_arr = (arr - arr_min) / (arr_max - arr_min)
    # Scale normalized values to 0..(k-1) and convert to integers
    return np.round(normalized_arr * (k - 1)).astype(int)
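# Worked example (pure numpy, deterministic):
#   quantize_to_kbit([0.0, 0.5, 1.0], k=4)  # -> array([0, 2, 3])
# Values are min-max normalized to [0, 1], scaled by k - 1 = 3, then rounded
# (note 1.5 rounds to 2 under numpy's round-half-to-even rule).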
def quantized_influence(
    arr1: np.ndarray, arr2: np.ndarray, k: int = 16, use_dagger: bool = False
) -> Union[float, Tuple[float, List[float]]]:
    """
    Calculates a weighted measure of influence based on quantized versions of the input arrays,
    optionally applying a transformation based on local averages.

    Args:
        arr1 (np.ndarray): First input array to be quantized and analyzed.
        arr2 (np.ndarray): Second input array to be quantized and used for influence measurement.
        k (int): The quantization level. Defaults to 16 for 4-bit quantization.
        use_dagger (bool): Flag to apply a transformation based on local averages. Defaults to False.

    Returns:
        Union[float, Tuple[float, List[float]]]: The quantized influence measure alone when
        'use_dagger' is False, or a tuple of the measure and a list of transformed values
        based on local estimates when 'use_dagger' is True.
    """
# Quantize both arrays to k levels
arr1_quantized = quantize_to_kbit(arr1, k)
arr2_quantized = quantize_to_kbit(arr2, k)
# Find unique quantized values in arr1
unique_values = np.unique(arr1_quantized)
# Compute the global average of quantized arr2
total_samples = len(arr2_quantized)
y_bar_global = np.mean(arr2_quantized)
    # Compute squared deviations of local means from the global mean, weighted by group size
weighted_local_averages = [
(np.mean(arr2_quantized[arr1_quantized == val]) - y_bar_global) ** 2
* len(arr2_quantized[arr1_quantized == val]) ** 2
for val in unique_values
]
qim = np.sum(weighted_local_averages) / (
total_samples * np.std(arr2_quantized)
) # Calculate the quantized influence measure
if use_dagger:
# If use_dagger is True, compute local estimates and map them to unique quantized values
local_estimates = [
np.mean(arr2_quantized[arr1_quantized == val]) for val in unique_values
]
daggers = {
unique_values[i]: v for i, v in enumerate(local_estimates)
} # Map unique values to local estimates
def find_val_(i: int) -> float:
"""Helper function to map quantized values to their local estimates."""
return daggers[i]
# Apply transformation based on local estimates
daggered_values = list(map(find_val_, arr1_quantized))
return qim, daggered_values
    else:
        # If use_dagger is False, return only the scalar influence measure
        return qim
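# Usage sketch: `vec_a` and `vec_b` stand in for two embedding vectors. With
# use_dagger=False the result is a single float; higher scores mean the
# quantization levels of vec_a explain more of vec_b's variation.
#   score = quantized_influence(np.array(vec_a), np.array(vec_b), k=16)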
def query_search(
    prompt: str,
    sentences: List[str],
    query_database: List[List[float]],
    sources: List[str],
    levels: int,
) -> pd.DataFrame:
    """
    Takes a text prompt and searches a database of embedded sentences by converting the prompt
    to an embedding and scoring each entry with the quantized influence metric.

    Args:
        prompt (str): A text prompt to search for in the database.
        sentences (List[str]): The original text segments corresponding to each embedding.
        query_database (List[List[float]]): Precomputed embeddings, one per text segment.
        sources (List[str]): Source identifiers for each text segment.
        levels (int): The quantization level 'k' passed to 'quantized_influence'.

    Returns:
        pd.DataFrame: A DataFrame containing the sentences, their sources, and the computed
        scores, sorted by the quantized influence metric ('qim') in descending order.
    """
# Convert the prompt to its numerical embedding
prompt_embed_ = list_to_nums([prompt])
# Calculate scores for each item in the database using the quantized influence metric
scores = [
[
sentences[i], # The sentence itself
# query_database[i], # Embedding of the sentence
sources[i], # Source of the sentence
quantized_influence(
prompt_embed_[0], query_database[i], k=levels, use_dagger=False
), # Score calculation
]
for i in range(len(query_database))
]
# Convert the list of scores into a DataFrame
refs = pd.DataFrame(scores)
# Rename columns for clarity
refs = refs.rename(
# columns={0: "sentences", 1: "query_embeddings", 2: "page no", 3: "qim"}
columns={0: "sentences", 1: "page no", 2: "qim"}
)
# Sort the DataFrame based on the 'qim' score in descending order
refs = refs.sort_values(by="qim", ascending=False)
return refs
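# End-to-end sketch of the retrieval pipeline defined in this module
# (`uploaded_files` is hypothetical; every other name is defined above):
#   texts, sources = read_and_textify(uploaded_files, chunk_size=2)
#   embeddings = list_to_nums(texts)
#   results = query_search("What is the main finding?", texts, embeddings, sources, levels=16)
#   top_context = " ".join(results["sentences"].head(5))
#   answer = call_gpt("What is the main finding?", top_context)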