# Import packages
import os
import copy
import json
import shutil
from io import BytesIO

import numpy as np
import requests
import gradio as gr
from PIL import Image
from tqdm import tqdm

from llama_cpp import Llama
from huggingface_hub import hf_hub_download
from transformers import pipeline
from datasets import load_dataset
from bark import SAMPLE_RATE, generate_audio, preload_models

import chromadb
from chromadb.config import Settings
from chromadb.utils import embedding_functions
from chromadb.utils.embedding_functions import OpenCLIPEmbeddingFunction
from chromadb.utils.data_loaders import ImageLoader
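# Note: the packages above are assumed to be listed in the Space's requirements.txt
# (gradio, llama-cpp-python, chromadb, transformers, datasets, huggingface_hub,
# bark, pillow, tqdm); exact versions are not specified in this script.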
# Initialize the Llama model
llm = Llama(
    ## original model
    # model_path=hf_hub_download(
    #     repo_id="microsoft/Phi-3-mini-4k-instruct-gguf",
    #     filename="Phi-3-mini-4k-instruct-q4.gguf",
    # ),
    ## compressed model
    model_path=hf_hub_download(
        repo_id="TheBloke/CapybaraHermes-2.5-Mistral-7B-GGUF",
        filename="capybarahermes-2.5-mistral-7b.Q2_K.gguf",
    ),
    n_ctx=2048,
    n_gpu_layers=50,  # Adjust based on your VRAM
)
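# Optional sanity check (illustrative only, not part of the app flow): the Llama
# object is callable with a raw prompt, e.g.
#   out = llm("Q: What is the Schnoor quarter in Bremen? A:", max_tokens=64)
#   print(out["choices"][0]["text"])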
# Use a CLIP model for multimodal (image + text) embeddings
client = chromadb.PersistentClient(path="DB")
embedding_function = OpenCLIPEmbeddingFunction()
image_loader = ImageLoader()  # required when the collection loads images from URIs
# Initialize separate collections for image and text data
def create_collection(name_image_collection, name_text_collection):
    collection_images = client.create_collection(
        name=name_image_collection,
        embedding_function=embedding_function,
        data_loader=image_loader,
    )
    collection_text = client.create_collection(
        name=name_text_collection,
        embedding_function=embedding_function,
    )
    return collection_images, collection_text

collection_images, collection_text = create_collection(
    name_image_collection="collection_images",
    name_text_collection="collection_text",
)
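# Note: create_collection raises an error if a collection with the same name already
# exists (e.g. after a restart with the persistent "DB" folder intact);
# client.get_or_create_collection(...) is the idempotent alternative.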
# Get the URIs of the images and add them to the image collection
IMAGE_FOLDER = 'images'
image_uris = sorted([os.path.join(IMAGE_FOLDER, image_name) for image_name in os.listdir(IMAGE_FOLDER) if not image_name.endswith('.txt')])
ids = [str(i) for i in range(len(image_uris))]
collection_images.add(ids=ids, uris=image_uris)
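# OpenCLIP embeds images and text into a shared space, so the image collection can be
# queried with plain text. Illustrative example (not executed here):
#   hits = collection_images.query(query_texts=["Bremen town hall"], include=["uris"], n_results=1)
#   print(hits["uris"][0][0])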
# Add the text documents to the text collection
default_ef = embedding_functions.DefaultEmbeddingFunction()
TEXT_FOLDER = "text"
text_pth = sorted([os.path.join(TEXT_FOLDER, file_name) for file_name in os.listdir(TEXT_FOLDER) if file_name.endswith('.txt')])
list_of_text = []
for path in text_pth:
    with open(path, 'r') as f:
        list_of_text.append(f.read())
ids_txt_list = ['id' + str(i) for i in range(len(list_of_text))]
collection_text.add(
    documents=list_of_text,
    ids=ids_txt_list,
)
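# Note: collection_text was created with the CLIP embedding function above;
# default_ef (Chroma's default sentence-transformer embedder) is instantiated
# but not attached to any collection in this script.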
# Initialize the transcriber
transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base.en")
# Preload TTS models
preload_models()
# Background image for the Gradio UI (served via allowed_paths at launch)
image_path = "dom_bremen.jpg"
absolute_path = os.path.abspath(image_path)
# Convert recorded audio (sample_rate, waveform array) to text with Whisper
def transcribe(audio):
    sr, y = audio
    y = y.astype(np.float32)
    y /= np.max(np.abs(y))  # normalize the waveform to [-1, 1]
    return transcriber({"sampling_rate": sr, "raw": y})["text"]
# Fixed Bark voice preset used for text-to-speech
fixed_prompt = "en_speaker_5"

def generate_audio_output(text):
    audio_arr = generate_audio(text, history_prompt=fixed_prompt)
    audio_arr = (audio_arr * 32767).astype(np.int16)
    return (SAMPLE_RATE, audio_arr)
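# Bark returns float audio in [-1, 1] at SAMPLE_RATE (24 kHz); scaling by 32767 and
# casting to int16 yields the (sample_rate, int16 array) tuple that gr.Audio accepts.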
# Function to retrieve context and an image from the vector store and generate a response
def generate_text(message, max_tokens=150, temperature=0.2, top_p=0.9):
    try:
        # Retrieve context and image from the vector store
        # ('uris' is included so the image path can be returned to the UI)
        retrieved_image = collection_images.query(query_texts=message, include=['uris', 'data'], n_results=1)
        context_text = collection_text.query(query_texts=message, n_results=1)
        context = context_text['documents'][0] if context_text else "No relevant context found."
        image_uris = retrieved_image['uris'][0] if retrieved_image else None
        image_url = image_uris[0] if image_uris else None

        # Log the image URL for debugging
        print(f"Retrieved image URL: {image_url}")

        # Create the prompt for the LLM
        prompt_template = (
            f"Context: {context}\n\n"
            f"Question: {message}\n\n"
            f"You are a guide to the city of Bremen in Germany; generate a response based on the context."
        )

        # Generate text using the language model
        output = llm(
            prompt_template,
            temperature=temperature,
            top_p=top_p,
            top_k=50,
            repeat_penalty=1.1,
            max_tokens=max_tokens,
        )

        # Clean up the generated text
        input_string = output['choices'][0]['text'].strip()
        cleaned_text = input_string.strip("[]'").replace('\\n', '\n')
        return cleaned_text, image_url
    except Exception as e:
        return f"Error: {str(e)}", None
# Function to load an image from a file path
def load_image_from_path(file_path):
    try:
        img = Image.open(file_path)
        return img
    except Exception as e:
        print(f"Error loading image: {str(e)}")
        return None
def process_audio(audio):
    # Transcribe the audio (currently bypassed in favour of a fixed demo query)
    # transcribed_text = transcribe(audio)
    message = "Bremen Schnoor"
    text_output, image_path = generate_text(message)
    if image_path:
        image_output = load_image_from_path(image_path)
    else:
        image_output = None  # Handle cases where no image is retrieved
    # return text_output, image_output
    # Generate the audio output
    audio_output = generate_audio_output(text_output)
    return text_output, audio_output, image_output
# Text-to-speech helper (duplicates generate_audio_output; not wired into the interface)
def gen_tts(text):
    audio_arr = generate_audio(text, history_prompt=fixed_prompt)
    audio_arr = (audio_arr * 32767).astype(np.int16)
    return (SAMPLE_RATE, audio_arr)
# Define the Gradio interface
# with gr.Blocks() as app:
demo = gr.Interface(
    fn=process_audio,
    inputs=gr.Audio(sources=["microphone"], label="Input Audio"),
    outputs=[
        gr.Textbox(label="Generated Text"),
        gr.Audio(label="Generated Audio"),
        gr.Image(label="Retrieved Image"),  # New output component for the image
    ],
    title="moinBremen - Your Personal Tour Guide for our City of Bremen",
    description="Ask your question about Bremen by speaking into the microphone. The system will transcribe your question, generate a response, and read it out loud.",
    css=""".gradio-container {
        background: url('file=/content/dom_bremen.jpg') no-repeat center center fixed;
        background-size: cover;
    }""",
    cache_examples=False,
)

demo.launch(allowed_paths=[absolute_path])
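# Note: allowed_paths whitelists the local background image so Gradio may serve it;
# the css above references the Colab-style path '/content/dom_bremen.jpg', which may
# differ from absolute_path when the app runs outside Colab.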