from huggingface_hub import InferenceClient
import gradio as gr
import os
import re
import requests
import http.client
import typing
import urllib.request
import vertexai
from vertexai.generative_models import GenerativeModel, Image
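# Write the Google Cloud service-account key stored in the Space's
# `credentials` secret to disk so Vertex AI can authenticate, then
# initialize the Gemini (Vertex AI) and Gemma (Hugging Face Inference) clients.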
with open(".config/application_default_credentials.json", 'w') as file:
file.write(str(os.getenv('credentials')))
vertexai.init(project=os.getenv('project_id'))
model = GenerativeModel("gemini-1.0-pro-vision")
client = InferenceClient("google/gemma-7b-it")
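# Scan free-form text for direct image links and return the last one that
# actually serves an image, verified with a lightweight HEAD request.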
def extract_image_urls(text):
url_regex = r"(https?:\/\/.*\.(?:png|jpg|jpeg|gif|webp|svg))"
image_urls = re.findall(url_regex, text, flags=re.IGNORECASE)
valid_image_url = ""
for url in image_urls:
try:
            response = requests.head(url, timeout=5)  # HEAD avoids downloading the body; timeout keeps the UI responsive
if response.status_code in range(200, 300) and 'image' in response.headers.get('content-type', ''):
valid_image_url = url
except requests.exceptions.RequestException:
pass # Ignore inaccessible URLs
return valid_image_url
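# Fetch an image over HTTP and wrap the raw bytes in a Vertex AI Image.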
def load_image_from_url(image_url: str) -> Image:
with urllib.request.urlopen(image_url) as response:
response = typing.cast(http.client.HTTPResponse, response)
image_bytes = response.read()
return Image.from_bytes(image_bytes)
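# Caption an image with Gemini Pro Vision so the text-only Gemma model can
# reason about its contents.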
def search(url):
image = load_image_from_url(url)
response = model.generate_content([image,"Describe what is shown in this image."])
return response.text
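# Build a Gemma-format chat prompt: every turn is wrapped in
# <start_of_turn>user / <start_of_turn>model ... <end_of_turn> markers, and
# cust_p is a template with USER_INPUT standing in for the new message.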
def format_prompt(message, history, cust_p):
    prompt = ""
    if history:
        for user_prompt, bot_response in history:
            prompt += f"<start_of_turn>user\n{user_prompt}<end_of_turn>\n"
            prompt += f"<start_of_turn>model\n{bot_response}<end_of_turn>\n"
    prompt += cust_p.replace("USER_INPUT", message)
    return prompt
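# Streaming chat handler for gr.ChatInterface: captions any image URL found
# in the message via Gemini, then streams Gemma's reply token by token.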
def generate(
prompt, history, system_prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
):
    custom_prompt = "<start_of_turn>user\nUSER_INPUT<end_of_turn>\n<start_of_turn>model\n"
temperature = float(temperature)
if temperature < 1e-2:
temperature = 1e-2
top_p = float(top_p)
generate_kwargs = dict(
temperature=temperature,
max_new_tokens=max_new_tokens,
top_p=top_p,
repetition_penalty=repetition_penalty,
do_sample=True,
seed=42,
)
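    # If the message contains an image URL, replace it in-place with a
    # Gemini-generated caption before prompting the text-only Gemma model.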
image = extract_image_urls(prompt)
if image:
image_description = "Image Description: " + search(image)
prompt = prompt.replace(image, image_description)
print(prompt)
    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}" if system_prompt else prompt, history, custom_prompt)
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        output += response.token.text
        # gr.ChatInterface expects the partial assistant reply as a string and
        # manages the chat history itself.
        yield output
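# Controls shown in the ChatInterface accordion; their order must match
# generate()'s parameters after (prompt, history).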
additional_inputs=[
gr.Textbox(
label="System Prompt",
max_lines=1,
interactive=True,
),
gr.Slider(
label="Temperature",
value=0.9,
minimum=0.0,
maximum=1.0,
step=0.05,
interactive=True,
info="Higher values produce more diverse outputs",
),
gr.Slider(
label="Max new tokens",
value=256,
minimum=0,
        maximum=1024,
step=64,
interactive=True,
info="The maximum numbers of new tokens",
),
gr.Slider(
label="Top-p (nucleus sampling)",
value=0.90,
minimum=0.0,
maximum=1,
step=0.05,
interactive=True,
info="Higher values sample more low-probability tokens",
),
gr.Slider(
label="Repetition penalty",
value=1.2,
minimum=1.0,
maximum=2.0,
step=0.05,
interactive=True,
info="Penalize repeated tokens",
)
]
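# Example prompt exercising the image-URL flow; the trailing Nones leave the
# additional inputs at their defaults.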
examples=[["What are they doing here https://upload.wikimedia.org/wikipedia/commons/3/38/Two_dancers.jpg ?", None, None, None, None, None]]
gr.ChatInterface(
fn=generate,
chatbot=gr.Chatbot(show_label=True, show_share_button=True, show_copy_button=True, likeable=True, layout="bubble", bubble_full_width=False),
additional_inputs=additional_inputs,
title="Gemma Gemini Multimodal Chatbot",
description="Gemini Sprint submission by Rishiraj Acharya. Uses Google's Gemini 1.0 Pro Vision multimodal model from Vertex AI with Google's Gemma 7B Instruct model from Hugging Face. Google Cloud credits are provided for this project.",
theme="Soft",
examples=examples,
concurrency_limit=20,
).launch(show_api=False)