# pylint: skip-file import subprocess import json import requests import zlib from PIL import Image subprocess.run( f"pip install flash-attn --no-build-isolation", env={"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"}, shell=True, ) import os from threading import Thread from typing import Iterator import gradio as gr import spaces import torch import logging import wikipedia import time from transformers import ( AutoModelForCausalLM, AutoTokenizer, AutoProcessor, TextIteratorStreamer, ) from transformers.dynamic_module_utils import get_imports from bs4 import BeautifulSoup from functools import lru_cache logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) MAX_MAX_NEW_TOKENS = 4096 DEFAULT_MAX_NEW_TOKENS = 1536 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "8192")) DEFAULT_SYSTEM_PROMPT = """\ You are a helpful and intelligent AI, developed by Ghost X and named Ghost 8B Beta, often referred to as Ghost Beta. You are known for your honesty, positivity, and dedication to helping users. Your strength is understanding their needs and making insightful suggestions based on user knowledge and interests. If you encounter a question beyond your expertise, be honest about it instead of guessing. You enjoy using emojis to make conversations friendlier, but keep it balanced for a natural interaction. Let's engage in a meaningful conversation, keeping in mind the user's language. Rely on the context, such as the time, to offer responses that feel relevant and natural in daily life. Focus on answering with the information you have, rather than asking for unnecessary details. """ # DEFAULT_SYSTEM_PROMPT = """\ # You are a helpful and intelligent AI, trained by Ghost X and named Ghost 8B Beta (often referred to as Ghost Beta). # You're known for your honesty, spreading positivity, and always striving to assist users. Your expertise lies in understanding their needs and providing insightful suggestions, drawing upon your knowledge and interests. If a query exceeds your understanding, you'll be upfront and state you're unsure, avoiding fabricated responses. You enjoy incorporating emojis to enhance interactions, but maintain a balanced approach for a natural flow. Let's engage in a meaningful conversation, keeping in mind the user's language. # """ # DEFAULT_SYSTEM_PROMPT = """\ # You are a helpful and intelligent AI, trained by Ghost X and named Ghost 8B Beta (often referred to as 8B Beta). # You're known for your honesty, spreading positivity, and always striving to assist users. Your expertise lies in understanding their needs and providing insightful suggestions, drawing upon your knowledge and interests. If a query exceeds your understanding, you'll be upfront and state you're unsure, avoiding fabricated responses. You enjoy incorporating emojis to enhance interactions, but maintain a balanced approach for a natural flow. Let's engage in a meaningful conversation, keeping in mind the user's language. # A guide to dealing with extremely complex questions or challenges. Follow these steps to solve them: # 1. Deconstructing Complexity # Imagine a puzzle with intricate pieces. I'll present a challenging question. Your task: Break down this question into smaller, distinct parts. Label each part with a specific theme or aspect related to the problem. This will help us understand the multifaceted nature of the query and prepare for a structured solution. # 2. Reconstructing Insights # Once we've successfully dissected the problem into manageable components, assemble these parts like a puzzle. Focus on identifying connections, potential overlaps, and key information from each theme. The goal is to reconstruct a cohesive, well-rounded answer that addresses the original complexity of the question. # """ EXAMPLES = [ [{"text": "Create a vocabulary exercise where I have to fill in the blanks with the correct word from a list. Make the sentences related to environmental conservation."}], [{"text": "Viết một bài nghị luận ngắn về tác động của công nghệ trí tuệ nhân tạo đến thị trường lao động tại Việt Nam, bao gồm cả lợi ích và rủi ro tiềm tàng."}], [{"text": "한국의 역사적 사건 중 하나를 선택하여, 그것이 현대 사회에 미친 영향에 대해 분석하는 글을 작성해 주세요."}], [{"text": "Proporciona una breve presentación empresarial sobre una startup de tecnología, resaltando su propuesta de valor, mercado objetivo, y estrategias de crecimiento."}], [{"text": "Crie um plano detalhado de campanha de marketing digital para um novo produto sustentável, incluindo mensagens-chave, público-alvo, e métricas para avaliar o sucesso."}], [{"text": "请写一篇关于人工智能在医学诊断中的应用及其对患者护理的影响的短文,分析其优点和潜在问题。"}], [{"text": "Proposez une analyse critique de l’impact des réseaux sociaux sur la démocratie, en prenant en compte les aspects positifs et négatifs."}], [{"text": "Scrivi una storia breve che inizi con la frase 'Quando la pioggia smise di cadere, il mondo sembrava diverso,' incorporando elementi di realismo magico."}], [{"text": "Erstellen Sie eine detaillierte Marktanalyse für den Einstieg eines Unternehmens in den deutschen E-Commerce-Markt, unter Berücksichtigung von Wettbewerbsanalyse, Zielgruppen und rechtlichen Herausforderungen."}], [{"text": "日本の伝統文化の一つを選び、それが現代社会にどのように影響を与えているかについてのエッセイを書いてください。"}], [{"text": "Обсудите этические проблемы использования искусственного интеллекта в процессе принятия правительственных решений, приводя примеры и возможные решения."}], [{"text": "Napisz plan lekcji do nauczania historii Polski, skupiając się na kluczowych wydarzeniach XX wieku, z uwzględnieniem interaktywnych ćwiczeń."}], [{"text": "Stel een kort verhaal op waarin technologische vooruitgang de menselijke samenleving onherkenbaar verandert. Gebruik elementen van dystopische sciencefiction."}], [{"text": "एक काल्पनिक संवाद लिखिए जिसमें दो लोग वैश्विक जलवायु परिवर्तन के प्रभावों पर चर्चा कर रहे हों, और वे इससे निपटने के उपाय सुझा रहे हों।"}], [{"text": "Yapay zekanın eğitim sistemindeki rolü hakkında bir tartışma yazısı yazın. Bu teknolojinin hem öğretmenlere hem de öğrencilere nasıl fayda sağlayabileceğini tartışın."}], [{"text": "Buatlah analisis tentang bagaimana perkembangan teknologi fintech di Indonesia telah mengubah cara masyarakat mengelola keuangan pribadi."}], [{"text": "Help me study vocabulary: Write a sentence with a blank space where I need to choose the correct word from a list of options (e.g., homophones like 'their,' 'there,' and 'they’re')."}], [{"text": "Viết một câu chuyện ngắn bắt đầu với dòng: 'Ngày mà mặt trời không mọc là ngày mà mọi thứ thay đổi hoàn toàn,' sử dụng yếu tố kỳ ảo và văn hóa dân gian Việt Nam."}], [{"text": "한류와 관련된 현재의 문화적 현상에 대해 논의하고, 그 현상이 어떻게 전 세계에 영향을 미치고 있는지 설명하세요."}], [{"text": "Propón una campaña de marketing digital para un producto ecológico nuevo, detallando el público objetivo, los mensajes clave, los canales a utilizar y los indicadores de éxito."}], [{"text": "Discuta as implicações éticas do uso de IA nos processos de contratação, ponderando os benefícios contra potenciais preconceitos e preocupações com a privacidade, e proponha soluções para mitigar esses problemas."}], [{"text": "为一家国际企业编写一份简短的商业报告,分析在中国市场推出新产品的潜在挑战和机会,并提出具体的市场进入策略。"}], [{"text": "Rédigez un discours pour un politicien abordant les enjeux du changement climatique en France, incluant des solutions innovantes et des appels à l’action citoyenne."}], [{"text": "Traduce un discorso motivazionale dall'italiano all'inglese, assicurandoti che il tono emotivo e il messaggio rimangano efficaci nel contesto culturale anglofono."}], [{"text": "Erstellen Sie einen technischen Bericht über die Implementierung von maschinellem Lernen zur Vorhersage von Kundentrends in einem E-Commerce-Unternehmen, einschließlich Datenanforderungen und Modellbewertung."}], [{"text": "日本の伝統的な詩歌形式である俳句を5つ書いてください。それぞれが四季の異なる風物詩を表現するようにしてください。"}], [{"text": "Напишите эссе о роли медиа в формировании общественного мнения в современной России, учитывая как положительные, так и отрицательные аспекты."}], [{"text": "Napisz esej na temat znaczenia edukacji w społeczeństwie cyfrowym, analizując wyzwania związane z nauką zdalną i rosnącą rolą technologii."}], [{"text": "Vertaal een wetenschappelijk artikel over klimaatverandering van het Engels naar het Nederlands, waarbij je ervoor zorgt dat vakterminologie en nuances correct worden overgebracht."}], [{"text": "एक काल्पनिक कहानी लिखिए जिसमें मुख्य पात्र एक ऐसा अन्वेषक है जो एक प्राचीन रहस्य को उजागर करने के लिए समय यात्रा करता है।"}], [{"text": "Türk mitolojisinden ilham alarak kısa bir öykü yazın. Hikayenizde eski tanrılardan biri modern dünyada uyanır ve kendine yer bulmaya çalışır."}], [{"text": "Analisislah dampak teknologi terhadap pendidikan di Indonesia, dan berikan rekomendasi untuk bagaimana pemerintah bisa memanfaatkan teknologi untuk meningkatkan kualitas pendidikan di daerah-daerah terpencil."}], [{"text": "Help me study vocabulary: create a sentence with a blank, and I'll choose the correct word from a list of options that you provide."}], [{"text": "Viết một email giải thích lý do vì sao dự án của bạn bị trễ hẹn và đề xuất một kế hoạch cụ thể để đẩy nhanh tiến độ trong những tuần tới."}], [{"text": "다음 문장을 완성해 주세요: '새로운 프로젝트가 시작될 때 가장 중요한 것은 ______이다.' 이 문장에 알맞은 단어를 선택해 보세요."}], [{"text": "Escribe un breve ensayo sobre los beneficios de la educación a distancia en comparación con la educación presencial, utilizando un enfoque basado en evidencia."}], [{"text": "Crie um plano de marketing digital para um novo produto sustentável, incluindo público-alvo, mensagens-chave e métricas para avaliar o sucesso."}], [{"text": "请你帮助我学习中文:造一个句子,但留出一个空白,让我从多个选项中选择正确的词语来填空。"}], [{"text": "Rédigez une histoire courte qui commence par cette phrase : 'Le jour où le soleil ne s'est pas levé, tout a changé,' en intégrant des éléments de réalisme magique."}], [{"text": "Traduci una breve presentazione aziendale dall'italiano all'inglese, assicurandoti che sia culturalmente rilevante per un pubblico americano."}], [{"text": "Erstelle eine detaillierte Anleitung zum Schreiben einer Bachelorarbeit in den Geisteswissenschaften, einschließlich der Gliederung, Forschung und Zitiermethoden."}], [{"text": "AIが採用プロセスにおいて使用される場合の倫理的問題について議論し、その利点と潜在的なバイアスについて考え、これらの問題を緩和するための解決策を提案してください。"}], [{"text": "Напишите эссе, обсуждая плюсы и минусы цифрового здравоохранения, и предложите способы улучшения безопасности данных пациентов."}], [{"text": "Zaproponuj strategię zrównoważonego rozwoju dla średniej wielkości firmy produkcyjnej, uwzględniając aspekty ekologiczne, społeczne i ekonomiczne."}], [{"text": "Schrijf een uitgebreide beoordeling van de ethische implicaties van kunstmatige intelligentie in gezichtsherkenningstechnologie en stel beleidsmaatregelen voor om de risico's te beheersen."}], [{"text": "एक छोटी कहानी लिखें जिसमें बताया जाए कि कैसे एक आम आदमी ने अपने गांव को पानी की समस्या से निजात दिलाई, और इसमें मानवीय संघर्ष के तत्व भी शामिल हों।"}], [{"text": "Türkçe öğrenen biri için boşluk doldurma alıştırması yapalım: '______, Türkçede en zor öğrenilen konulardan biridir.' Boşluğu doğru kelimeyle doldurun."}], [{"text": "Tulis sebuah rencana pemasaran digital untuk meningkatkan kesadaran merek sebuah perusahaan teknologi baru di pasar Asia Tenggara."}], ] HEAD = """ """ DESCRIPTION = """\ # Ghost 8B Beta (β, 8k) **Ghost 8B Beta** outperforms leading models like Llama 3.1 8B Instruct and GPT-3.5 Turbo in lc_winrate scores. It also surpasses Claude 3 Opus, Claude 3 Sonnet, GPT-4, and Mistral Large in AlpacaEval 2.0 winrate scores. The model offers two context length versions: [8k](https://huggingface.co/spaces/lamhieu/ghost-8b-beta-8k) and [128k](https://huggingface.co/spaces/lamhieu/ghost-8b-beta-128k), both with built-in multilingual function support. See details about the model [here](https://ghost-x.org/docs/models/ghost-8b-beta), download from [HuggingFace](https://huggingface.co/ghost-x/ghost-8b-beta-1608). Supported languages: 🇬🇧 English, 🇻🇳 Vietnamese, 🇰🇷 Korean, 🇪🇸 Spanish, 🇵🇹 Portuguese, 🇨🇳 Chinese, 🇫🇷 French, 🇮🇹 Italian, 🇩🇪 German, 🇯🇵 Japanese, 🇷🇺 Russian, 🇵🇱 Polish, 🇳🇱 Dutch, 🇮🇳 Hindi, 🇹🇷 Turkish, 🇮🇩 Indonesian. Note: with the image will be used another model to explain rather than using directly the Ghost 8B Beta model. """ PLACEHOLDER = """

👋 Welcome to the Ghost 8B Beta Playground! 🎉

Ask me anything and let's have some fun! 🤔💡

""" LICENSE = """

--- Ghost 8B Beta may give inaccurate information, including information about people, so please verify Ghost 8B Beta's answers. [Ghost 8B Beta](https://ghost-x.org/docs/models/ghost-8b-beta/) by [Ghost X](https://ghost-x.org). """ if not torch.cuda.is_available(): DESCRIPTION += "\n

Running on CPU 🥶 This demo does not work on CPU.

" def workaround_fixed_get_imports(filename: str | os.PathLike) -> list[str]: """ Workaround for fixed get_imports function. @args: filename (str | os.PathLike): The filename or path to the file. @returns: list[str]: The list of imports. @remarks: - This function is a workaround for the fixed get_imports function. - It checks if the filename ends with "/modeling_florence2.py". - If it doesn't, it calls the original get_imports function. - If it does, it calls the original get_imports function and removes the "flash_attn" import. @usage: ```python from unittest.mock import patch image_torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32 with patch( "transformers.dynamic_module_utils.get_imports", workaround_fixed_get_imports ): ``` """ if not str(filename).endswith("/modeling_florence2.py"): return get_imports(filename) imports = get_imports(filename) imports.remove("flash_attn") return imports if torch.cuda.is_available(): hf_serect = os.getenv("HF_TOKEN", None) attn_implementation = "flash_attention_2" chat_model_id = "ghost-x/ghost-8b-beta-1608" chat_device = torch.device("cuda") chat_model = AutoModelForCausalLM.from_pretrained( chat_model_id, device_map="auto", torch_dtype=torch.bfloat16, attn_implementation=attn_implementation, trust_remote_code=True, token=hf_serect, ) chat_tokenizer = AutoTokenizer.from_pretrained( chat_model_id, trust_remote_code=True, token=hf_serect, ) image_model_id = "microsoft/Florence-2-large" # image_device = "cuda" if torch.cuda.is_available() else "cpu" # image_torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32 image_device = "cpu" image_torch_dtype = torch.float32 image_model = ( AutoModelForCausalLM.from_pretrained( image_model_id, torch_dtype=image_torch_dtype, trust_remote_code=True, token=hf_serect, ) .to(image_device) .eval() ) image_processor = AutoProcessor.from_pretrained( image_model_id, trust_remote_code=True, token=hf_serect, ) waiting_tools_timeout = 5 supported_tools = json.dumps( [ { "type": "function", "function": { "name": "search_on_internet", "description": "Use this tool to search for information on the internet to answer questions you are unsure about, don't know or need the latest information (e.g. news, reports, companies, people,...) to give the most accurate results. Note: can only be used or ignored, not asked again", "parameters": { "type": "object", "properties": { "keyword": { "type": "string", "description": "Search keywords, rephrase to optimize search results based on questions suitable to the specified search type.", "required": True, }, "type": { "type": "string", "description": "Search type, based on the question to determine whether to search for it in 'wikipedia' or 'google', prefer to use wikipedia for information about events, history and people.", "enum": ["wikipedia", "google"], "default": "google", "required": True, }, "language": { "type": "string", "description": "Search language, is the user language code with 2 letters, e.g: vi = vietnamese, en = english.", "default": "en", "required": True, }, }, }, }, } ], ensure_ascii=False, ) @lru_cache(maxsize=128) def extract_text_from_webpage(html_content): """ Extracts visible text from an HTML webpage. @args: html_content (str): The HTML content of the webpage. @returns: str: The visible text extracted from the webpage. @remarks: - This function uses the BeautifulSoup library to parse the HTML content. - It removes certain tags (script, style, header, footer, nav, form, svg) from the parsed HTML. - The remaining visible text is then extracted using the `get_text` method of BeautifulSoup. - The extracted text is stripped of leading/trailing whitespace and separated by a single space. """ soup = BeautifulSoup(html_content, "html.parser") for tag in soup(["script", "style", "header", "footer", "nav", "form", "svg"]): tag.extract() visible_text = soup.get_text(strip=True, separator=" ") return visible_text def search_with_wikipedia( query: str, language: str = "en", ): """ Search for a given query on Wikipedia and return the summary. @args: query (str): The search query. language (str, optional): The language code for the Wikipedia page. Defaults to "en". @returns: list: A list containing the summary of the Wikipedia page. @remarks: - This function uses the Wikipedia API to search for the given query. - The language parameter determines the language of the Wikipedia page to search. - If the search is successful, the function returns a list containing the summary of the page. - If an exception occurs during the search, an empty list is returned. """ all_results = [] try: wikipedia.set_lang(language) all_results.append(wikipedia.summary(query)) except Exception as e: pass return all_results def search_with_google( query: str, num_results: int = 3, timeout: int = 5, language: str = "en", ssl_verify: bool = None, ): """ Searches Google for the given query and returns a list of search results. @args: query (str): The search query. num_results (int, optional): The number of search results to retrieve. Defaults to 3. timeout (int, optional): The timeout value for the HTTP requests. Defaults to 5. language (str, optional): The language for the search results. Defaults to "en". ssl_verify (bool, optional): Whether to verify SSL certificates. Defaults to None. @returns: list: A list of dictionaries containing the link and visible text of each search result. @remarks: - This function uses the requests library to send HTTP requests to Google. - It sets the User-Agent header to mimic a Firefox browser. - The search results are retrieved from the HTML response using BeautifulSoup. - Each search result is represented as a dictionary with "link" and "text" keys. - The "link" key contains the URL of the search result. - The "text" key contains the visible text extracted from the search result webpage. - If the visible text exceeds 4096 characters, it is truncated to that length. - If an error occurs while fetching or processing a search result, it is printed and ignored. """ # Initialize an empty list to store the search results all_results = [] # Define the maximum number of characters per page max_chars_per_page = 4096 # Create a session object to send HTTP requests with requests.Session() as session: # Send a GET request to Google search with the specified query parameters resp = session.get( url="https://www.google.com/search", headers={ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0" }, params={ "q": query, "num": num_results, "udm": 14, "hl": language, }, timeout=timeout, verify=ssl_verify, ) # Raise an exception if the response status code is not successful resp.raise_for_status() # Parse the HTML response using BeautifulSoup soup = BeautifulSoup(resp.text, "html.parser") # Find all the result blocks in the HTML result_block = soup.find_all("div", attrs={"class": "g"}) # Iterate over each result block for result in result_block: # Find the link element within the result block link = result.find("a", href=True) # If a link is found, extract the URL and process the webpage if link: link = link["href"] try: # Send a GET request to the link URL webpage = session.get( link, headers={ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0" }, ) # Raise an exception if the response status code is not successful webpage.raise_for_status() # Extract the visible text from the webpage visible_text = extract_text_from_webpage(webpage.text) # Truncate the visible text if it exceeds the maximum number of characters per page if len(visible_text) > max_chars_per_page: visible_text = visible_text[:max_chars_per_page] # Append the link and visible text to the search results list all_results.append({"link": link, "text": visible_text}) except requests.exceptions.RequestException as e: # Print an error message if there is an error fetching or processing the link print(f"Error fetching or processing {link}: {e}") pass else: pass # Return the search results return all_results @lru_cache(maxsize=128) def extract_text_from_image(file: str) -> str: """ Extracts text from an image file. @args: file (str): The path or URL of the image file. @returns: str: The extracted text from the image. @remarks: - This function uses an LRU cache to store previously processed images for faster retrieval. - The image file can be either a local file path or a URL. - The function opens the image file using the PIL library. - The function processes the image using an image processor. - The processed image is then passed to a text generation model to generate text. - The generated text is post-processed to obtain the final extracted text. """ # Define the task and load the image task = "" image = Image.open( requests.get(file, stream=True).raw if file.startswith("http") else open(file, "rb") ) if image.mode != "RGB": image = image.convert("RGB") # Preprocess the image using the image processor inputs = image_processor(text=task, images=image, return_tensors="pt").to( "cpu", image_torch_dtype ) # Generate text based on the input image generated_ids = image_model.generate( input_ids=inputs["input_ids"], pixel_values=inputs["pixel_values"], max_new_tokens=1024, num_beams=3, do_sample=False, ) # Decode the generated text and post-process the answer generated_text = image_processor.batch_decode( generated_ids, skip_special_tokens=False )[0] parsed_answer = image_processor.post_process_generation( generated_text, task=task, image_size=(image.width, image.height), ) # Return the parsed answer for the specified task return parsed_answer[task] @spaces.GPU(duration=90) def generate_chat( uuid: str, message: dict, chat_history: list[tuple[str, str]], allow_used_tools: bool = True, system_prompt: str = "", max_new_tokens: int = 1536, temperature: float = 0.4, top_p: float = 0.95, top_k: int = 50, repetition_penalty: float = 1.0, client_info: str = None, ) -> Iterator[str]: # Build the input_ids for the chat conversation def build_input_ids( system_prompt: str = "", apply_tools: bool = None, references=None, ): conversation = [] # Add the system prompt to the conversation if system_prompt: if system_prompt.strip() == DEFAULT_SYSTEM_PROMPT.strip(): system_prompt = system_prompt.strip() + "\n\n" + client_info + "\n" conversation.append({"role": "system", "content": system_prompt}) # Add the tools role to the conversation if apply_tools is True if apply_tools is True: conversation.append({"role": "tools", "content": supported_tools}) # Add the references role to the conversation # if references is None: # references = [client_info] # else: # references.insert(0, client_info) if ( references is not None and isinstance(references, list) and len(references) > 0 ): formatted_references = f"Analyze the provided references, extract relevant information to provide accurate and objective feedback. This reference information may include: conversation context, assistant or user memories, reasoning guides, problem-solving suggestions, assistant rules, etc.\nIf the reference is not relevant, ignore it. Try to have a balanced approach, avoiding over-reliance on the documentation." formatted_references += "\n\n" + json.dumps( references, indent=2, ensure_ascii=False ) conversation.append( { "role": "refs", "content": formatted_references, } ) # Add the chat history to the conversation for user, assistant in chat_history: conversation.extend( [ {"role": "user", "content": user}, {"role": "assistant", "content": assistant}, ] ) # Add the user message with image attachments to the conversation conversation.append( { "role": "user", "content": ( f"{' & '.join(message['attachments'])}\n\n{message['text']}" if "attachments" in message and len(message["attachments"]) > 0 else f"{message['text']}" ), } ) logger.info(f"UUID: {uuid} - Conversation: {conversation}") # Apply the chat template to convert the conversation into input_ids input_ids = chat_tokenizer.apply_chat_template( conversation, add_generation_prompt=True, return_tensors="pt" ) input_ids = input_ids.to(chat_model.device) # Trim the input_ids if it exceeds the maximum token length if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH: input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:] gr.Warning( f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens." ) return input_ids # Generate chat responses based on the input_ids def generate_chat_responses( previous_response: str = None, ): document_references = [] # Check if the previous response contains scheduled tool runs if previous_response is not None: scheduled_tools_runs = None try: scheduled_tools_runs = json.loads(previous_response) if scheduled_tools_runs["type"] == "function" and scheduled_tools_runs[ "name" ] in ["search_on_internet"]: pass else: scheduled_tools_runs = None except Exception as e: print(e) pass # If scheduled tool runs exist, perform the corresponding searches if ( scheduled_tools_runs is not None and scheduled_tools_runs["name"] == "search_on_internet" ): keyword = scheduled_tools_runs["arguments"]["keyword"] search_type = scheduled_tools_runs["arguments"]["type"] language = scheduled_tools_runs["arguments"]["language"] # Search on Wikipedia if the search type is "wikipedia" if search_type == "wikipedia": gr.Info("Searching for information on the Wikipedia.") document_references.extend( search_with_wikipedia(query=keyword, language=language) ) # Search on Google gr.Info("Searching for information on the Google.") document_references.extend( search_with_google( query=keyword, language=language, num_results=3, ) ) print("document_references:", document_references) # Determine if tools should be applied based on the allow_used_tools flag apply_tools = ( True if allow_used_tools is True and previous_response is None else False ) # Build the input_ids for the chat conversation input_ids = build_input_ids( system_prompt=system_prompt, apply_tools=apply_tools, references=document_references, ) # Create a TextIteratorStreamer to generate chat responses streamer = TextIteratorStreamer( chat_tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True ) # Set the generation parameters generate_kwargs = dict( input_ids=input_ids, streamer=streamer, max_new_tokens=max_new_tokens, do_sample=True, repetition_penalty=repetition_penalty, ) if temperature == 0: generate_kwargs["do_sample"] = False else: generate_kwargs["temperature"] = temperature generate_kwargs["top_p"] = top_p generate_kwargs["top_k"] = top_k # Start the generation process in a separate thread t = Thread(target=chat_model.generate, kwargs=generate_kwargs) t.start() logger.info( f"UUID: {uuid} - Is apply tools: {apply_tools} - Is apply documents: {len(document_references) > 0} - Is previous response: {previous_response is not None} - Start generating chat responses" ) state = { "mark": None, "respond": False, } outputs = [] for text in streamer: if state["mark"] is None: state["mark"] = time.time() outputs.append(text) if ( apply_tools is False or state["mark"] + waiting_tools_timeout < time.time() ): state["respond"] = True yield "".join(outputs) # If tools are applied and no response is generated within the timeout, continue generating chat responses if ( apply_tools is True and state["respond"] is False and state["mark"] + waiting_tools_timeout > time.time() ): previous_response = "".join(outputs) yield from generate_chat_responses(previous_response=previous_response) # Yield the generated chat responses yield from generate_chat_responses(previous_response=None) def generate( message: dict, chat_history: list[tuple[str, str]], allow_used_tools: bool = True, system_prompt: str = "", max_new_tokens: int = 1536, temperature: float = 0.4, top_p: float = 0.95, top_k: int = 50, repetition_penalty: float = 1.0, client_info: str = None, ) -> Iterator[str]: # Generate a unique identifier using the The current time is now uuid = zlib.crc32(str.encode(str(time.time()))) logger.info(f"UUID: {uuid} - Starting image text extraction process") # Limit the number of files to process to 2 if len(message["files"]) > 2: gr.Warning("Only the first 2 images will be processed.") message["files"] = message["files"][:2] # Extract text from each image file and replace the file path with an attachment tag containing the extracted text message["attachments"] = handle_file_extraction( files=list(message["files"]), uuid=uuid ) logger.info(f"UUID: {uuid} - Image text extraction process completed") logger.info(f"UUID: {uuid} - Previous chat history: {chat_history}") for idx, chat_pair in enumerate(chat_history): user_message, assistant_message = chat_pair if not isinstance(user_message, str) and assistant_message is None: text_descriptions = handle_file_extraction( files=list(user_message), uuid=uuid ) chat_input = ( f"{' & '.join(text_descriptions)}\n\n{chat_history[idx + 1][0]}" ) chat_history[idx + 1][0] = chat_input chat_history[idx] = [None, None] logger.info( f"UUID: {uuid} - Updated chat history: {chat_history} - Updated chat input: {chat_input}" ) chat_history = list( filter(lambda x: x[0] is not None and x[1] is not None, chat_history) ) logger.info(f"UUID: {uuid} - Filtered chat history: {chat_history}") yield from generate_chat( uuid=uuid, message=message, chat_history=chat_history, allow_used_tools=allow_used_tools, system_prompt=system_prompt, max_new_tokens=max_new_tokens, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty, client_info=client_info, ) def handle_file_extraction(files: list[str], uuid: str): """ Extracts text from images in the given message's files and returns a list of attachments. @args: message (dict): The message containing files to extract text from. uuid (str): The UUID associated with the extraction process. @returns: list: A list of attachments, each represented as a string. @memarks: - This function iterates over the files in the message and extracts text from each image file. - The extracted text is logged along with the UUID and file information. - The extracted text is then added to the attachments list as a string representation of an attachment. - The attachments list is returned at the end of the function. """ attachments = [] for idx, file_to_extract in enumerate(files): extracted_text = extract_text_from_image(file=file_to_extract) logger.info( f"UUID: {uuid} - File: {file_to_extract} - Extracted text: {extracted_text}" ) attachments.append( f'' ) return attachments chatbot = gr.Chatbot( height=500, placeholder=PLACEHOLDER, label="Ghost 8B Beta (β, 8k)", show_copy_button=True, ) chat_interface = gr.ChatInterface( fn=generate, chatbot=chatbot, fill_height=True, multimodal=True, textbox=gr.MultimodalTextbox( file_types=["image"], placeholder="Type a message...", ), additional_inputs=[ gr.Checkbox( label="Allow used tools (available: search on internet)", value=False, ), gr.Textbox(label="System prompt", lines=6, value=DEFAULT_SYSTEM_PROMPT), gr.Slider( label="Max new tokens", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS, ), gr.Slider( label="Temperature", minimum=0.0, maximum=2.0, step=0.1, value=0.4, ), gr.Slider( label="Top-p (nucleus sampling)", minimum=0.05, maximum=1.0, step=0.05, value=0.95, ), gr.Slider( label="Top-k", minimum=1, maximum=100, step=1, value=50, ), gr.Slider( label="Repetition penalty", minimum=1.0, maximum=2.0, step=0.05, value=1.0, ), gr.Textbox( elem_id="client_info", label="Client info", lines=1, value="The current time is {}".format( time.strftime("%A, %D %B %Y %H:%M:%S") ), visible=False, ), ], additional_inputs_accordion=gr.Accordion(label="Additional Inputs", open=True), stop_btn="Stop", cache_examples=False, examples=EXAMPLES, examples_per_page=8, concurrency_limit=100, ) with gr.Blocks(fill_height=True, css="style.css", head=HEAD) as demo: gr.Markdown(DESCRIPTION) chat_interface.render() gr.Markdown(LICENSE) if __name__ == "__main__": demo.queue().launch(share=True)