from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
import torch
import gradio as gr

# PersistDataset -----
import os
import csv
from huggingface_hub import Repository, hf_hub_download
from datetime import datetime
from typing import List, Dict
import httpx
import pandas as pd

# ---------- For Memory: you will need to set up a dataset and HF_TOKEN ----------
UseMemory = True

if UseMemory:
    DATASET_REPO_URL = "https://huggingface.co/datasets/awacke1/ChatbotMemory.csv"
    DATASET_REPO_ID = "awacke1/ChatbotMemory.csv"
    DATA_FILENAME = "ChatbotMemory.csv"
    DATA_DIRNAME = "data"
    DATA_FILE = os.path.join(DATA_DIRNAME, DATA_FILENAME)
    HF_TOKEN = os.environ.get("HF_TOKEN")

if UseMemory:
    try:
        hf_hub_download(
            repo_id=DATASET_REPO_ID,
            repo_type="dataset",
            filename=DATA_FILENAME,
            cache_dir=DATA_DIRNAME,
            force_filename=DATA_FILENAME,
        )
    except Exception:
        print(f"{DATA_FILENAME} not found; it will arrive with the clone below")
    repo = Repository(
        local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN
    )


async def get_splits(dataset_name: str) -> Dict[str, List[Dict]]:
    URL = f"https://datasets-server.huggingface.co/splits?dataset={dataset_name}"
    async with httpx.AsyncClient() as session:
        response = await session.get(URL)
        return response.json()


async def get_valid_datasets() -> Dict[str, List[str]]:
    URL = "https://datasets-server.huggingface.co/valid"
    async with httpx.AsyncClient() as session:
        response = await session.get(URL)
        datasets = response.json()["valid"]
    return gr.Dropdown.update(choices=datasets, value="kelm")


# The one to watch: https://huggingface.co/rungalileo
# rungalileo/medical_transcription_40
async def get_first_rows(dataset: str, config: str, split: str) -> Dict[str, Dict[str, List[Dict]]]:
    URL = f"https://datasets-server.huggingface.co/first-rows?dataset={dataset}&config={config}&split={split}"
    async with httpx.AsyncClient() as session:
        response = await session.get(URL)
        print(URL)
        return response.json()


def get_df_from_rows(api_output):
    return pd.DataFrame([row["row"] for row in api_output["rows"]])


async def update_configs(dataset_name: str):
    splits = await get_splits(dataset_name)
    all_configs = sorted(set(s["config"] for s in splits["splits"]))
    return (gr.Dropdown.update(choices=all_configs, value=all_configs[0]), splits)


async def update_splits(config_name: str, state: gr.State):
    splits_for_config = sorted(
        set(s["split"] for s in state["splits"] if s["config"] == config_name)
    )
    dataset_name = state["splits"][0]["dataset"]
    dataset = await update_dataset(splits_for_config[0], config_name, dataset_name)
    return (gr.Dropdown.update(choices=splits_for_config, value=splits_for_config[0]), dataset)


async def update_dataset(split_name: str, config_name: str, dataset_name: str):
    rows = await get_first_rows(dataset_name, config_name, split_name)
    df = get_df_from_rows(rows)
    return df


# Guido van Rossum: https://www.youtube.com/watch?v=-DVyjdw4t9I
async def update_URL(dataset: str, config: str, split: str) -> str:
    # Link to the dataset's page on the Hub.
    URL = f"https://huggingface.co/datasets/{dataset}"
    return URL


async def openurl(URL: str) -> str:
    return URL


def store_message(user_message: str, bot_response: str):
    """Append one exchange to the CSV memory file and push it to the Hub.

    To match the existing CSV header, the user message is stored in the
    "name" column and the bot response in the "message" column.
    """
    if user_message and bot_response:
        with open(DATA_FILE, "a") as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=["time", "message", "name"])
            writer.writerow(
                {
                    "time": str(datetime.now()),
                    "message": bot_response.strip(),
                    "name": user_message.strip(),
                }
            )
        # Saving requires write access. If creating your own copy, add an
        # access token called "HF_TOKEN" to your profile, then create a secret
        # for your repo with that token, naming it "HF_TOKEN". For the CSV,
        # copy the header and first few lines to your own file, then update
        # the paths above to save to your own dataset repository.
        commit_url = repo.push_to_hub()
    return ""
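# A minimal sketch of exercising the datasets-server helpers above on their
# own, kept in comments so the app itself is unchanged ("glue"/"cola"/"train"
# are assumed example values, not names this app uses):
#
#   import asyncio
#   splits = asyncio.run(get_splits("glue"))   # {"splits": [{"dataset": ..., "config": ..., "split": ...}, ...]}
#   rows = asyncio.run(get_first_rows("glue", "cola", "train"))
#   df = get_df_from_rows(rows)                # first rows as a pandas DataFrame
#   print(df.head())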
mname = "facebook/blenderbot-400M-distill"
model = BlenderbotForConditionalGeneration.from_pretrained(mname)
tokenizer = BlenderbotTokenizer.from_pretrained(mname)


def take_last_tokens(inputs, note_history, history):
    """Keep only the last 128 tokens so the input fits the model's context."""
    if inputs['input_ids'].shape[1] > 128:
        inputs['input_ids'] = torch.tensor([inputs['input_ids'][0][-128:].tolist()])
        inputs['attention_mask'] = torch.tensor([inputs['attention_mask'][0][-128:].tolist()])
        note_history = [' '.join(note_history[0].split(' ')[2:])]
        history = history[1:]
    return inputs, note_history, history


def add_note_to_history(note, note_history):
    """Append a note to the conversation history and re-join it as one string."""
    note_history.append(note)
    note_history = ' '.join(note_history)
    return [note_history]


title = "💬ChatBack🧠💾"
description = """Chatbot with a persistent memory dataset, allowing a multi-agent AI system to access a shared dataset as a memory pool of stored interactions.
Current SOTA chatbot: https://huggingface.co/facebook/blenderbot-400M-distill?text=Hey+my+name+is+ChatBack%21+Are+you+ready+to+rock%3F"""


def chat(message, history):
    history = history or []
    if history:
        history_useful = [' '.join(str(a[0]) + ' ' + str(a[1]) for a in history)]
    else:
        history_useful = []
    history_useful = add_note_to_history(message, history_useful)
    inputs = tokenizer(history_useful, return_tensors="pt")
    inputs, history_useful, history = take_last_tokens(inputs, history_useful, history)
    reply_ids = model.generate(**inputs)
    response = tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0]
    history_useful = add_note_to_history(response, history_useful)
    history.append((message, response))
    # Save to dataset -- create a dataset to store exchanges and add your
    # HF_TOKEN from your profile to this repo to use it.
    store_message(message, response)
    return history, history


gr.Interface(
    fn=chat,
    theme="huggingface",
    css=".footer {display:none !important}",
    inputs=["text", "state"],
    outputs=["chatbot", "state"],
    title=title,
    allow_flagging="never",
    description="Gradio chatbot backed by memory in a dataset repository.",
    article=f"The memory dataset for saves is [{DATASET_REPO_URL}]({DATASET_REPO_URL}) 🦃Thanks!🦃 Check out HF Datasets: https://huggingface.co/spaces/awacke1/FreddysDatasetViewer SOTA papers, code, and datasets on chat are here: https://paperswithcode.com/datasets?q=chat&v=lst&o=newest",
).launch(debug=True)
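# A minimal sketch of the chat() round trip outside the UI, kept in comments
# so it never runs alongside the launched app (the prompts are made-up
# examples; the model above must be loaded):
#
#   history = []
#   chatbot_pairs, history = chat("Hey! How are you?", history)
#   chatbot_pairs, history = chat("Tell me something about datasets.", history)
#   print(chatbot_pairs)   # [(user_message, bot_response), ...]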