from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
import torch
import gradio as gr
# PersistDataset -----
import os
import csv
from huggingface_hub import Repository, hf_hub_download
from datetime import datetime
from typing import List, Dict
import httpx
import pandas as pd
# ---------- For memory: you will need to set up a dataset and an HF_TOKEN secret ----------
UseMemory = True

if UseMemory:
    DATASET_REPO_URL = "https://huggingface.co/datasets/awacke1/ChatbotMemory.csv"
    DATASET_REPO_ID = "awacke1/ChatbotMemory.csv"
    DATA_FILENAME = "ChatbotMemory.csv"
    DATA_FILE = os.path.join("data", DATA_FILENAME)
    HF_TOKEN = os.environ.get("HF_TOKEN")
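# Note: store_message() below is called as store_message(user_message, bot_response),
# so in the CSV the "name" column holds the user's message and "message" holds the
# bot's reply. An illustrative (made-up) row:
#   time,message,name
#   2023-01-01 12:00:00.000000,Nice to meet you!,Hi there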
if UseMemory:
    try:
        hf_hub_download(
            repo_id=DATASET_REPO_ID,
            repo_type="dataset",  # the memory CSV lives in a dataset repo, not a model repo
            filename=DATA_FILENAME,
            cache_dir="data",
            force_filename=DATA_FILENAME,
        )
    except Exception:
        print("file not found")
    repo = Repository(
        local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN
    )
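# Repository clones the dataset repo into ./data, so DATA_FILE points at the working
# copy; store_message() appends a row there and repo.push_to_hub() commits it back.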
async def get_splits(dataset_name: str) -> Dict[str, List[Dict]]:
    """Query the datasets-server for the configs/splits of a dataset."""
    URL = f"https://datasets-server.huggingface.co/splits?dataset={dataset_name}"
    async with httpx.AsyncClient() as session:
        response = await session.get(URL)
        return response.json()
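# The /splits endpoint returns JSON shaped roughly like (illustrative):
#   {"splits": [{"dataset": "glue", "config": "cola", "split": "train"}, ...]}
# which is what update_configs()/update_splits() below index into.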
async def get_valid_datasets() -> Dict[str, List[str]]:
    """List datasets the datasets-server can preview; default the dropdown to "kelm"."""
    URL = "https://datasets-server.huggingface.co/valid"
    async with httpx.AsyncClient() as session:
        response = await session.get(URL)
        datasets = response.json()["valid"]
    return gr.Dropdown.update(choices=datasets, value="kelm")
# The one to watch: https://huggingface.co/rungalileo
# rungalileo/medical_transcription_40
async def get_first_rows(dataset: str, config: str, split: str) -> Dict[str, Dict[str, List[Dict]]]:
    """Fetch a preview of the first rows for a dataset/config/split."""
    URL = f"https://datasets-server.huggingface.co/first-rows?dataset={dataset}&config={config}&split={split}"
    async with httpx.AsyncClient() as session:
        response = await session.get(URL)
    print(URL)
    return response.json()
def get_df_from_rows(api_output):
    """Flatten the datasets-server "rows" payload into a DataFrame."""
    return pd.DataFrame([row["row"] for row in api_output["rows"]])
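# /first-rows responses look roughly like (illustrative):
#   {"features": [...], "rows": [{"row_idx": 0, "row": {"col": "value", ...}}, ...]}
# so get_df_from_rows() keeps only each entry's "row" dict.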
async def update_configs(dataset_name: str):
    """Refresh the config dropdown when a new dataset is chosen."""
    splits = await get_splits(dataset_name)
    all_configs = sorted(set(s["config"] for s in splits["splits"]))
    return (gr.Dropdown.update(choices=all_configs, value=all_configs[0]),
            splits)
async def update_splits(config_name: str, state: gr.State):
    """Refresh the split dropdown (and the preview) when the config changes."""
    splits_for_config = sorted(set(s["split"] for s in state["splits"] if s["config"] == config_name))
    dataset_name = state["splits"][0]["dataset"]
    dataset = await update_dataset(splits_for_config[0], config_name, dataset_name)
    return (gr.Dropdown.update(choices=splits_for_config, value=splits_for_config[0]), dataset)
async def update_dataset(split_name: str, config_name: str, dataset_name: str):
    """Load the first rows of the selected split into a DataFrame."""
    rows = await get_first_rows(dataset_name, config_name, split_name)
    return get_df_from_rows(rows)
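# These helpers are not wired into the Interface below; a minimal (hypothetical)
# Blocks wiring would chain them like:
#   dataset_dd.change(update_configs, [dataset_dd], [config_dd, state])
#   config_dd.change(update_splits, [config_dd, state], [split_dd, df_out])
#   split_dd.change(update_dataset, [split_dd, config_dd, dataset_dd], [df_out])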
# Guido van Rossum: https://www.youtube.com/watch?v=-DVyjdw4t9I
async def update_URL(dataset: str, config: str, split: str) -> str:
    """Build a link to the dataset's page on the Hub."""
    return f"https://huggingface.co/datasets/{dataset}"
async def openurl(URL: str) -> str:
    """Render a clickable link for the given URL."""
    return f"<a href='{URL}' target='_blank'>{URL}</a>"
def store_message(name: str, message: str):
    """Append one interaction to the memory CSV and push the commit to the Hub."""
    if name and message:
        with open(DATA_FILE, "a") as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=["time", "message", "name"])
            writer.writerow(
                {"time": str(datetime.now()), "message": message.strip(), "name": name.strip()}
            )
        # To save to your own copy: create an access token named "HF_TOKEN" in your profile,
        # add it as a secret called "HF_TOKEN" on your repo, copy the CSV header (and first
        # few lines) into your own dataset, then update the repo paths above.
        repo.push_to_hub()
    return ""
mname = "facebook/blenderbot-400M-distill"
model = BlenderbotForConditionalGeneration.from_pretrained(mname)
tokenizer = BlenderbotTokenizer.from_pretrained(mname)
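# A minimal (assumed) round trip with this model, mirroring what chat() does below:
#   inputs = tokenizer(["Hello, how are you?"], return_tensors="pt")
#   reply_ids = model.generate(**inputs)
#   print(tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])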
def take_last_tokens(inputs, note_history, history):
    """Keep only the last 128 tokens (Blenderbot's context limit), dropping the oldest turn."""
    if inputs['input_ids'].shape[1] > 128:
        inputs['input_ids'] = torch.tensor([inputs['input_ids'][0][-128:].tolist()])
        inputs['attention_mask'] = torch.tensor([inputs['attention_mask'][0][-128:].tolist()])
        note_history = ['</s> <s>'.join(note_history[0].split('</s> <s>')[2:])]
        history = history[1:]
    return inputs, note_history, history
def add_note_to_history(note, note_history):
    """Append a note and re-serialize the history into a single '</s> <s>'-joined string."""
    note_history.append(note)
    return ['</s> <s>'.join(note_history)]
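# History is kept as one string of alternating utterances, e.g. (illustrative):
#   "Hi there</s> <s>Hello! How are you?</s> <s>Good, thanks"
# take_last_tokens() above drops the two oldest utterances when the token budget is hit.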
title = "💬ChatBack🧠💾"
description = """A chatbot with a persistent memory dataset, letting a multi-agent AI system share a dataset as a memory pool of stored interactions.
Current best SOTA chatbot: https://huggingface.co/facebook/blenderbot-400M-distill?text=Hey+my+name+is+ChatBack%21+Are+you+ready+to+rock%3F """
def chat(message, history):
    history = history or []
    if history:
        history_useful = ['</s> <s>'.join([str(a[0]) + '</s> <s>' + str(a[1]) for a in history])]
    else:
        history_useful = []
    history_useful = add_note_to_history(message, history_useful)
    inputs = tokenizer(history_useful, return_tensors="pt")
    inputs, history_useful, history = take_last_tokens(inputs, history_useful, history)
    reply_ids = model.generate(**inputs)
    response = tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0]
    history_useful = add_note_to_history(response, history_useful)
    list_history = history_useful[0].split('</s> <s>')
    history.append((list_history[-2], list_history[-1]))
    if UseMemory:
        store_message(message, response)  # save to the dataset; the user message lands in "name", the reply in "message"
    return history, history
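# Example turn (state threads through the second argument/return value):
#   history = []
#   history, _ = chat("Hi there!", history)  # history[-1] is (user_msg, bot_reply)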
gr.Interface(
    fn=chat,
    theme="huggingface",
    css=".footer {display:none !important}",
    inputs=["text", "state"],
    outputs=["chatbot", "state"],
    title=title,
    allow_flagging="never",
    description="Gradio chatbot backed by memory in a dataset repository.",
    article=f"The memory dataset saves to [{DATASET_REPO_URL}]({DATASET_REPO_URL}) 🦃Thanks!🦃 Check out HF Datasets: https://huggingface.co/spaces/awacke1/FreddysDatasetViewer SOTA chat papers, code, and datasets: https://paperswithcode.com/datasets?q=chat&v=lst&o=newest"
).launch(debug=True)